code stringlengths 1 25.8M | language stringclasses 18 values | source stringclasses 4 values | repo stringclasses 78 values | path stringlengths 0 268 |
|---|---|---|---|---|
#!/usr/bin/env python2
# vim:fileencoding=UTF-8:ts=4:sw=4:sta:et:sts=4:ai
from __future__ import (unicode_literals, division, absolute_import,
print_function)
__license__ = 'GPL v3'
__copyright__ = '2014, Kovid Goyal <kovid@kovidgoyal.net>'
__docformat__ = 'restructuredtext en'
'''
Setup instructions for linux build-system
Edit /etc/network/interfaces and add
iface eth1 inet static
address 192.168.xxx.xxx
Also add eth1 to the auto line (use sudo ifup eth1 to start eth1 without rebooting)
sudo visudo (all no password actions for user)
sudo apt-get install build-essential module-assistant vim zsh vim-scripts rsync \
htop nasm unzip libdbus-1-dev cmake libltdl-dev libudev-dev apt-file \
libdbus-glib-1-dev libcups2-dev "^libxcb.*" libx11-xcb-dev libglu1-mesa-dev \
libxrender-dev flex bison gperf libasound2-dev libgstreamer0.10-dev \
libgstreamer-plugins-base0.10-dev libpulse-dev libgtk2.0-dev libffi-dev xcb-proto python-xcbgen dh-autoreconf
apt-file update
# For recent enough version of debian (>= sid) also install libxkbcommon-dev
mkdir -p ~/bin ~/sw/sources ~/sw/build
chsh -s /bin/zsh
Edit /etc/default/grub and change the GRUB_TIMEOUT to 1, then run
sudo update-grub
Copy over authorized_keys, .vimrc and .zshrc
Create ~/.zshenv as
export SW=$HOME/sw
export MAKEOPTS="-j2"
export CFLAGS=-I$SW/include
export LDFLAGS=-L$SW/lib
export LD_LIBRARY_PATH=$SW/lib
export PKG_CONFIG_PATH=$SW/lib/pkgconfig:$PKG_CONFIG_PATH
typeset -U path
path=($SW/bin "$path[@]")
path=($SW/qt/bin "$path[@]")
path=(~/bin "$path[@]")
'''
import sys, os, shutil, platform, subprocess, stat, py_compile, glob, textwrap, tarfile, time
from functools import partial
from setup import Command, modules, basenames, functions, __version__, __appname__
from setup.build_environment import QT_DLLS, QT_PLUGINS, qt, PYQT_MODULES, sw as SW
from setup.parallel_build import create_job, parallel_build
j = os.path.join
is64bit = platform.architecture()[0] == '64bit'
py_ver = '.'.join(map(str, sys.version_info[:2]))
arch = 'x86_64' if is64bit else 'i686'
def binary_includes():
    """Return absolute paths of all binaries and shared libraries (other than
    the Qt plugins) that must be bundled into the frozen build."""
    return [
        # Poppler command line tools used for PDF processing
        j(SW, 'bin', x) for x in ('pdftohtml', 'pdfinfo', 'pdftoppm')] + [
        # Versioned shared libraries built into the $SW prefix
        j(SW, 'lib', 'lib' + x) for x in (
            'usb-1.0.so.0', 'mtp.so.9', 'expat.so.1', 'sqlite3.so.0',
            'podofo.so.0.9.1', 'z.so.1', 'bz2.so.1.0', 'poppler.so.46',
            'iconv.so.2', 'xml2.so.2', 'xslt.so.1', 'jpeg.so.8', 'png16.so.16', 'webp.so.5',
            'exslt.so.0', 'imobiledevice.so.5', 'usbmuxd.so.4', 'plist.so.3',
            'MagickCore-6.Q16.so.2', 'MagickWand-6.Q16.so.2', 'ssl.so.1.0.0',
            'crypto.so.1.0.0', 'readline.so.6', 'chm.so.0', 'icudata.so.53',
            'icui18n.so.53', 'icuuc.so.53', 'icuio.so.53', 'python%s.so.1.0' % py_ver,
            'gcrypt.so.20', 'gpg-error.so.0', 'gobject-2.0.so.0', 'glib-2.0.so.0',
            'gthread-2.0.so.0', 'gmodule-2.0.so.0', 'gio-2.0.so.0', 'dbus-glib-1.so.2',
        )] + [
        # System libraries picked up from the distro's multiarch directories;
        # [-1] takes the last glob match (raises IndexError if none is found)
        glob.glob('/lib/*/lib' + x)[-1] for x in (
            'dbus-1.so.3', 'pcre.so.3'
        )] + [
        glob.glob('/usr/lib/*/lib' + x)[-1] for x in (
            'gstreamer-0.10.so.0', 'gstbase-0.10.so.0', 'gstpbutils-0.10.so.0',
            'gstapp-0.10.so.0', 'gstinterfaces-0.10.so.0', 'gstvideo-0.10.so.0', 'orc-0.4.so.0',
            'ffi.so.5',
            # 'stdc++.so.6',
            # We dont include libstdc++.so as the OpenGL dlls on the target
            # computer fail to load in the QPA xcb plugin if they were compiled
            # with a newer version of gcc than the one on the build computer.
            # libstdc++, like glibc is forward compatible and I dont think any
            # distros do not have libstdc++.so.6, so it should be safe to leave it out.
            # https://gcc.gnu.org/onlinedocs/libstdc++/manual/abi.html (The current
            # debian stable libstdc++ is libstdc++.so.6.0.17)
        )] + [
        # Qt shared libraries for the modules listed in QT_DLLS
        j(qt['libs'], 'lib%s.so.5' % x) for x in QT_DLLS]
def ignore_in_lib(base, items, ignored_dirs=None):
    """``shutil.copytree`` ignore callback: returns the entries of *base*
    that should NOT be copied.

    Directories are skipped when they are VCS/test directories or are not
    python packages (no __init__.py), except for a directory named
    'plugins'. Plain files are skipped unless they end in .py or .so.
    """
    if ignored_dirs is None:
        ignored_dirs = {'.svn', '.bzr', '.git', 'test', 'tests', 'testing'}
    ignored = []
    for entry in items:
        full = os.path.join(base, entry)
        if os.path.isdir(full):
            is_pkg = os.path.exists(os.path.join(full, '__init__.py'))
            if (entry in ignored_dirs or not is_pkg) and entry != 'plugins':
                ignored.append(entry)
        elif entry.rpartition('.')[-1] not in ('so', 'py'):
            ignored.append(entry)
    return ignored
def import_site_packages(srcdir, dest):
    """Flatten the contents of the site-packages directory *srcdir* into
    *dest*.

    - .py and .so files are copied directly.
    - .pth files (except setuptools.pth) are followed: each line naming an
      existing directory is recursively imported.
    - Package directories (containing __init__.py) are copied whole,
      filtered through ignore_in_lib.
    """
    if not os.path.exists(dest):
        os.mkdir(dest)
    for x in os.listdir(srcdir):
        ext = x.rpartition('.')[-1]
        f = j(srcdir, x)
        if ext in ('py', 'so'):
            shutil.copy2(f, dest)
        elif ext == 'pth' and x != 'setuptools.pth':
            # Close the handle deterministically instead of relying on the
            # garbage collector to reap the leaked file object
            with open(f, 'rb') as pth:
                lines = pth.read().splitlines()
            for line in lines:
                src = os.path.abspath(j(srcdir, line))
                # isdir() is False for non-existent paths, so the previous
                # exists() check was redundant
                if os.path.isdir(src):
                    import_site_packages(src, dest)
        elif os.path.exists(j(f, '__init__.py')):
            shutil.copytree(f, j(dest, x), ignore=ignore_in_lib)
def is_elf(path):
    """Return True iff the file at *path* starts with the ELF magic bytes."""
    with open(path, 'rb') as fobj:
        magic = fobj.read(4)
    return magic == b'\x7fELF'
STRIPCMD = ['strip']


def strip_files(files, argv_max=(256 * 1024)):
    """Strip a collection of binaries, batching paths so each strip
    invocation stays under *argv_max* bytes of arguments.

    Files that are not writable are temporarily chmod'ed writable so that
    strip can modify them; their original modes are restored afterwards,
    even if strip fails (the original code skipped restoration on error).
    """
    while files:
        cmd = list(STRIPCMD)
        pathlen = sum(len(s) + 1 for s in cmd)
        # Fill this batch until we would exceed the argv budget
        while pathlen < argv_max and files:
            f = files.pop()
            cmd.append(f)
            pathlen += len(f) + 1
        if len(cmd) > len(STRIPCMD):
            all_files = cmd[len(STRIPCMD):]
            unwritable_files = tuple(
                (x, os.stat(x).st_mode) for x in all_files
                if not os.access(x, os.W_OK))
            for path, old_mode in unwritable_files:
                os.chmod(path, stat.S_IWRITE | old_mode)
            try:
                subprocess.check_call(cmd)
            finally:
                for path, old_mode in unwritable_files:
                    os.chmod(path, old_mode)
class LinuxFreeze(Command):
def add_options(self, parser):
if not parser.has_option('--dont-strip'):
parser.add_option('-x', '--dont-strip', default=False,
action='store_true', help='Dont strip the generated binaries')
    def run(self, opts):
        """Entry point: assemble the frozen linux build and archive it."""
        self.drop_privileges()
        self.opts = opts
        self.src_root = self.d(self.SRC)
        # Everything is assembled under build/linfrozen
        self.base = self.j(self.src_root, 'build', 'linfrozen')
        self.lib_dir = self.j(self.base, 'lib')
        self.bin_dir = self.j(self.base, 'bin')
        self.initbase()
        self.copy_libs()
        self.copy_python()
        self.build_launchers()
        if not self.opts.dont_strip:
            self.strip_files()
        self.create_tarfile()
    def initbase(self):
        # Always start from an empty output directory
        if os.path.exists(self.base):
            shutil.rmtree(self.base)
        os.makedirs(self.base)
    def copy_libs(self):
        """Copy bundled binaries, shared libraries, Qt plugins and the
        ImageMagick data directory into the frozen tree."""
        self.info('Copying libs...')
        os.mkdir(self.lib_dir)
        os.mkdir(self.bin_dir)
        for x in binary_includes():
            # Executables go to bin/, shared libraries to lib/
            dest = self.bin_dir if '/bin/' in x else self.lib_dir
            shutil.copy2(x, dest)
        base = qt['plugins']
        dest = self.j(self.lib_dir, 'qt_plugins')
        os.mkdir(dest)
        for x in QT_PLUGINS:
            shutil.copytree(self.j(base, x), self.j(dest, x))
        # Bundle the versioned ImageMagick directory (e.g. ImageMagick-6.x.y),
        # excluding static archives. The basename is later compiled into the
        # C launcher via -DMAGICK_BASE in build_launchers().
        im = glob.glob(SW + '/lib/ImageMagick-*')[-1]
        self.magick_base = os.path.basename(im)
        dest = self.j(self.lib_dir, self.magick_base)
        shutil.copytree(im, dest, ignore=shutil.ignore_patterns('*.a'))
    def copy_python(self):
        """Copy the python standard library, filtered site-packages and the
        calibre source tree into the frozen lib directory, then byte-compile
        everything and remove the .py sources."""
        self.info('Copying python...')
        srcdir = self.j(SW, 'lib/python'+py_ver)
        self.py_dir = self.j(self.lib_dir, self.b(srcdir))
        if not os.path.exists(self.py_dir):
            os.mkdir(self.py_dir)
        # Standard library, minus tests and packaging/IDE machinery
        for x in os.listdir(srcdir):
            y = self.j(srcdir, x)
            ext = os.path.splitext(x)[1]
            if os.path.isdir(y) and x not in ('test', 'hotshot', 'distutils',
                    'site-packages', 'idlelib', 'lib2to3', 'dist-packages'):
                shutil.copytree(y, self.j(self.py_dir, x),
                        ignore=ignore_in_lib)
            if os.path.isfile(y) and ext in ('.py', '.so'):
                shutil.copy2(y, self.py_dir)
        # site-packages, following .pth files
        srcdir = self.j(srcdir, 'site-packages')
        dest = self.j(self.py_dir, 'site-packages')
        import_site_packages(srcdir, dest)
        # Drop PyQt5 extension modules that calibre does not use
        filter_pyqt = {x+'.so' for x in PYQT_MODULES}
        pyqt = self.j(dest, 'PyQt5')
        for x in os.listdir(pyqt):
            if x.endswith('.so') and x not in filter_pyqt:
                os.remove(self.j(pyqt, x))
        # calibre's own source tree (packages and top level modules)
        for x in os.listdir(self.SRC):
            c = self.j(self.SRC, x)
            if os.path.exists(self.j(c, '__init__.py')):
                shutil.copytree(c, self.j(dest, x), ignore=partial(ignore_in_lib, ignored_dirs={}))
            elif os.path.isfile(c):
                shutil.copy2(c, self.j(dest, x))
        shutil.copytree(self.j(self.src_root, 'resources'), self.j(self.base,
            'resources'))
        self.create_site_py()
        # Byte-compile all .py files (dfile embeds the path relative to
        # py_dir into the compiled file) and remove the sources
        for x in os.walk(self.py_dir):
            for f in x[-1]:
                if f.endswith('.py'):
                    y = self.j(x[0], f)
                    rel = os.path.relpath(y, self.py_dir)
                    try:
                        py_compile.compile(y, dfile=rel, doraise=True)
                        os.remove(y)
                        # NOTE(review): this also deletes the freshly written
                        # y+'c' file, leaving neither source nor bytecode —
                        # presumably compilation output is produced elsewhere
                        # or this is intended as a syntax check; confirm.
                        z = y+'c'
                        if os.path.exists(z):
                            os.remove(z)
                    except:
                        # PyQt ships python3-only sources under uic/port_v3
                        # that are expected to fail to byte-compile here
                        if '/uic/port_v3/' not in y:
                            self.warn('Failed to byte-compile', y)
def run_builder(self, cmd, verbose=True):
p = subprocess.Popen(cmd, stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
if verbose:
self.info(*cmd)
x = p.stdout.read() + p.stderr.read()
if x.strip():
self.info(x.strip())
if p.wait() != 0:
self.info('Failed to run builder')
sys.exit(1)
    def create_tarfile(self):
        """Archive the frozen tree into dist/ and compress it with xz."""
        self.info('Creating archive...')
        base = self.j(self.d(self.SRC), 'dist')
        if not os.path.exists(base):
            os.mkdir(base)
        dist = os.path.join(base, '%s-%s-%s.tar'%(__appname__, __version__, arch))
        with tarfile.open(dist, mode='w', format=tarfile.PAX_FORMAT) as tf:
            # chdir into the build root so archive members have relative paths
            cwd = os.getcwd()
            os.chdir(self.base)
            try:
                for x in os.listdir('.'):
                    tf.add(x)
            finally:
                os.chdir(cwd)
        self.info('Compressing archive...')
        ans = dist.rpartition('.')[0] + '.txz'
        if False:  # debug switch: flip to True to skip the slow xz step
            os.rename(dist, ans)
        else:
            start_time = time.time()
            # -9 for maximum compression, --threads=0 uses all CPUs
            subprocess.check_call(['xz', '--threads=0', '-f', '-9', dist])
            secs = time.time() - start_time
            self.info('Compressed in %d minutes %d seconds' % (secs // 60, secs % 60))
            os.rename(dist + '.xz', ans)
        self.info('Archive %s created: %.2f MB'%(
            os.path.basename(ans), os.stat(ans).st_size/(1024.**2)))
    def build_launchers(self):
        """Compile the shared launcher library, the per-command executables
        and the generic C launcher shim."""
        self.obj_dir = self.j(self.src_root, 'build', 'launcher')
        if not os.path.exists(self.obj_dir):
            os.makedirs(self.obj_dir)
        base = self.j(self.src_root, 'setup', 'installer', 'linux')
        sources = [self.j(base, x) for x in ['util.c']]
        headers = [self.j(base, x) for x in ['util.h']]
        objects = [self.j(self.obj_dir, self.b(x)+'.o') for x in sources]
        cflags = '-fno-strict-aliasing -W -Wall -c -O2 -pipe -DPYTHON_VER="python%s"'%py_ver
        cflags = cflags.split() + ['-I%s/include/python%s' % (SW, py_ver)]
        # Recompile object files only when source/headers (or this script)
        # are newer than the existing object
        for src, obj in zip(sources, objects):
            if not self.newer(obj, headers+[src, __file__]):
                continue
            cmd = ['gcc'] + cflags + ['-fPIC', '-o', obj, src]
            self.run_builder(cmd)
        dll = self.j(self.lib_dir, 'libcalibre-launcher.so')
        if self.newer(dll, objects):
            # $ORIGIN rpath lets the launchers find bundled libs relative
            # to their own install location
            cmd = ['gcc', '-O2', '-Wl,--rpath=$ORIGIN/../lib', '-fPIC', '-o', dll, '-shared'] + objects + \
                ['-L%s/lib'%SW, '-lpython'+py_ver]
            self.info('Linking libcalibre-launcher.so')
            self.run_builder(cmd)
        src = self.j(base, 'main.c')
        # calibre_postinstall is only shipped with the frozen build, so it
        # is appended to the command metadata here
        modules['console'].append('calibre.linux')
        basenames['console'].append('calibre_postinstall')
        functions['console'].append('main')
        c_launcher = '/tmp/calibre-c-launcher'
        lsrc = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'launcher.c')
        cmd = ['gcc', '-O2', '-DMAGICK_BASE="%s"' % self.magick_base, '-o', c_launcher, lsrc, ]
        self.info('Compiling launcher')
        self.run_builder(cmd, verbose=False)
        jobs = []
        self.info('Processing launchers')
        for typ in ('console', 'gui', ):
            for mod, bname, func in zip(modules[typ], basenames[typ],
                    functions[typ]):
                xflags = list(cflags)
                xflags.remove('-c')  # these invocations compile AND link
                xflags += ['-DGUI_APP='+('1' if typ == 'gui' else '0')]
                xflags += ['-DMODULE="%s"'%mod, '-DBASENAME="%s"'%bname,
                    '-DFUNCTION="%s"'%func]
                exe = self.j(self.bin_dir, bname)
                if self.newer(exe, [src, __file__]+headers):
                    cmd = ['gcc'] + xflags + [src, '-o', exe, '-L' + self.lib_dir, '-lcalibre-launcher']
                    jobs.append(create_job(cmd))
                # Every command also gets a copy of the generic launcher
                # shim at the top level of the frozen tree
                sh = self.j(self.base, bname)
                shutil.copy2(c_launcher, sh)
                os.chmod(sh,
                    stat.S_IREAD|stat.S_IEXEC|stat.S_IWRITE|stat.S_IRGRP|stat.S_IXGRP|stat.S_IROTH|stat.S_IXOTH)
        if jobs:
            if not parallel_build(jobs, self.info, verbose=False):
                raise SystemExit(1)
    def strip_files(self):
        """Strip all bundled executables and ELF libraries to save space."""
        from calibre import walk
        # Launcher executables in bin/ plus the launcher shims of the same
        # names copied to the tree root (self.d(self.bin_dir) is self.base)
        files = {self.j(self.bin_dir, x) for x in os.listdir(self.bin_dir)} | {
            x for x in {
                self.j(self.d(self.bin_dir), x) for x in os.listdir(self.bin_dir)} if os.path.exists(x)}
        for x in walk(self.lib_dir):
            # Resolve symlinks so each real file is stripped only once
            x = os.path.realpath(x)
            if x not in files and is_elf(x):
                files.add(x)
        self.info('Stripping %d files...' % len(files))
        before = sum(os.path.getsize(x) for x in files)
        strip_files(files)
        after = sum(os.path.getsize(x) for x in files)
        self.info('Stripped %.1f MB' % ((before - after)/(1024*1024.)))
    def create_site_py(self):  # {{{
        """Write the custom site.py used inside the frozen build.

        The generated module sets up the default encoding from the locale,
        installs the interactive help() builtin, points OpenSSL at the
        distro's certificate store and finally imports and runs the calibre
        module/function recorded on sys by the C launcher.
        """
        with open(self.j(self.py_dir, 'site.py'), 'wb') as f:
            f.write(textwrap.dedent('''\
            import sys
            import encodings # noqa
            import __builtin__
            import locale
            import os
            import codecs

            def set_default_encoding():
                try:
                    locale.setlocale(locale.LC_ALL, '')
                except:
                    print ('WARNING: Failed to set default libc locale, using en_US.UTF-8')
                    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8')
                enc = locale.getdefaultlocale()[1]
                if not enc:
                    enc = locale.nl_langinfo(locale.CODESET)
                if not enc or enc.lower() == 'ascii':
                    enc = 'UTF-8'
                try:
                    enc = codecs.lookup(enc).name
                except LookupError:
                    enc = 'UTF-8'
                sys.setdefaultencoding(enc)
                del sys.setdefaultencoding

            class _Helper(object):
                """Define the builtin 'help'.
                This is a wrapper around pydoc.help (with a twist).
                """

                def __repr__(self):
                    return "Type help() for interactive help, " \\
                        "or help(object) for help about object."

                def __call__(self, *args, **kwds):
                    import pydoc
                    return pydoc.help(*args, **kwds)

            def set_helper():
                __builtin__.help = _Helper()

            def setup_openssl_environment():
                # Workaround for Linux distros that have still failed to get their heads
                # out of their asses and implement a common location for SSL certificates.
                # It's not that hard people, there exists a wonderful tool called the symlink
                # See http://www.mobileread.com/forums/showthread.php?t=256095
                if b'SSL_CERT_FILE' not in os.environ and b'SSL_CERT_DIR' not in os.environ:
                    if os.access('/etc/pki/tls/certs/ca-bundle.crt', os.R_OK):
                        os.environ['SSL_CERT_FILE'] = '/etc/pki/tls/certs/ca-bundle.crt'
                    elif os.path.isdir('/etc/ssl/certs'):
                        os.environ['SSL_CERT_DIR'] = '/etc/ssl/certs'

            def main():
                try:
                    sys.argv[0] = sys.calibre_basename
                    dfv = os.environ.get('CALIBRE_DEVELOP_FROM', None)
                    if dfv and os.path.exists(dfv):
                        sys.path.insert(0, os.path.abspath(dfv))
                    set_default_encoding()
                    set_helper()
                    setup_openssl_environment()
                    mod = __import__(sys.calibre_module, fromlist=[1])
                    func = getattr(mod, sys.calibre_function)
                    return func()
                except SystemExit as err:
                    if err.code is None:
                        return 0
                    if isinstance(err.code, int):
                        return err.code
                    print (err.code)
                    return 1
                except:
                    import traceback
                    traceback.print_exc()
                    return 1
            '''))
    # }}}
"""Crypto utilities."""
import contextlib
import logging
import socket
import sys
from six.moves import range # pylint: disable=import-error,redefined-builtin
import OpenSSL
from acme import errors
logger = logging.getLogger(__name__)
# TLSSNI01 certificate serving and probing is not affected by SSL
# vulnerabilities: prober needs to check certificate for expected
# contents anyway. Working SNI is the only thing that's necessary for
# the challenge and thus scoping down SSL/TLS method (version) would
# cause interoperability issues: TLSv1_METHOD is only compatible with
# TLSv1_METHOD, while SSLv23_METHOD is compatible with all other
# methods, including TLSv2_METHOD (read more at
# https://www.openssl.org/docs/ssl/SSLv23_method.html). _serve_sni
# should be changed to use "set_options" to disable SSLv2 and SSLv3,
# in case it's used for things other than probing/serving!
_DEFAULT_TLSSNI01_SSL_METHOD = OpenSSL.SSL.SSLv23_METHOD
class SSLSocket(object):  # pylint: disable=too-few-public-methods
    """SSL wrapper for sockets.

    :ivar socket sock: Original wrapped socket.
    :ivar dict certs: Mapping from domain names (`bytes`) to
        `OpenSSL.crypto.X509`.
    :ivar method: See `OpenSSL.SSL.Context` for allowed values.

    """
    def __init__(self, sock, certs, method=_DEFAULT_TLSSNI01_SSL_METHOD):
        self.sock = sock
        self.certs = certs
        self.method = method

    def __getattr__(self, name):
        # Delegate everything not defined here to the wrapped socket
        return getattr(self.sock, name)

    def _pick_certificate_cb(self, connection):
        """SNI certificate callback.

        This method will set a new OpenSSL context object for this
        connection when an incoming connection provides an SNI name
        (in order to serve the appropriate certificate, if any).

        :param connection: The TLS connection object on which the SNI
            extension was received.
        :type connection: :class:`OpenSSL.Connection`

        """
        server_name = connection.get_servername()
        try:
            key, cert = self.certs[server_name]
        except KeyError:
            # Returning without setting a context makes the handshake fail,
            # which accept() turns into a socket.error
            logger.debug("Server name (%s) not recognized, dropping SSL",
                         server_name)
            return
        new_context = OpenSSL.SSL.Context(self.method)
        new_context.use_privatekey(key)
        new_context.use_certificate(cert)
        connection.set_context(new_context)

    class FakeConnection(object):
        """Fake OpenSSL.SSL.Connection."""

        # pylint: disable=missing-docstring

        def __init__(self, connection):
            self._wrapped = connection

        def __getattr__(self, name):
            return getattr(self._wrapped, name)

        def shutdown(self, *unused_args):
            # OpenSSL.SSL.Connection.shutdown doesn't accept any args
            return self._wrapped.shutdown()

    def accept(self):  # pylint: disable=missing-docstring
        sock, addr = self.sock.accept()

        context = OpenSSL.SSL.Context(self.method)
        context.set_tlsext_servername_callback(self._pick_certificate_cb)

        ssl_sock = self.FakeConnection(OpenSSL.SSL.Connection(context, sock))
        ssl_sock.set_accept_state()

        logger.debug("Performing handshake with %s", addr)
        try:
            ssl_sock.do_handshake()
        except OpenSSL.SSL.Error as error:
            # _pick_certificate_cb might have returned without
            # creating SSL context (wrong server name)
            raise socket.error(error)

        return ssl_sock, addr
def probe_sni(name, host, port=443, timeout=300,
              method=_DEFAULT_TLSSNI01_SSL_METHOD, source_address=('0', 0)):
    """Probe SNI server for SSL certificate.

    :param bytes name: Byte string to send as the server name in the
        client hello message.
    :param bytes host: Host to connect to.
    :param int port: Port to connect to.
    :param int timeout: Timeout in seconds.
    :param method: See `OpenSSL.SSL.Context` for allowed values.
    :param tuple source_address: Enables multi-path probing (selection
        of source interface). See `socket.create_connection` for more
        info. Available only in Python 2.7+.

    :raises acme.errors.Error: In case of any problems.

    :returns: SSL certificate presented by the server.
    :rtype: OpenSSL.crypto.X509

    """
    context = OpenSSL.SSL.Context(method)
    context.set_timeout(timeout)

    # source_address is only supported by socket.create_connection on 2.7+
    socket_kwargs = {} if sys.version_info < (2, 7) else {
        'source_address': source_address}

    try:
        # pylint: disable=star-args
        sock = socket.create_connection((host, port), **socket_kwargs)
    except socket.error as error:
        raise errors.Error(error)

    with contextlib.closing(sock) as client:
        client_ssl = OpenSSL.SSL.Connection(context, client)
        client_ssl.set_connect_state()
        client_ssl.set_tlsext_host_name(name)  # pyOpenSSL>=0.13
        try:
            client_ssl.do_handshake()
            client_ssl.shutdown()
        except OpenSSL.SSL.Error as error:
            # Handshake and shutdown failures are both surfaced as the
            # package-level error type
            raise errors.Error(error)
    return client_ssl.get_peer_certificate()
def _pyopenssl_cert_or_req_san(cert_or_req):
    """Get Subject Alternative Names from certificate or CSR using pyOpenSSL.

    .. todo:: Implement directly in PyOpenSSL!

    .. note:: Although this is `acme` internal API, it is used by
        `letsencrypt`.

    :param cert_or_req: Certificate or CSR.
    :type cert_or_req: `OpenSSL.crypto.X509` or `OpenSSL.crypto.X509Req`.

    :returns: A list of Subject Alternative Names.
    :rtype: `list` of `unicode`

    """
    # constants based on implementation of
    # OpenSSL.crypto.X509Error._subjectAltNameString
    parts_separator = ", "
    part_separator = ":"
    extension_short_name = b"subjectAltName"

    if hasattr(cert_or_req, 'get_extensions'):  # X509Req
        extensions = cert_or_req.get_extensions()
    else:  # X509
        extensions = [cert_or_req.get_extension(i)
                      for i in range(cert_or_req.get_extension_count())]

    # pylint: disable=protected-access,no-member
    # label is the prefix pyOpenSSL renders for DNS-type SAN entries
    label = OpenSSL.crypto.X509Extension._prefixes[OpenSSL.crypto._lib.GEN_DNS]
    assert parts_separator not in label
    prefix = label + part_separator

    san_extensions = [
        ext._subjectAltNameString().split(parts_separator)
        for ext in extensions if ext.get_short_name() == extension_short_name]
    # WARNING: this function assumes that no SAN can include
    # parts_separator, hence the split!

    # Drop the "DNS:"-style prefix from each matching entry
    return [part.split(part_separator)[1] for parts in san_extensions
            for part in parts if part.startswith(prefix)]
def gen_ss_cert(key, domains, not_before=None,
                validity=(7 * 24 * 60 * 60), force_san=True):
    """Generate new self-signed certificate.

    :type domains: `list` of `unicode`
    :param OpenSSL.crypto.PKey key:
    :param bool force_san:
    :param not_before: Offset in seconds from now for the notBefore date
        (``None`` means "valid from now").
    :param int validity: Lifetime in seconds (default one week).

    If more than one domain is provided, all of the domains are put into
    ``subjectAltName`` X.509 extension and first domain is set as the
    subject CN. If only one domain is provided no ``subjectAltName``
    extension is used, unless `force_san` is ``True``.

    """
    assert domains, "Must provide one or more hostnames for the cert."
    cert = OpenSSL.crypto.X509()
    cert.set_serial_number(1337)
    cert.set_version(2)  # X.509 v3 (the value is zero-indexed)

    extensions = [
        OpenSSL.crypto.X509Extension(
            b"basicConstraints", True, b"CA:TRUE, pathlen:0"),
    ]

    cert.get_subject().CN = domains[0]
    # TODO: what to put into cert.get_subject()?
    cert.set_issuer(cert.get_subject())  # self-signed: issuer == subject

    if force_san or len(domains) > 1:
        extensions.append(OpenSSL.crypto.X509Extension(
            b"subjectAltName",
            critical=False,
            value=b", ".join(b"DNS:" + d.encode() for d in domains)
        ))

    cert.add_extensions(extensions)

    cert.gmtime_adj_notBefore(0 if not_before is None else not_before)
    cert.gmtime_adj_notAfter(validity)

    cert.set_pubkey(key)
    cert.sign(key, "sha256")
    return cert
#ifndef JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H
#define JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H

/* Runtime options controlling background threads. */
extern bool opt_background_thread;
extern size_t opt_max_background_threads;

/* Global background-thread state shared across the allocator. */
extern malloc_mutex_t background_thread_lock;
extern atomic_b_t background_thread_enabled_state;
extern size_t n_background_threads;
extern size_t max_background_threads;
extern background_thread_info_t *background_thread_info;

/* Creation, enable/disable and wakeup of background threads. */
bool background_thread_create(tsd_t *tsd, unsigned arena_ind);
bool background_threads_enable(tsd_t *tsd);
bool background_threads_disable(tsd_t *tsd);
bool background_thread_is_started(background_thread_info_t* info);
void background_thread_wakeup_early(background_thread_info_t *info,
    nstime_t *remaining_sleep);

/* Fork handlers: keep background-thread state consistent across fork(). */
void background_thread_prefork0(tsdn_t *tsdn);
void background_thread_prefork1(tsdn_t *tsdn);
void background_thread_postfork_parent(tsdn_t *tsdn);
void background_thread_postfork_child(tsdn_t *tsdn);

/* Stats and mallctl integration. */
bool background_thread_stats_read(tsdn_t *tsdn,
    background_thread_stats_t *stats);
void background_thread_ctl_init(tsdn_t *tsdn);

#ifdef JEMALLOC_PTHREAD_CREATE_WRAPPER
extern int pthread_create_wrapper(pthread_t *__restrict, const pthread_attr_t *,
    void *(*)(void *), void *__restrict);
#endif

/* Bootstrap, in two phases (boot1 requires an initialized base allocator). */
bool background_thread_boot0(void);
bool background_thread_boot1(tsdn_t *tsdn, base_t *base);

#endif /* JEMALLOC_INTERNAL_BACKGROUND_THREAD_EXTERNS_H */
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build ignore
// detect attempts to autodetect the correct
// values of the environment variables
// used by go_ios_exec.
// detect shells out to ideviceinfo, a third party program that can
// be obtained by following the instructions at
// https://github.com/libimobiledevice/libimobiledevice.
package main
import (
"bytes"
"crypto/x509"
"fmt"
"os"
"os/exec"
"strings"
)
// main prints shell "export" statements for GOIOS_DEV_ID, GOIOS_APP_ID and
// GOIOS_TEAM_ID for every provisioning profile that matches all connected
// iOS devices.
func main() {
	udids := getLines(exec.Command("idevice_id", "-l"))
	if len(udids) == 0 {
		fail("no udid found; is a device connected?")
	}

	mps := detectMobileProvisionFiles(udids)
	if len(mps) == 0 {
		fail("did not find mobile provision matching device udids %q", udids)
	}

	fmt.Println("# Available provisioning profiles below.")
	fmt.Println("# NOTE: Any existing app on the device with the app id specified by GOIOS_APP_ID")
	fmt.Println("# will be overwritten when running Go programs.")
	for _, mp := range mps {
		fmt.Println()
		// Decode the CMS-signed profile into a temporary plist file so
		// that PlistBuddy can extract individual fields from it.
		f, err := os.CreateTemp("", "go_ios_detect_")
		check(err)
		fname := f.Name()
		defer os.Remove(fname)

		out := output(parseMobileProvision(mp))
		_, err = f.Write(out)
		check(err)
		check(f.Close())

		// The developer id is the CN of the first developer certificate.
		cert, err := plistExtract(fname, "DeveloperCertificates:0")
		check(err)
		pcert, err := x509.ParseCertificate(cert)
		check(err)
		fmt.Printf("export GOIOS_DEV_ID=\"%s\"\n", pcert.Subject.CommonName)

		appID, err := plistExtract(fname, "Entitlements:application-identifier")
		check(err)
		fmt.Printf("export GOIOS_APP_ID=%s\n", appID)

		teamID, err := plistExtract(fname, "Entitlements:com.apple.developer.team-identifier")
		check(err)
		fmt.Printf("export GOIOS_TEAM_ID=%s\n", teamID)
	}
}
// detectMobileProvisionFiles returns the paths of all .mobileprovision files
// on the machine (located via Spotlight's mdfind) whose decoded plist
// mentions every one of the given device udids.
func detectMobileProvisionFiles(udids [][]byte) []string {
	cmd := exec.Command("mdfind", "-name", ".mobileprovision")
	lines := getLines(cmd)
	var files []string
	for _, line := range lines {
		if len(line) == 0 {
			continue
		}
		xmlLines := getLines(parseMobileProvision(string(line)))
		matches := 0
		for _, udid := range udids {
			for _, xmlLine := range xmlLines {
				if bytes.Contains(xmlLine, udid) {
					matches++
				}
			}
		}
		// NOTE(review): this assumes each udid appears at most once in the
		// plist; repeated mentions of a subset of udids could also make
		// matches == len(udids). Confirm against real profiles.
		if matches == len(udids) {
			files = append(files, string(line))
		}
	}
	return files
}
// parseMobileProvision returns a command that decodes the CMS-signed
// .mobileprovision file fname to its XML plist form on stdout.
func parseMobileProvision(fname string) *exec.Cmd {
	// fname is already a string; the previous string(fname) conversion
	// was redundant.
	return exec.Command("security", "cms", "-D", "-i", fname)
}
// plistExtract returns the value at path inside the plist file fname,
// extracted with PlistBuddy and trimmed of surrounding whitespace.
func plistExtract(fname string, path string) ([]byte, error) {
	out, err := exec.Command("/usr/libexec/PlistBuddy", "-c", "Print "+path, fname).CombinedOutput()
	if err != nil {
		return nil, err
	}
	return bytes.TrimSpace(out), nil
}
// getLines runs cmd and returns its stdout split into lines, dropping the
// empty slice produced by a trailing newline.
func getLines(cmd *exec.Cmd) [][]byte {
	raw := output(cmd)
	split := bytes.Split(raw, []byte("\n"))
	if n := len(split); n > 0 && len(split[n-1]) == 0 {
		split = split[:n-1]
	}
	return split
}
// output runs cmd and returns its stdout. On failure it echoes the command's
// arguments and the error, then terminates the process with status 1.
func output(cmd *exec.Cmd) []byte {
	out, err := cmd.Output()
	if err != nil {
		fmt.Println(strings.Join(cmd.Args, "\n"))
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	return out
}
// check aborts the program via fail when err is non-nil.
func check(err error) {
	if err == nil {
		return
	}
	fail(err.Error())
}
// fail formats msg (printf-style) to stderr, appends a newline and exits
// with a non-zero status.
func fail(msg string, v ...interface{}) {
	fmt.Fprintf(os.Stderr, msg, v...)
	fmt.Fprintln(os.Stderr)
	os.Exit(1)
}
# Copyright 2018 Open Source Robotics Foundation, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Launch a add_two_ints_server and a (synchronous) add_two_ints_client."""
import launch
import launch_ros.actions
def generate_launch_description():
    """Launch the add_two_ints server and client nodes; shut the whole launch
    down once the client process exits."""
    server = launch_ros.actions.Node(
        package='demo_nodes_cpp', executable='add_two_ints_server', output='screen')
    client = launch_ros.actions.Node(
        package='demo_nodes_cpp', executable='add_two_ints_client', output='screen')
    return launch.LaunchDescription([
        server,
        client,
        # TODO(wjwwood): replace this with a `required=True|False` option on ExecuteProcess().
        # Shutdown launch when client exits.
        launch.actions.RegisterEventHandler(
            event_handler=launch.event_handlers.OnProcessExit(
                target_action=client,
                on_exit=[launch.actions.EmitEvent(event=launch.events.Shutdown())],
            )),
    ])
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
import plotly.offline as py
import plotly.graph_objs as go
from plotly.graph_objs import *
import plotly.tools as tls
import seaborn as sns
import plotly
plotly.offline.init_notebook_mode()
def plot_pollutants(df, year, state):
    """Plot mean NO2/O3/SO2/CO levels for one year and state as an
    interactive plotly figure with a per-pollutant dropdown.

    :param df: pollution measurements; must contain 'Date Local' (assumed
        'YYYY-MM-DD' strings — TODO confirm), 'State' and numeric pollutant
        columns.
    :param year: year to select, as a string matching the split date.
    :param state: state name to select.
    :returns: (figure, status message) tuple.
    """
    # split the date into three columns
    df["year"], df["month"], df["day"] = zip(*df["Date Local"].apply(lambda x: x.split('-', 2)))
    # multiindex dataframe with Year and State and groupby mean
    df2 = df.groupby(['year', 'State']).mean()
    # removed useless columns
    del df2['State Code']
    del df2['County Code']
    del df2['Site Num']
    del df2['Unnamed: 0']
    # create a new dataframe with the users input; df3 is a Series of the
    # column means for that (year, state) pair
    df3 = df2.loc[year, state]
    df4 = df3.round(4)
    # plot all levels of pollutants per Year and State
    # NOTE(review): the [0:4]/[4:8]/... slices rely on the remaining numeric
    # columns appearing in groups of four per pollutant, in NO2, O3, SO2, CO
    # order — confirm against the dataset's column layout.
    trace1 = go.Scatter(
        x=df4.index[0:4], y=df4[0:4],
        mode = 'markers',
        marker=dict(
            size='16',
            colorscale='Viridis',
            showscale=False
        ),
        line=Line(
            color='#FFD700',
            width=3
        ),
        name='NO2'
    )
    trace2 = go.Scatter(
        x=df4.index[4:8], y=df4[4:8],
        mode = 'markers',
        marker=dict(
            size='16',
            colorscale='Viridis',
            showscale=False
        ),
        line=Line(
            color='#C0C0C0',
            width=3
        ),
        name='O3'
    )
    trace3 = go.Scatter(
        x=df4.index[8:12], y=df4[8:12],
        mode = 'markers',
        marker=dict(
            size='16',
            colorscale='Viridis',
            showscale=False
        ),
        line=Line(
            color='#BA8651',
            width=3
        ),
        name='SO2'
    )
    trace4 = go.Scatter(
        x=df4.index[12:16], y=df4[12:16],
        mode = 'markers',
        marker=dict(
            size='16',
            colorscale='Viridis',
            showscale=False
        ),
        line=Line(
            color='#000000',
            width=4
        ),
        name='CO'
    )
    data = Data([ trace1, trace2, trace3, trace4])
    # Dropdown menu toggling the visibility of each pollutant's trace
    layout = Layout(
        title='Levels of pollutants in ' + state + ". " + "Year: " + year,
        updatemenus=list([
            dict(
                x=-0.05,
                y=1,
                yanchor='top',
                buttons=list([
                    dict(
                        args=['visible', [True, True, True, True]],
                        label='All',
                        method='restyle'
                    ),
                    dict(
                        args=['visible', [True, False, False, False]],
                        label='NO2',
                        method='restyle'
                    ),
                    dict(
                        args=['visible', [False, True, False, False]],
                        label='O3',
                        method='restyle'
                    ),
                    dict(
                        args=['visible', [False, False, True, False]],
                        label='SO2',
                        method='restyle'
                    ),
                    dict(
                        args=['visible', [False, False, False, True]],
                        label='CO',
                        method='restyle'
                    )
                ]),
            )
        ]),
    )
    fig = Figure(data=data, layout=layout)
    py.iplot(fig)
    plotSuccessful= "Levels of pollutants plotted."
    return fig, plotSuccessful
# Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
This module contains common service setup and teardown code.
"""
from __future__ import absolute_import
import os
from oslo_config import cfg
from st2common import log as logging
from st2common.constants.logging import DEFAULT_LOGGING_CONF_PATH
from st2common.transport.bootstrap_utils import register_exchanges_with_retry
from st2common.signal_handlers import register_common_signal_handlers
from st2common.util.debugging import enable_debugging
from st2common.models.utils.profiling import enable_profiling
from st2common import triggers
from st2common.rbac.migrations import run_all as run_all_rbac_migrations
# Note: This is here for backward compatibility.
# Function has been moved in a standalone module to avoid expensive in-direct
# import costs
from st2common.database_setup import db_setup
from st2common.database_setup import db_teardown
__all__ = [
'setup',
'teardown',
'db_setup',
'db_teardown'
]
LOG = logging.getLogger(__name__)
def setup(service, config, setup_db=True, register_mq_exchanges=True,
          register_signal_handlers=True, register_internal_trigger_types=False,
          run_migrations=True, config_args=None):
    """
    Common service setup function.

    Currently it performs the following operations:

    1. Parses config and CLI arguments
    2. Establishes DB connection
    3. Set log level for all the loggers to DEBUG if --debug flag is present or
       if system.debug config option is set to True.
    4. Registers RabbitMQ exchanges
    5. Registers common signal handlers
    6. Register internal trigger types

    :param service: Name of the service.
    :param config: Config object to use to parse args.
    :param setup_db: True to establish a database connection.
    :param register_mq_exchanges: True to declare the RabbitMQ exchanges
                                  (declaration is retried on failure).
    :param register_signal_handlers: True to install the common signal handlers.
    :param register_internal_trigger_types: True to register the internal
                                            trigger types.
    :param run_migrations: True to run the RBAC migrations.
    :param config_args: Optional explicit argument list handed to
                        ``config.parse_args()`` instead of the process
                        command line (mainly useful in tests).
    """
    # NOTE(review): `service` is part of the public signature but is not
    # referenced anywhere in this body; callers appear to pass it for
    # interface consistency.
    # Set up logger which logs everything which happens during and before config
    # parsing to sys.stdout
    logging.setup(DEFAULT_LOGGING_CONF_PATH, excludes=None)
    # Parse args to setup config.
    if config_args:
        config.parse_args(config_args)
    else:
        config.parse_args()
    config_file_paths = cfg.CONF.config_file
    config_file_paths = [os.path.abspath(path) for path in config_file_paths]
    LOG.debug('Using config files: %s', ','.join(config_file_paths))
    # Setup logging.
    logging_config_path = config.get_logging_config_path()
    logging_config_path = os.path.abspath(logging_config_path)
    LOG.debug('Using logging config: %s', logging_config_path)
    # Re-initialize logging using the service-specific logging config file;
    # this replaces the bootstrap config installed above.
    logging.setup(logging_config_path, redirect_stderr=cfg.CONF.log.redirect_stderr,
                  excludes=cfg.CONF.log.excludes)
    if cfg.CONF.debug or cfg.CONF.system.debug:
        enable_debugging()
    if cfg.CONF.profile:
        enable_profiling()
    # All other setup which requires config to be parsed and logging to
    # be correctly setup.
    if setup_db:
        db_setup()
    if register_mq_exchanges:
        register_exchanges_with_retry()
    if register_signal_handlers:
        register_common_signal_handlers()
    if register_internal_trigger_types:
        triggers.register_internal_trigger_types()
    # TODO: This is a "not so nice" workaround until we have a proper migration system in place
    if run_migrations:
        run_all_rbac_migrations()
def teardown():
    """
    Common teardown function.

    Currently this only tears down the database connection established via
    ``setup`` / ``db_setup``.
    """
    db_teardown()
# Demonstrates the sqlite3 ``Connection.text_factory`` attribute, which
# controls what Python object TEXT values are converted to when fetched.
# (Fixed for Python 3: the previous version of this example was written for
# Python 2 byte strings and crashed or failed its asserts under Python 3.)
import sqlite3

con = sqlite3.connect(":memory:")
cur = con.cursor()

# Create the table
con.execute("create table person(lastname, firstname)")

AUSTRIA = "\xd6sterreich"

# by default, rows are returned as Unicode (str)
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
assert row[0] == AUSTRIA

# but we can make sqlite3 always return bytestrings ...
# NOTE: the factory must be ``bytes`` -- with ``text_factory = str`` (as the
# old Python 2 example had) the fetched value is a str, and the UTF-8
# bytestring assert below would fail.
con.text_factory = bytes
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
assert type(row[0]) == bytes
# the bytestrings will be encoded in UTF-8, unless you stored garbage in the
# database ...
assert row[0] == AUSTRIA.encode("utf-8")

# we can also implement a custom text_factory ...
# here we implement one that will ignore Unicode characters that cannot be
# decoded from UTF-8
con.text_factory = lambda x: str(x, "utf-8", "ignore")
# Bind the invalid-UTF-8 payload as a BLOB and CAST it to TEXT so that it
# reaches the text factory.  (In Python 3 a str parameter is always bound as
# valid UTF-8 text, and the old example's ``str + bytes`` concatenation
# raises TypeError.)
latin1_bytes = ("this is latin1 and would normally create errors"
                "\xe4\xf6\xfc").encode("latin1")
cur.execute("select CAST(? AS TEXT)", (latin1_bytes,))
row = cur.fetchone()
assert type(row[0]) == str
assert row[0] == "this is latin1 and would normally create errors"

# sqlite3.OptimizedUnicode is a deprecated alias of ``str`` on Python 3
# (removed in newer releases); plain ``str`` is the optimized default text
# factory and returns str for ASCII and non-ASCII data alike.
con.text_factory = str
cur.execute("select ?", (AUSTRIA,))
row = cur.fetchone()
assert type(row[0]) == str

cur.execute("select ?", ("Germany",))
row = cur.fetchone()
assert type(row[0]) == str
# Module doctest.
# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
# Major enhancements and refactoring by:
# Jim Fulton
# Edward Loper
# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
r"""Module doctest -- a framework for running examples in docstrings.
In simplest use, end each module M to be tested with:
def _test():
import doctest
doctest.testmod()
if __name__ == "__main__":
_test()
Then running the module as a script will cause the examples in the
docstrings to get executed and verified:
python M.py
This won't display anything unless an example fails, in which case the
failing example(s) and the cause(s) of the failure(s) are printed to stdout
(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
line of output is "Test failed.".
Run it with the -v switch instead:
python M.py -v
and a detailed report of all examples tried is printed to stdout, along
with assorted summaries at the end.
You can force verbose mode by passing "verbose=True" to testmod, or prohibit
it by passing "verbose=False". In either of those cases, sys.argv is not
examined by testmod.
There are a variety of other ways to run doctests, including integration
with the unittest framework, and support for running non-Python text
files containing doctests. There are also many ways to override parts
of doctest's default behaviors. See the Library Reference Manual for
details.
"""
__docformat__ = 'reStructuredText en'
__all__ = [
# 0, Option Flags
'register_optionflag',
'DONT_ACCEPT_TRUE_FOR_1',
'DONT_ACCEPT_BLANKLINE',
'NORMALIZE_WHITESPACE',
'ELLIPSIS',
'SKIP',
'IGNORE_EXCEPTION_DETAIL',
'COMPARISON_FLAGS',
'REPORT_UDIFF',
'REPORT_CDIFF',
'REPORT_NDIFF',
'REPORT_ONLY_FIRST_FAILURE',
'REPORTING_FLAGS',
'FAIL_FAST',
# 1. Utility Functions
# 2. Example & DocTest
'Example',
'DocTest',
# 3. Doctest Parser
'DocTestParser',
# 4. Doctest Finder
'DocTestFinder',
# 5. Doctest Runner
'DocTestRunner',
'OutputChecker',
'DocTestFailure',
'UnexpectedException',
'DebugRunner',
# 6. Test Functions
'testmod',
'testfile',
'run_docstring_examples',
# 7. Unittest Support
'DocTestSuite',
'DocFileSuite',
'set_unittest_reportflags',
# 8. Debugging Support
'script_from_examples',
'testsource',
'debug_src',
'debug',
]
import __future__
import argparse
import difflib
import inspect
import linecache
import os
import pdb
import re
import sys
import traceback
import unittest
from io import StringIO
from collections import namedtuple
TestResults = namedtuple('TestResults', 'failed attempted')
# There are 4 basic classes:
# - Example: a <source, want> pair, plus an intra-docstring line number.
# - DocTest: a collection of examples, parsed from a docstring, plus
# info about where the docstring came from (name, filename, lineno).
# - DocTestFinder: extracts DocTests from a given object's docstring and
# its contained objects' docstrings.
# - DocTestRunner: runs DocTest cases, and accumulates statistics.
#
# So the basic picture is:
#
# list of:
# +------+ +---------+ +-------+
# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
# +------+ +---------+ +-------+
# | Example |
# | ... |
# | Example |
# +---------+
# Option constants.
OPTIONFLAGS_BY_NAME = {}

def register_optionflag(name):
    """Return the option flag for `name`, allocating a fresh bit on first use."""
    try:
        return OPTIONFLAGS_BY_NAME[name]
    except KeyError:
        flag = 1 << len(OPTIONFLAGS_BY_NAME)
        OPTIONFLAGS_BY_NAME[name] = flag
        return flag
# Flags that affect how example output is compared (grouped below into
# COMPARISON_FLAGS).
DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
ELLIPSIS = register_optionflag('ELLIPSIS')
SKIP = register_optionflag('SKIP')
IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')

COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
                    DONT_ACCEPT_BLANKLINE |
                    NORMALIZE_WHITESPACE |
                    ELLIPSIS |
                    SKIP |
                    IGNORE_EXCEPTION_DETAIL)

# Flags that affect how failures are reported (grouped below into
# REPORTING_FLAGS).
REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
FAIL_FAST = register_optionflag('FAIL_FAST')

REPORTING_FLAGS = (REPORT_UDIFF |
                   REPORT_CDIFF |
                   REPORT_NDIFF |
                   REPORT_ONLY_FIRST_FAILURE |
                   FAIL_FAST)

# Special string markers for use in `want` strings:
BLANKLINE_MARKER = '<BLANKLINE>'
ELLIPSIS_MARKER = '...'
######################################################################
## Table of Contents
######################################################################
# 1. Utility Functions
# 2. Example & DocTest -- store test cases
# 3. DocTest Parser -- extracts examples from strings
# 4. DocTest Finder -- extracts test cases from objects
# 5. DocTest Runner -- runs test cases
# 6. Test Functions -- convenient wrappers for testing
# 7. Unittest Support
# 8. Debugging Support
# 9. Example Usage
######################################################################
## 1. Utility Functions
######################################################################
def _extract_future_flags(globs):
    """
    Return the compiler-flags associated with the future features that
    have been imported into the given namespace (globs).
    """
    imported_flags = [
        getattr(__future__, fname).compiler_flag
        for fname in __future__.all_feature_names
        if globs.get(fname, None) is getattr(__future__, fname)
    ]
    result = 0
    for flag in imported_flags:
        result |= flag
    return result
def _normalize_module(module, depth=2):
    """
    Return the module specified by `module`:

    - a module object is returned unchanged;
    - a string is imported and the resulting module returned;
    - None returns the calling module, taken from the stack frame
      `depth` levels up.

    Anything else raises TypeError.
    """
    if inspect.ismodule(module):
        return module
    if isinstance(module, str):
        return __import__(module, globals(), locals(), ["*"])
    if module is None:
        caller_globals = sys._getframe(depth).f_globals
        return sys.modules[caller_globals['__name__']]
    raise TypeError("Expected a module, string, or None")
def _load_testfile(filename, package, module_relative, encoding):
    """
    Read a doctest file and return ``(file_contents, filename)``.

    When `module_relative` is true, `filename` is resolved relative to
    `package` (a module, module name, or None for the caller's module), and
    the file may be read through the package's PEP 302 ``__loader__``, if
    one is present and supports ``get_data``.
    """
    if module_relative:
        # depth=3: skip _load_testfile and its public caller when defaulting
        # `package` to the calling module.
        package = _normalize_module(package, 3)
        filename = _module_relative_path(package, filename)
        if getattr(package, '__loader__', None) is not None:
            if hasattr(package.__loader__, 'get_data'):
                file_contents = package.__loader__.get_data(filename)
                file_contents = file_contents.decode(encoding)
                # get_data() opens files as 'rb', so one must do the equivalent
                # conversion as universal newlines would do.
                return file_contents.replace(os.linesep, '\n'), filename
    # Fall back to reading straight from the filesystem.
    with open(filename, encoding=encoding) as f:
        return f.read(), filename
def _indent(s, indent=4):
    """
    Return `s` with `indent` space characters prepended to every
    non-empty line.
    """
    prefix = ' ' * indent
    # (?m)^(?!$) matches the start of each line that is not empty.
    return re.sub('(?m)^(?!$)', prefix, s)
def _exception_traceback(exc_info):
    """
    Format the ``(type, value, traceback)`` triple `exc_info` (as
    returned by sys.exc_info()) into a traceback string.
    """
    buf = StringIO()
    traceback.print_exception(*exc_info, file=buf)
    return buf.getvalue()
# Override some StringIO methods.
class _SpoofOut(StringIO):
    """A StringIO used in place of stdout while examples run."""

    def getvalue(self):
        result = StringIO.getvalue(self)
        # If anything at all was written, make sure there's a trailing
        # newline.  There's no way for the expected output to indicate
        # that a trailing newline is missing.
        if result and not result.endswith("\n"):
            result += "\n"
        return result

    def truncate(self, size=None):
        # Unlike io.StringIO.truncate(), this historically also moves the
        # file position to `size`.  Preserve that for explicit sizes, but
        # make the documented default (size=None, meaning "truncate at the
        # current position") work instead of crashing in seek(None).
        if size is None:
            size = self.tell()
        self.seek(size)
        StringIO.truncate(self)
# Worst-case linear-time ellipsis matching.
def _ellipsis_match(want, got):
    """
    Worst-case linear-time matching of `want` (which may contain
    ELLIPSIS_MARKER wildcards) against the actual output `got`.

    Essentially the only subtle case:
    >>> _ellipsis_match('aa...aa', 'aaa')
    False
    """
    if ELLIPSIS_MARKER not in want:
        return want == got

    # The literal fragments between the ellipses.
    pieces = want.split(ELLIPSIS_MARKER)
    assert len(pieces) >= 2

    # The window of `got` still available for matching.
    lo, hi = 0, len(got)

    # A non-empty first piece must match `got` exactly at the start.
    head = pieces[0]
    if head:
        if not got.startswith(head):
            return False
        lo = len(head)
        del pieces[0]

    # A non-empty last piece must match `got` exactly at the end.
    tail = pieces[-1]
    if tail:
        if not got.endswith(tail):
            return False
        hi -= len(tail)
        del pieces[-1]

    if lo > hi:
        # The exact end matches together need more characters than `got`
        # has, as in _ellipsis_match('aa...aa', 'aaa').
        return False

    # Each remaining piece only needs a leftmost, non-overlapping
    # occurrence.  Empty pieces (from consecutive ellipses, or an ellipsis
    # at either end) match trivially: find('') succeeds without moving lo.
    for piece in pieces:
        lo = got.find(piece, lo, hi)
        if lo < 0:
            return False
        lo += len(piece)

    return True
def _comment_line(line):
    """Return `line` (minus trailing whitespace) as a Python comment."""
    stripped = line.rstrip()
    return '# ' + stripped if stripped else '#'
def _strip_exception_details(msg):
    """
    Reduce an exception message to the bare (undotted) exception name,
    in support of IGNORE_EXCEPTION_DETAIL.

    The dotted module path prefix (if any) and the message after the
    colon (if any) are dropped; a colon is assumed never to be part of a
    dotted name or an exception name.  E.g. "foo.bar.MyError: la di da"
    becomes "MyError", and "abc.def" (with or without a trailing colon
    and message) becomes "def".
    """
    start, end = 0, len(msg)
    # Only the first line can contain the exception name.
    newline = msg.find("\n")
    if newline >= 0:
        end = newline
    # Drop everything from the first colon onwards.
    colon = msg.find(':', 0, end)
    if colon >= 0:
        end = colon
    # Drop the dotted module path, when present.
    dot = msg.rfind('.', 0, end)
    if dot >= 0:
        start = dot + 1
    return msg[start:end]
class _OutputRedirectingPdb(pdb.Pdb):
    """
    A specialized version of the python debugger that redirects stdout
    to a given stream when interacting with the user.  Stdout is *not*
    redirected when traced code is executed.
    """
    def __init__(self, out):
        # `out` is the stream the debugger UI writes to (stdout is redirected
        # to it only while inside the trace machinery; see trace_dispatch).
        self.__out = out
        self.__debugger_used = False
        # do not play signal games in the pdb
        pdb.Pdb.__init__(self, stdout=out, nosigint=True)
        # still use input() to get user input
        self.use_rawinput = 1

    def set_trace(self, frame=None):
        # Enter the debugger at `frame` (default: our caller's frame), and
        # remember that it was actually used (see set_continue).
        self.__debugger_used = True
        if frame is None:
            frame = sys._getframe().f_back
        pdb.Pdb.set_trace(self, frame)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            pdb.Pdb.set_continue(self)

    def trace_dispatch(self, *args):
        # Redirect stdout to the given stream.
        save_stdout = sys.stdout
        sys.stdout = self.__out
        # Call Pdb's trace dispatch method.
        try:
            return pdb.Pdb.trace_dispatch(self, *args)
        finally:
            # Always restore the real stdout, even if dispatch raises.
            sys.stdout = save_stdout
# [XX] Normalize with respect to os.path.pardir?
def _module_relative_path(module, path):
    """
    Resolve `path` (a '/'-separated, relative path) against the directory
    containing `module`, and return the resulting OS path.

    Raises TypeError if `module` is not a module, and ValueError if `path`
    is absolute or the module's location cannot be determined.
    """
    if not inspect.ismodule(module):
        raise TypeError('Expected a module: %r' % module)
    if path.startswith('/'):
        raise ValueError('Module-relative files may not have absolute paths')

    # Find the base directory for the path.
    if hasattr(module, '__file__'):
        # A normal module/package
        basedir = os.path.split(module.__file__)[0]
    elif module.__name__ == '__main__':
        # An interactive session.
        if len(sys.argv) > 0 and sys.argv[0] != '':
            basedir = os.path.split(sys.argv[0])[0]
        else:
            basedir = os.curdir
    else:
        # A module w/o __file__ (this includes builtins).  Use the module's
        # name in the message: concatenating the module object itself (as a
        # previous version did) raises TypeError instead of the intended
        # ValueError.
        raise ValueError("Can't resolve paths relative to the module %r "
                         "(it has no __file__)" % module.__name__)

    # Combine the base directory and the path.
    return os.path.join(basedir, *(path.split('/')))
######################################################################
## 2. Example & DocTest
######################################################################
## - An "example" is a <source, want> pair, where "source" is a
## fragment of source code, and "want" is the expected output for
## "source." The Example class also includes information about
## where the example was extracted from.
##
## - A "doctest" is a collection of examples, typically extracted from
## a string (such as an object's docstring). The DocTest class also
## includes information about where the string was extracted from.
class Example:
    """
    A single doctest example: a piece of source code together with its
    expected output.

    Attributes:
      source: A single Python statement; always ends with a newline (the
        constructor appends one if necessary).
      want: The expected output from running the source (stdout, or a
        traceback in case of exception).  Ends with a newline unless it
        is empty (the constructor appends one if necessary).
      exc_msg: The exception message the example is expected to produce,
        or None if no exception is expected.  Compared against the return
        value of `traceback.format_exception_only()`; ends with a newline
        unless None (the constructor appends one if necessary).
      lineno: Zero-based line number of this Example within the DocTest
        string it was parsed from.
      indent: Number of space characters preceding the example's first
        prompt in the DocTest string.
      options: Mapping from option flags to True/False, overriding the
        runner's default optionflags for this example; flags not present
        keep their defaults.  Empty by default.
    """
    def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
                 options=None):
        # Normalize: source/want/exc_msg are stored newline-terminated.
        if not source.endswith('\n'):
            source = source + '\n'
        if want and not want.endswith('\n'):
            want = want + '\n'
        if exc_msg is not None and not exc_msg.endswith('\n'):
            exc_msg = exc_msg + '\n'
        self.source = source
        self.want = want
        self.exc_msg = exc_msg
        self.lineno = lineno
        self.indent = indent
        self.options = {} if options is None else options

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return ((self.source, self.want, self.lineno, self.indent,
                 self.options, self.exc_msg) ==
                (other.source, other.want, other.lineno, other.indent,
                 other.options, other.exc_msg))

    def __hash__(self):
        # `options` is a dict (unhashable) and is left out of the hash;
        # equal Examples still hash equal.
        return hash((self.source, self.want, self.lineno, self.indent,
                     self.exc_msg))
class DocTest:
    """
    A collection of doctest examples that are run in a single namespace.

    Attributes:
      examples: The list of examples.
      globs: The namespace (globals) the examples are run in.
      name: A name identifying the DocTest (typically the name of the
        object whose docstring the examples came from).
      filename: The file the examples were extracted from, or None.
      lineno: Zero-based line number within `filename` where the DocTest
        begins, or None if unavailable.
      docstring: The string the examples were extracted from, or None.
    """
    def __init__(self, examples, globs, name, filename, lineno, docstring):
        """
        Create a new DocTest holding `examples`.  The DocTest's globals
        are a shallow copy of `globs`.
        """
        assert not isinstance(examples, str), \
               "DocTest no longer accepts str; use DocTestParser instead"
        self.examples = examples
        self.docstring = docstring
        self.globs = globs.copy()
        self.name = name
        self.filename = filename
        self.lineno = lineno

    def __repr__(self):
        count = len(self.examples)
        if count == 0:
            examples = 'no examples'
        elif count == 1:
            examples = '1 example'
        else:
            examples = '%d examples' % count
        return ('<DocTest %s from %s:%s (%s)>' %
                (self.name, self.filename, self.lineno, examples))

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented
        return (self.examples == other.examples and
                self.docstring == other.docstring and
                self.globs == other.globs and
                self.name == other.name and
                self.filename == other.filename and
                self.lineno == other.lineno)

    def __hash__(self):
        # `examples` and `globs` are unhashable; hash the identifying
        # fields only (equal DocTests still hash equal).
        return hash((self.docstring, self.name, self.filename, self.lineno))

    # This lets us sort tests by name:
    def __lt__(self, other):
        if not isinstance(other, DocTest):
            return NotImplemented
        return ((self.name, self.filename, self.lineno, id(self))
                < (other.name, other.filename, other.lineno, id(other)))
######################################################################
## 3. DocTestParser
######################################################################
class DocTestParser:
    """
    A class used to parse strings containing doctest examples.
    """
    # This regular expression is used to find doctest examples in a
    # string.  It defines three groups: `source` is the source code
    # (including leading indentation and prompts); `indent` is the
    # indentation of the first (PS1) line of the source code; and
    # `want` is the expected output (including leading indentation).
    _EXAMPLE_RE = re.compile(r'''
        # Source consists of a PS1 line followed by zero or more PS2 lines.
        (?P<source>
            (?:^(?P<indent> [ ]*) >>>    .*)    # PS1 line
            (?:\n           [ ]*  \.\.\. .*)*)  # PS2 lines
        \n?
        # Want consists of any non-blank lines that do not start with PS1.
        (?P<want> (?:(?![ ]*$)    # Not a blank line
                     (?![ ]*>>>)  # Not a line starting with PS1
                     .+$\n?       # But any other line
                  )*)
        ''', re.MULTILINE | re.VERBOSE)

    # A regular expression for handling `want` strings that contain
    # expected exceptions.  It divides `want` into three pieces:
    #    - the traceback header line (`hdr`)
    #    - the traceback stack (`stack`)
    #    - the exception message (`msg`), as generated by
    #      traceback.format_exception_only()
    # `msg` may have multiple lines.  We assume/require that the
    # exception message is the first non-indented line starting with a word
    # character following the traceback header line.
    _EXCEPTION_RE = re.compile(r"""
        # Grab the traceback header.  Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            |   innermost\ last
            ) \) :
        )
        \s* $                # toss trailing whitespace on the header.
        (?P<stack> .*?)      # don't blink: absorb stuff until...
        ^ (?P<msg> \w+ .*)   # a line *starts* with alphanum.
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)

    # A callable returning a true value iff its argument is a blank line
    # or contains a single comment.
    _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match

    def parse(self, string, name='<string>'):
        """
        Divide the given string into examples and intervening text,
        and return them as a list of alternating Examples and strings.
        Line numbers for the Examples are 0-based.  The optional
        argument `name` is a name identifying this string, and is only
        used for error messages.
        """
        string = string.expandtabs()
        # If all lines begin with the same indentation, then strip it.
        min_indent = self._min_indent(string)
        if min_indent > 0:
            string = '\n'.join([l[min_indent:] for l in string.split('\n')])

        output = []
        charno, lineno = 0, 0
        # Find all doctest examples in the string:
        for m in self._EXAMPLE_RE.finditer(string):
            # Add the pre-example text to `output`.
            output.append(string[charno:m.start()])
            # Update lineno (lines before this example)
            lineno += string.count('\n', charno, m.start())
            # Extract info from the regexp match.
            (source, options, want, exc_msg) = \
                     self._parse_example(m, name, lineno)
            # Create an Example, and add it to the list.
            if not self._IS_BLANK_OR_COMMENT(source):
                output.append( Example(source, want, exc_msg,
                                       lineno=lineno,
                                       indent=min_indent+len(m.group('indent')),
                                       options=options) )
            # Update lineno (lines inside this example)
            lineno += string.count('\n', m.start(), m.end())
            # Update charno.
            charno = m.end()
        # Add any remaining post-example text to `output`.
        output.append(string[charno:])
        return output

    def get_doctest(self, string, globs, name, filename, lineno):
        """
        Extract all doctest examples from the given string, and
        collect them into a `DocTest` object.

        `globs`, `name`, `filename`, and `lineno` are attributes for
        the new `DocTest` object.  See the documentation for `DocTest`
        for more information.
        """
        return DocTest(self.get_examples(string, name), globs,
                       name, filename, lineno, string)

    def get_examples(self, string, name='<string>'):
        """
        Extract all doctest examples from the given string, and return
        them as a list of `Example` objects.  Line numbers are
        0-based, because it's most common in doctests that nothing
        interesting appears on the same line as opening triple-quote,
        and so the first interesting line is called \"line 1\" then.

        The optional argument `name` is a name identifying this
        string, and is only used for error messages.
        """
        return [x for x in self.parse(string, name)
                if isinstance(x, Example)]

    def _parse_example(self, m, name, lineno):
        """
        Given a regular expression match from `_EXAMPLE_RE` (`m`),
        return a tuple `(source, options, want, exc_msg)`, where `source`
        is the matched example's source code (with prompts and
        indentation stripped); `options` is the option-directive overrides
        found in the source; `want` is the example's expected output (with
        indentation stripped); and `exc_msg` is the expected exception
        message extracted from `want`, or None.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        # Get the example's indentation level.
        indent = len(m.group('indent'))

        # Divide source into lines; check that they're properly
        # indented; and then strip their indentation & prompts.
        source_lines = m.group('source').split('\n')
        self._check_prompt_blank(source_lines, indent, name, lineno)
        self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
        source = '\n'.join([sl[indent+4:] for sl in source_lines])

        # Divide want into lines; check that it's properly indented; and
        # then strip the indentation.  Spaces before the last newline should
        # be preserved, so plain rstrip() isn't good enough.
        want = m.group('want')
        want_lines = want.split('\n')
        if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
            del want_lines[-1]  # forget final newline & spaces after it
        self._check_prefix(want_lines, ' '*indent, name,
                           lineno + len(source_lines))
        want = '\n'.join([wl[indent:] for wl in want_lines])

        # If `want` contains a traceback message, then extract it.
        m = self._EXCEPTION_RE.match(want)
        if m:
            exc_msg = m.group('msg')
        else:
            exc_msg = None

        # Extract options from the source.
        options = self._find_options(source, name, lineno)

        return source, options, want, exc_msg

    # This regular expression looks for option directives in the
    # source code of an example.  Option directives are comments
    # starting with "doctest:".  Warning: this may give false
    # positives for string-literals that contain the string
    # "#doctest:".  Eliminating these false positives would require
    # actually parsing the string; but we limit them by ignoring any
    # line containing "#doctest:" that is *followed* by a quote mark.
    _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
                                      re.MULTILINE)

    def _find_options(self, source, name, lineno):
        """
        Return a dictionary containing option overrides extracted from
        option directives in the given source string.

        `name` is the string's name, and `lineno` is the line number
        where the example starts; both are used for error messages.
        """
        options = {}
        # (note: with the current regexp, this will match at most once:)
        for m in self._OPTION_DIRECTIVE_RE.finditer(source):
            option_strings = m.group(1).replace(',', ' ').split()
            for option in option_strings:
                if (option[0] not in '+-' or
                    option[1:] not in OPTIONFLAGS_BY_NAME):
                    raise ValueError('line %r of the doctest for %s '
                                     'has an invalid option: %r' %
                                     (lineno+1, name, option))
                flag = OPTIONFLAGS_BY_NAME[option[1:]]
                options[flag] = (option[0] == '+')
        if options and self._IS_BLANK_OR_COMMENT(source):
            raise ValueError('line %r of the doctest for %s has an option '
                             'directive on a line with no example: %r' %
                             (lineno, name, source))
        return options

    # This regular expression finds the indentation of every non-blank
    # line in a string.  (Must be a raw string: '\S' in a plain literal is
    # an invalid escape sequence and warns on modern Python.)
    _INDENT_RE = re.compile(r'^([ ]*)(?=\S)', re.MULTILINE)

    def _min_indent(self, s):
        "Return the minimum indentation of any non-blank line in `s`"
        indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
        if len(indents) > 0:
            return min(indents)
        else:
            return 0

    def _check_prompt_blank(self, lines, indent, name, lineno):
        """
        Given the lines of a source string (including prompts and
        leading indentation), check to make sure that every prompt is
        followed by a space character.  If any line is not followed by
        a space character, then raise ValueError.
        """
        for i, line in enumerate(lines):
            if len(line) >= indent+4 and line[indent+3] != ' ':
                raise ValueError('line %r of the docstring for %s '
                                 'lacks blank after %s: %r' %
                                 (lineno+i+1, name,
                                  line[indent:indent+3], line))

    def _check_prefix(self, lines, prefix, name, lineno):
        """
        Check that every line in the given list starts with the given
        prefix; if any line does not, then raise a ValueError.
        """
        for i, line in enumerate(lines):
            if line and not line.startswith(prefix):
                raise ValueError('line %r of the docstring for %s has '
                                 'inconsistent leading whitespace: %r' %
                                 (lineno+i+1, name, line))
######################################################################
## 4. DocTest Finder
######################################################################
class DocTestFinder:
"""
A class used to extract the DocTests that are relevant to a given
object, from its docstring and the docstrings of its contained
objects. Doctests can currently be extracted from the following
object types: modules, functions, classes, methods, staticmethods,
classmethods, and properties.
"""
    def __init__(self, verbose=False, parser=DocTestParser(),
                 recurse=True, exclude_empty=True):
        """
        Create a new doctest finder.

        The optional argument `verbose` is stored for use during the
        search for tests (presumably enabling diagnostic output; its use
        is in `_find` — TODO confirm).

        The optional argument `parser` specifies a class or
        function that should be used to create new DocTest objects (or
        objects that implement the same interface as DocTest).  The
        signature for this factory function should match the signature
        of the DocTest constructor.

        If the optional argument `recurse` is false, then `find` will
        only examine the given object, and not any contained objects.

        If the optional argument `exclude_empty` is false, then `find`
        will include tests for objects with empty docstrings.
        """
        # NOTE: the default `parser` is a single DocTestParser instance
        # created at class-definition time and shared by every finder that
        # does not supply its own (harmless as long as the parser keeps no
        # instance state, which DocTestParser does not).
        self._parser = parser
        self._verbose = verbose
        self._recurse = recurse
        self._exclude_empty = exclude_empty
    def find(self, obj, name=None, module=None, globs=None, extraglobs=None):
        """
        Return a list of the DocTests that are defined by the given
        object's docstring, or by any of its contained objects'
        docstrings.

        The optional parameter `module` is the module that contains
        the given object.  If the module is not specified or is None, then
        the test finder will attempt to automatically determine the
        correct module.  The object's module is used:

            - As a default namespace, if `globs` is not specified.
            - To prevent the DocTestFinder from extracting DocTests
              from objects that are imported from other modules.
            - To find the name of the file containing the object.
            - To help find the line number of the object within its
              file.

        Contained objects whose module does not match `module` are ignored.

        If `module` is False, no attempt to find the module will be made.
        This is obscure, of use mostly in tests:  if `module` is False, or
        is None but cannot be found automatically, then all objects are
        considered to belong to the (non-existent) module, so all contained
        objects will (recursively) be searched for doctests.

        The globals for each DocTest is formed by combining `globs`
        and `extraglobs` (bindings in `extraglobs` override bindings
        in `globs`).  A new copy of the globals dictionary is created
        for each DocTest.  If `globs` is not specified, then it
        defaults to the module's `__dict__`, if specified, or {}
        otherwise.  If `extraglobs` is not specified, then it defaults
        to {}.

        Raises ValueError when `name` is omitted and the object has no
        `__name__` of its own.
        """
        # If name was not specified, then extract it from the object.
        if name is None:
            name = getattr(obj, '__name__', None)
            if name is None:
                raise ValueError("DocTestFinder.find: name must be given "
                                 "when obj.__name__ doesn't exist: %r" %
                                 (type(obj),))

        # Find the module that contains the given object (if obj is
        # a module, then module=obj.).  Note: this may fail, in which
        # case module will be None.
        if module is False:
            module = None
        elif module is None:
            module = inspect.getmodule(obj)

        # Read the module's source code.  This is used by
        # DocTestFinder._find_lineno to find the line number for a
        # given object's docstring.
        try:
            file = inspect.getsourcefile(obj)
        except TypeError:
            source_lines = None
        else:
            if not file:
                # Check to see if it's one of our special internal "files"
                # (see __patched_linecache_getlines).
                file = inspect.getfile(obj)
                # Keep only filenames shaped like '<...]>' (presumably the
                # synthetic '<doctest ...[N]>' names — see the note above);
                # anything else without a source file is unusable here.
                if not file[0]+file[-2:] == '<]>': file = None
            if file is None:
                source_lines = None
            else:
                if module is not None:
                    # Supply the module globals in case the module was
                    # originally loaded via a PEP 302 loader and
                    # file is not a valid filesystem path
                    source_lines = linecache.getlines(file, module.__dict__)
                else:
                    # No access to a loader, so assume it's a normal
                    # filesystem path
                    source_lines = linecache.getlines(file)
                if not source_lines:
                    source_lines = None

        # Initialize globals, and merge in extraglobs.
        if globs is None:
            if module is None:
                globs = {}
            else:
                globs = module.__dict__.copy()
        else:
            globs = globs.copy()
        if extraglobs is not None:
            globs.update(extraglobs)
        if '__name__' not in globs:
            globs['__name__'] = '__main__'  # provide a default module name

        # Recursively explore `obj`, extracting DocTests.
        tests = []
        self._find(tests, obj, name, module, source_lines, globs, {})
        # Sort the tests by alpha order of names, for consistency in
        # verbose-mode output.  This was a feature of doctest in Pythons
        # <= 2.3 that got lost by accident in 2.4.  It was repaired in
        # 2.4.4 and 2.5.
        tests.sort()
        return tests
def _from_module(self, module, object):
"""
Return true if the given object is defined in the given
module.
"""
if module is None:
return True
elif inspect.getmodule(object) is not None:
return module is inspect.getmodule(object)
elif inspect.isfunction(object):
return module.__dict__ is object.__globals__
elif inspect.ismethoddescriptor(object):
if hasattr(object, '__objclass__'):
obj_mod = object.__objclass__.__module__
elif hasattr(object, '__module__'):
obj_mod = object.__module__
else:
return True # [XX] no easy way to tell otherwise
return module.__name__ == obj_mod
elif inspect.isclass(object):
return module.__name__ == object.__module__
elif hasattr(object, '__module__'):
return module.__name__ == object.__module__
elif isinstance(object, property):
return True # [XX] no way not be sure.
else:
raise ValueError("object must be a class or function")
    def _find(self, tests, obj, name, module, source_lines, globs, seen):
        """
        Find tests for the given object and any contained objects, and
        add them to `tests`.
        """
        if self._verbose:
            print('Finding tests in %s' % name)
        # If we've already processed this object, then ignore it.
        # `seen` maps id(obj) -> 1 and guards against cycles.
        if id(obj) in seen:
            return
        seen[id(obj)] = 1
        # Find a test for this object, and add it to the list of tests.
        test = self._get_test(obj, name, module, globs, source_lines)
        if test is not None:
            tests.append(test)
        # Look for tests in a module's contained objects.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                valname = '%s.%s' % (name, valname)
                # Recurse to functions & classes.
                # Only objects actually defined in `module` are searched.
                if ((inspect.isroutine(val) or inspect.isclass(val)) and
                    self._from_module(module, val)):
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
        # Look for tests in a module's __test__ dictionary.
        if inspect.ismodule(obj) and self._recurse:
            for valname, val in getattr(obj, '__test__', {}).items():
                if not isinstance(valname, str):
                    raise ValueError("DocTestFinder.find: __test__ keys "
                                     "must be strings: %r" %
                                     (type(valname),))
                if not (inspect.isroutine(val) or inspect.isclass(val) or
                        inspect.ismodule(val) or isinstance(val, str)):
                    raise ValueError("DocTestFinder.find: __test__ values "
                                     "must be strings, functions, methods, "
                                     "classes, or modules: %r" %
                                     (type(val),))
                valname = '%s.__test__.%s' % (name, valname)
                self._find(tests, val, valname, module, source_lines,
                           globs, seen)
        # Look for tests in a class's contained objects.
        if inspect.isclass(obj) and self._recurse:
            for valname, val in obj.__dict__.items():
                # Special handling for staticmethod/classmethod:
                # unwrap them so the underlying function is examined.
                if isinstance(val, staticmethod):
                    val = getattr(obj, valname)
                if isinstance(val, classmethod):
                    val = getattr(obj, valname).__func__
                # Recurse to methods, properties, and nested classes.
                if ((inspect.isroutine(val) or inspect.isclass(val) or
                     isinstance(val, property)) and
                    self._from_module(module, val)):
                    valname = '%s.%s' % (name, valname)
                    self._find(tests, val, valname, module, source_lines,
                               globs, seen)
def _get_test(self, obj, name, module, globs, source_lines):
"""
Return a DocTest for the given object, if it defines a docstring;
otherwise, return None.
"""
# Extract the object's docstring. If it doesn't have one,
# then return None (no test for this object).
if isinstance(obj, str):
docstring = obj
else:
try:
if obj.__doc__ is None:
docstring = ''
else:
docstring = obj.__doc__
if not isinstance(docstring, str):
docstring = str(docstring)
except (TypeError, AttributeError):
docstring = ''
# Find the docstring's location in the file.
lineno = self._find_lineno(obj, source_lines)
# Don't bother if the docstring is empty.
if self._exclude_empty and not docstring:
return None
# Return a DocTest for this object.
if module is None:
filename = None
else:
filename = getattr(module, '__file__', module.__name__)
if filename[-4:] in (".pyc", ".pyo"):
filename = filename[:-1]
return self._parser.get_doctest(docstring, globs, name,
filename, lineno)
def _find_lineno(self, obj, source_lines):
"""
Return a line number of the given object's docstring. Note:
this method assumes that the object has a docstring.
"""
lineno = None
# Find the line number for modules.
if inspect.ismodule(obj):
lineno = 0
# Find the line number for classes.
# Note: this could be fooled if a class is defined multiple
# times in a single file.
if inspect.isclass(obj):
if source_lines is None:
return None
pat = re.compile(r'^\s*class\s*%s\b' %
getattr(obj, '__name__', '-'))
for i, line in enumerate(source_lines):
if pat.match(line):
lineno = i
break
# Find the line number for functions & methods.
if inspect.ismethod(obj): obj = obj.__func__
if inspect.isfunction(obj): obj = obj.__code__
if inspect.istraceback(obj): obj = obj.tb_frame
if inspect.isframe(obj): obj = obj.f_code
if inspect.iscode(obj):
lineno = getattr(obj, 'co_firstlineno', None)-1
# Find the line number where the docstring starts. Assume
# that it's the first line that begins with a quote mark.
# Note: this could be fooled by a multiline function
# signature, where a continuation line begins with a quote
# mark.
if lineno is not None:
if source_lines is None:
return lineno+1
pat = re.compile('(^|.*:)\s*\w*("|\')')
for lineno in range(lineno, len(source_lines)):
if pat.match(source_lines[lineno]):
return lineno
# We couldn't find the line number.
return None
######################################################################
## 5. DocTest Runner
######################################################################
class DocTestRunner:
    """
    A class used to run DocTest test cases, and accumulate statistics.
    The `run` method is used to process a single DocTest case.  It
    returns a tuple `(f, t)`, where `t` is the number of test cases
    tried, and `f` is the number of test cases that failed.
        >>> tests = DocTestFinder().find(_TestClass)
        >>> runner = DocTestRunner(verbose=False)
        >>> tests.sort(key = lambda test: test.name)
        >>> for test in tests:
        ...     print(test.name, '->', runner.run(test))
        _TestClass -> TestResults(failed=0, attempted=2)
        _TestClass.__init__ -> TestResults(failed=0, attempted=2)
        _TestClass.get -> TestResults(failed=0, attempted=2)
        _TestClass.square -> TestResults(failed=0, attempted=1)
    The `summarize` method prints a summary of all the test cases that
    have been run by the runner, and returns an aggregated `(f, t)`
    tuple:
        >>> runner.summarize(verbose=1)
        4 items passed all tests:
           2 tests in _TestClass
           2 tests in _TestClass.__init__
           2 tests in _TestClass.get
           1 tests in _TestClass.square
        7 tests in 4 items.
        7 passed and 0 failed.
        Test passed.
        TestResults(failed=0, attempted=7)
    The aggregated number of tried examples and failed examples is
    also available via the `tries` and `failures` attributes:
        >>> runner.tries
        7
        >>> runner.failures
        0
    The comparison between expected outputs and actual outputs is done
    by an `OutputChecker`.  This comparison may be customized with a
    number of option flags; see the documentation for `testmod` for
    more information.  If the option flags are insufficient, then the
    comparison may also be customized by passing a subclass of
    `OutputChecker` to the constructor.
    The test runner's display output can be controlled in two ways.
    First, an output function (`out) can be passed to
    `TestRunner.run`; this function will be called with strings that
    should be displayed.  It defaults to `sys.stdout.write`.  If
    capturing the output is not sufficient, then the display output
    can be also customized by subclassing DocTestRunner, and
    overriding the methods `report_start`, `report_success`,
    `report_unexpected_exception`, and `report_failure`.
    """
    # This divider string is used to separate failure messages, and to
    # separate sections of the summary.
    DIVIDER = "*" * 70
    def __init__(self, checker=None, verbose=None, optionflags=0):
        """
        Create a new test runner.
        Optional keyword arg `checker` is the `OutputChecker` that
        should be used to compare the expected outputs and actual
        outputs of doctest examples.
        Optional keyword arg 'verbose' prints lots of stuff if true,
        only failures if false; by default, it's true iff '-v' is in
        sys.argv.
        Optional argument `optionflags` can be used to control how the
        test runner compares expected output to actual output, and how
        it displays failures.  See the documentation for `testmod` for
        more information.
        """
        self._checker = checker or OutputChecker()
        if verbose is None:
            verbose = '-v' in sys.argv
        self._verbose = verbose
        self.optionflags = optionflags
        self.original_optionflags = optionflags
        # Keep track of the examples we've run.
        self.tries = 0
        self.failures = 0
        # Maps test name -> (#failures, #tries), aggregated over runs.
        self._name2ft = {}
        # Create a fake output target for capturing doctest output.
        self._fakeout = _SpoofOut()
    #/////////////////////////////////////////////////////////////////
    # Reporting methods
    #/////////////////////////////////////////////////////////////////
    def report_start(self, out, test, example):
        """
        Report that the test runner is about to process the given
        example.  (Only displays a message if verbose=True)
        """
        if self._verbose:
            if example.want:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting:\n' + _indent(example.want))
            else:
                out('Trying:\n' + _indent(example.source) +
                    'Expecting nothing\n')
    def report_success(self, out, test, example, got):
        """
        Report that the given example ran successfully.  (Only
        displays a message if verbose=True)
        """
        if self._verbose:
            out("ok\n")
    def report_failure(self, out, test, example, got):
        """
        Report that the given example failed.
        """
        out(self._failure_header(test, example) +
            self._checker.output_difference(example, got, self.optionflags))
    def report_unexpected_exception(self, out, test, example, exc_info):
        """
        Report that the given example raised an unexpected exception.
        """
        out(self._failure_header(test, example) +
            'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
    def _failure_header(self, test, example):
        # Build the standard "File ..., line ..., in ..." header that
        # precedes every failure report.
        out = [self.DIVIDER]
        if test.filename:
            if test.lineno is not None and example.lineno is not None:
                # example.lineno is relative to the start of the test.
                lineno = test.lineno + example.lineno + 1
            else:
                lineno = '?'
            out.append('File "%s", line %s, in %s' %
                       (test.filename, lineno, test.name))
        else:
            out.append('Line %s, in %s' % (example.lineno+1, test.name))
        out.append('Failed example:')
        source = example.source
        out.append(_indent(source))
        return '\n'.join(out)
    #/////////////////////////////////////////////////////////////////
    # DocTest Running
    #/////////////////////////////////////////////////////////////////
    def __run(self, test, compileflags, out):
        """
        Run the examples in `test`.  Write the outcome of each example
        with one of the `DocTestRunner.report_*` methods, using the
        writer function `out`.  `compileflags` is the set of compiler
        flags that should be used to execute examples.  Return a tuple
        `(f, t)`, where `t` is the number of examples tried, and `f`
        is the number of examples that failed.  The examples are run
        in the namespace `test.globs`.
        Note: self.debugger is installed by run() before this is called.
        """
        # Keep track of the number of failures and tries.
        failures = tries = 0
        # Save the option flags (since option directives can be used
        # to modify them).
        original_optionflags = self.optionflags
        SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
        check = self._checker.check_output
        # Process each example.
        for examplenum, example in enumerate(test.examples):
            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
            # reporting after the first failure.
            quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
                     failures > 0)
            # Merge in the example's options.
            self.optionflags = original_optionflags
            if example.options:
                for (optionflag, val) in example.options.items():
                    if val:
                        self.optionflags |= optionflag
                    else:
                        self.optionflags &= ~optionflag
            # If 'SKIP' is set, then skip this example.
            if self.optionflags & SKIP:
                continue
            # Record that we started this example.
            tries += 1
            if not quiet:
                self.report_start(out, test, example)
            # Use a special filename for compile(), so we can retrieve
            # the source code during interactive debugging (see
            # __patched_linecache_getlines).
            filename = '<doctest %s[%d]>' % (test.name, examplenum)
            # Run the example in the given context (globs), and record
            # any exception that gets raised.  (But don't intercept
            # keyboard interrupts.)
            try:
                # Don't blink!  This is where the user's code gets run.
                exec(compile(example.source, filename, "single",
                             compileflags, 1), test.globs)
                self.debugger.set_continue() # ==== Example Finished ====
                exception = None
            except KeyboardInterrupt:
                raise
            except:
                exception = sys.exc_info()
                self.debugger.set_continue() # ==== Example Finished ====
            got = self._fakeout.getvalue()  # the actual output
            self._fakeout.truncate(0)
            outcome = FAILURE   # guilty until proved innocent or insane
            # If the example executed without raising any exceptions,
            # verify its output.
            if exception is None:
                if check(example.want, got, self.optionflags):
                    outcome = SUCCESS
            # The example raised an exception:  check if it was expected.
            else:
                exc_msg = traceback.format_exception_only(*exception[:2])[-1]
                if not quiet:
                    got += _exception_traceback(exception)
                # If `example.exc_msg` is None, then we weren't expecting
                # an exception.
                if example.exc_msg is None:
                    outcome = BOOM
                # We expected an exception:  see whether it matches.
                elif check(example.exc_msg, exc_msg, self.optionflags):
                    outcome = SUCCESS
                # Another chance if they didn't care about the detail.
                elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
                    if check(_strip_exception_details(example.exc_msg),
                             _strip_exception_details(exc_msg),
                             self.optionflags):
                        outcome = SUCCESS
            # Report the outcome.
            if outcome is SUCCESS:
                if not quiet:
                    self.report_success(out, test, example, got)
            elif outcome is FAILURE:
                if not quiet:
                    self.report_failure(out, test, example, got)
                failures += 1
            elif outcome is BOOM:
                if not quiet:
                    self.report_unexpected_exception(out, test, example,
                                                     exception)
                failures += 1
            else:
                assert False, ("unknown outcome", outcome)
            if failures and self.optionflags & FAIL_FAST:
                break
        # Restore the option flags (in case they were modified)
        self.optionflags = original_optionflags
        # Record and return the number of failures and tries.
        self.__record_outcome(test, failures, tries)
        return TestResults(failures, tries)
    def __record_outcome(self, test, f, t):
        """
        Record the fact that the given DocTest (`test`) generated `f`
        failures out of `t` tried examples.
        """
        f2, t2 = self._name2ft.get(test.name, (0,0))
        self._name2ft[test.name] = (f+f2, t+t2)
        self.failures += f
        self.tries += t
    # Matches the pseudo-filenames produced by __run for compiled examples.
    __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
                                         r'(?P<name>.+)'
                                         r'\[(?P<examplenum>\d+)\]>$')
    def __patched_linecache_getlines(self, filename, module_globals=None):
        # Serve example source for doctest pseudo-filenames so the
        # debugger can show it; delegate everything else to the saved
        # real linecache.getlines.
        m = self.__LINECACHE_FILENAME_RE.match(filename)
        if m and m.group('name') == self.test.name:
            example = self.test.examples[int(m.group('examplenum'))]
            return example.source.splitlines(keepends=True)
        else:
            return self.save_linecache_getlines(filename, module_globals)
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        """
        Run the examples in `test`, and display the results using the
        writer function `out`.
        The examples are run in the namespace `test.globs`.  If
        `clear_globs` is true (the default), then this namespace will
        be cleared after the test runs, to help with garbage
        collection.  If you would like to examine the namespace after
        the test completes, then use `clear_globs=False`.
        `compileflags` gives the set of flags that should be used by
        the Python compiler when running the examples.  If not
        specified, then it will default to the set of future-import
        flags that apply to `globs`.
        The output of each example is checked using
        `DocTestRunner.check_output`, and the results are formatted by
        the `DocTestRunner.report_*` methods.
        """
        self.test = test
        if compileflags is None:
            compileflags = _extract_future_flags(test.globs)
        save_stdout = sys.stdout
        if out is None:
            encoding = save_stdout.encoding
            if encoding is None or encoding.lower() == 'utf-8':
                out = save_stdout.write
            else:
                # Use backslashreplace error handling on write
                def out(s):
                    s = str(s.encode(encoding, 'backslashreplace'), encoding)
                    save_stdout.write(s)
        sys.stdout = self._fakeout
        # Patch pdb.set_trace to restore sys.stdout during interactive
        # debugging (so it's not still redirected to self._fakeout).
        # Note that the interactive output will go to *our*
        # save_stdout, even if that's not the real sys.stdout; this
        # allows us to write test cases for the set_trace behavior.
        save_trace = sys.gettrace()
        save_set_trace = pdb.set_trace
        self.debugger = _OutputRedirectingPdb(save_stdout)
        self.debugger.reset()
        pdb.set_trace = self.debugger.set_trace
        # Patch linecache.getlines, so we can see the example's source
        # when we're inside the debugger.
        self.save_linecache_getlines = linecache.getlines
        linecache.getlines = self.__patched_linecache_getlines
        # Make sure sys.displayhook just prints the value to stdout
        save_displayhook = sys.displayhook
        sys.displayhook = sys.__displayhook__
        try:
            return self.__run(test, compileflags, out)
        finally:
            # Restore every piece of interpreter state patched above,
            # even if the examples raised.
            sys.stdout = save_stdout
            pdb.set_trace = save_set_trace
            sys.settrace(save_trace)
            linecache.getlines = self.save_linecache_getlines
            sys.displayhook = save_displayhook
            if clear_globs:
                test.globs.clear()
                import builtins
                builtins._ = None
    #/////////////////////////////////////////////////////////////////
    # Summarization
    #/////////////////////////////////////////////////////////////////
    def summarize(self, verbose=None):
        """
        Print a summary of all the test cases that have been run by
        this DocTestRunner, and return a tuple `(f, t)`, where `f` is
        the total number of failed examples, and `t` is the total
        number of tried examples.
        The optional `verbose` argument controls how detailed the
        summary is.  If the verbosity is not specified, then the
        DocTestRunner's verbosity is used.
        """
        if verbose is None:
            verbose = self._verbose
        notests = []
        passed = []
        failed = []
        totalt = totalf = 0
        for x in self._name2ft.items():
            name, (f, t) = x
            assert f <= t
            totalt += t
            totalf += f
            if t == 0:
                notests.append(name)
            elif f == 0:
                passed.append( (name, t) )
            else:
                failed.append(x)
        if verbose:
            if notests:
                print(len(notests), "items had no tests:")
                notests.sort()
                for thing in notests:
                    print(" ", thing)
            if passed:
                print(len(passed), "items passed all tests:")
                passed.sort()
                for thing, count in passed:
                    print(" %3d tests in %s" % (count, thing))
        if failed:
            print(self.DIVIDER)
            print(len(failed), "items had failures:")
            failed.sort()
            for thing, (f, t) in failed:
                print(" %3d of %3d in %s" % (f, t, thing))
        if verbose:
            print(totalt, "tests in", len(self._name2ft), "items.")
            print(totalt - totalf, "passed and", totalf, "failed.")
        if totalf:
            print("***Test Failed***", totalf, "failures.")
        elif verbose:
            print("Test passed.")
        return TestResults(totalf, totalt)
    #/////////////////////////////////////////////////////////////////
    # Backward compatibility cruft to maintain doctest.master.
    #/////////////////////////////////////////////////////////////////
    def merge(self, other):
        """Fold `other`'s per-test (failures, tries) counts into this runner."""
        d = self._name2ft
        for name, (f, t) in other._name2ft.items():
            if name in d:
                # Don't print here by default, since doing
                #     so breaks some of the buildbots
                #print("*** DocTestRunner.merge: '" + name + "' in both" \
                #    " testers; summing outcomes.")
                f2, t2 = d[name]
                f = f + f2
                t = t + t2
            d[name] = f, t
class OutputChecker:
    """
    A class used to check the whether the actual output from a doctest
    example matches the expected output.  `OutputChecker` defines two
    methods: `check_output`, which compares a given pair of outputs,
    and returns true if they match; and `output_difference`, which
    returns a string describing the differences between two outputs.
    """
    def _toAscii(self, s):
        """
        Convert string to hex-escaped ASCII string.
        """
        return str(s.encode('ASCII', 'backslashreplace'), "ASCII")
    def check_output(self, want, got, optionflags):
        """
        Return True iff the actual output from an example (`got`)
        matches the expected output (`want`).  These strings are
        always considered to match if they are identical; but
        depending on what option flags the test runner is using,
        several non-exact match types are also possible.  See the
        documentation for `TestRunner` for more information about
        option flags.
        """
        # If `want` contains hex-escaped character such as "\u1234",
        # then `want` is a string of six characters(e.g. [\,u,1,2,3,4]).
        # On the other hand, `got` could be another sequence of
        # characters such as [\u1234], so `want` and `got` should
        # be folded to hex-escaped ASCII string to compare.
        got = self._toAscii(got)
        want = self._toAscii(want)
        # Handle the common case first, for efficiency:
        # if they're string-identical, always return true.
        if got == want:
            return True
        # The values True and False replaced 1 and 0 as the return
        # value for boolean comparisons in Python 2.3.
        if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
            if (got,want) == ("True\n", "1\n"):
                return True
            if (got,want) == ("False\n", "0\n"):
                return True
        # <BLANKLINE> can be used as a special sequence to signify a
        # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            # Replace <BLANKLINE> in want with a blank line.
            # (Raw strings: '\s' in a plain literal is an invalid escape
            # sequence that raises SyntaxWarning on modern Pythons.)
            want = re.sub(r'(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
                          '', want)
            # If a line in got contains only spaces, then remove the
            # spaces.
            got = re.sub(r'(?m)^\s*?$', '', got)
            if got == want:
                return True
        # This flag causes doctest to ignore any differences in the
        # contents of whitespace strings.  Note that this can be used
        # in conjunction with the ELLIPSIS flag.
        if optionflags & NORMALIZE_WHITESPACE:
            got = ' '.join(got.split())
            want = ' '.join(want.split())
            if got == want:
                return True
        # The ELLIPSIS flag says to let the sequence "..." in `want`
        # match any substring in `got`.
        if optionflags & ELLIPSIS:
            if _ellipsis_match(want, got):
                return True
        # We didn't find any match; return false.
        return False
    # Should we do a fancy diff?
    def _do_a_fancy_diff(self, want, got, optionflags):
        # Not unless they asked for a fancy diff.
        if not optionflags & (REPORT_UDIFF |
                              REPORT_CDIFF |
                              REPORT_NDIFF):
            return False
        # If expected output uses ellipsis, a meaningful fancy diff is
        # too hard ... or maybe not.  In two real-life failures Tim saw,
        # a diff was a major help anyway, so this is commented out.
        # [todo] _ellipsis_match() knows which pieces do and don't match,
        # and could be the basis for a kick-ass diff in this case.
        ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
        ##    return False
        # ndiff does intraline difference marking, so can be useful even
        # for 1-line differences.
        if optionflags & REPORT_NDIFF:
            return True
        # The other diff types need at least a few lines to be helpful.
        return want.count('\n') > 2 and got.count('\n') > 2
    def output_difference(self, example, got, optionflags):
        """
        Return a string describing the differences between the
        expected output for a given example (`example`) and the actual
        output (`got`).  `optionflags` is the set of option flags used
        to compare `want` and `got`.
        """
        want = example.want
        # If <BLANKLINE>s are being used, then replace blank lines
        # with <BLANKLINE> in the actual output string.
        if not (optionflags & DONT_ACCEPT_BLANKLINE):
            got = re.sub(r'(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
        # Check if we should use diff.
        if self._do_a_fancy_diff(want, got, optionflags):
            # Split want & got into lines.
            want_lines = want.splitlines(keepends=True)
            got_lines = got.splitlines(keepends=True)
            # Use difflib to find their differences.
            if optionflags & REPORT_UDIFF:
                diff = difflib.unified_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'unified diff with -expected +actual'
            elif optionflags & REPORT_CDIFF:
                diff = difflib.context_diff(want_lines, got_lines, n=2)
                diff = list(diff)[2:] # strip the diff header
                kind = 'context diff with expected followed by actual'
            elif optionflags & REPORT_NDIFF:
                engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
                diff = list(engine.compare(want_lines, got_lines))
                kind = 'ndiff with -expected +actual'
            else:
                assert 0, 'Bad diff option'
            # Remove trailing whitespace on diff output.
            diff = [line.rstrip() + '\n' for line in diff]
            return 'Differences (%s):\n' % kind + _indent(''.join(diff))
        # If we're not using diff, then simply list the expected
        # output followed by the actual output.
        if want and got:
            return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
        elif want:
            return 'Expected:\n%sGot nothing\n' % _indent(want)
        elif got:
            return 'Expected nothing\nGot:\n%s' % _indent(got)
        else:
            return 'Expected nothing\nGot nothing\n'
class DocTestFailure(Exception):
    """Raised in debugging mode when a DocTest example produces the
    wrong output.

    Instance attributes:

    - test: the DocTest object being run

    - example: the Example object that failed

    - got: the actual output
    """
    def __init__(self, test, example, got):
        self.got = got
        self.example = example
        self.test = test

    def __str__(self):
        # Identify the failure by the test it belongs to.
        return str(self.test)
class UnexpectedException(Exception):
    """Raised in debugging mode when a DocTest example raises an
    exception that was not expected.

    Instance attributes:

    - test: the DocTest object being run

    - example: the Example object that failed

    - exc_info: the exception info
    """
    def __init__(self, test, example, exc_info):
        self.exc_info = exc_info
        self.example = example
        self.test = test

    def __str__(self):
        # Identify the failure by the test it belongs to.
        return str(self.test)
class DebugRunner(DocTestRunner):
    r"""Run doc tests but raise an exception as soon as there is a failure.
    If an unexpected exception occurs, an UnexpectedException is raised.
    It contains the test, the example, and the original exception:
        >>> runner = DebugRunner(verbose=False)
        >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
        ...                                    {}, 'foo', 'foo.py', 0)
        >>> try:
        ...     runner.run(test)
        ... except UnexpectedException as f:
        ...     failure = f
        >>> failure.test is test
        True
        >>> failure.example.want
        '42\n'
        >>> exc_info = failure.exc_info
        >>> raise exc_info[1] # Already has the traceback
        Traceback (most recent call last):
        ...
        KeyError
    We wrap the original exception to give the calling application
    access to the test and example information.
    If the output doesn't match, then a DocTestFailure is raised:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 1
        ...      >>> x
        ...      2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> try:
        ...    runner.run(test)
        ... except DocTestFailure as f:
        ...    failure = f
    DocTestFailure objects provide access to the test:
        >>> failure.test is test
        True
    As well as to the example:
        >>> failure.example.want
        '2\n'
    and the actual output:
        >>> failure.got
        '1\n'
    If a failure or error occurs, the globals are left intact:
        >>> del test.globs['__builtins__']
        >>> test.globs
        {'x': 1}
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 2
        ...      >>> raise KeyError
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> runner.run(test)
        Traceback (most recent call last):
        ...
        doctest.UnexpectedException: <DocTest foo from foo.py:0 (2 examples)>
        >>> del test.globs['__builtins__']
        >>> test.globs
        {'x': 2}
    But the globals are cleared if there is no error:
        >>> test = DocTestParser().get_doctest('''
        ...      >>> x = 2
        ...      ''', {}, 'foo', 'foo.py', 0)
        >>> runner.run(test)
        TestResults(failed=0, attempted=1)
        >>> test.globs
        {}
    """
    def run(self, test, compileflags=None, out=None, clear_globs=True):
        # Run with clear_globs=False so that, if a failure exception
        # propagates out of the base-class run, the globals remain
        # available for post-mortem inspection; clear them only after
        # a fully successful run.
        r = DocTestRunner.run(self, test, compileflags, out, False)
        if clear_globs:
            test.globs.clear()
        return r
    def report_unexpected_exception(self, out, test, example, exc_info):
        # Abort the run immediately instead of recording the error.
        raise UnexpectedException(test, example, exc_info)
    def report_failure(self, out, test, example, got):
        # Abort the run immediately instead of recording the failure.
        raise DocTestFailure(test, example, got)
######################################################################
## 6. Test Functions
######################################################################
# These should be backwards compatible.
# For backward compatibility, a global instance of a DocTestRunner
# class, updated by testmod.  Results from every testmod() call in the
# process are merged into it, so master.summarize() reports aggregates.
master = None
def testmod(m=None, name=None, globs=None, verbose=None,
            report=True, optionflags=0, extraglobs=None,
            raise_on_error=False, exclude_empty=False):
    """m=None, name=None, globs=None, verbose=None, report=True,
       optionflags=0, extraglobs=None, raise_on_error=False,
       exclude_empty=False

    Test examples in docstrings in functions and classes reachable
    from module m (or the current module if m is not supplied), starting
    with m.__doc__.

    Also test examples reachable from dict m.__test__ if it exists and is
    not None.  m.__test__ maps names to functions, classes and strings;
    function and class docstrings are tested even if the name is private;
    strings are tested directly, as if they were docstrings.

    Return (#failures, #tests).

    See help(doctest) for an overview.

    Optional keyword arg "name" gives the name of the module; by default
    use m.__name__.

    Optional keyword arg "globs" gives a dict to be used as the globals
    when executing examples; by default, use m.__dict__.  A copy of this
    dict is actually used for each docstring, so that each docstring's
    examples start with a clean slate.

    Optional keyword arg "extraglobs" gives a dictionary that should be
    merged into the globals that are used to execute examples.  By
    default, no extra globals are used.  This is new in 2.4.

    Optional keyword arg "verbose" prints lots of stuff if true, prints
    only failures if false; by default, it's true iff "-v" is in sys.argv.

    Optional keyword arg "report" prints a summary at the end when true,
    else prints nothing at the end.  In verbose mode, the summary is
    detailed, else very brief (in fact, empty if all tests passed).

    Optional keyword arg "optionflags" or's together module constants,
    and defaults to 0.  This is new in 2.3.  Possible values (see the
    docs for details):

        DONT_ACCEPT_TRUE_FOR_1
        DONT_ACCEPT_BLANKLINE
        NORMALIZE_WHITESPACE
        ELLIPSIS
        SKIP
        IGNORE_EXCEPTION_DETAIL
        REPORT_UDIFF
        REPORT_CDIFF
        REPORT_NDIFF
        REPORT_ONLY_FIRST_FAILURE

    Optional keyword arg "raise_on_error" raises an exception on the
    first unexpected exception or failure.  This allows failures to be
    post-mortem debugged.

    Advanced tomfoolery:  testmod runs methods of a local instance of
    class doctest.Tester, then merges the results into (or creates)
    global Tester instance doctest.master.  Methods of doctest.master
    can be called directly too, if you want to do something unusual.
    Passing report=0 to testmod is especially useful then, to delay
    displaying a summary.  Invoke doctest.master.summarize(verbose)
    when you're done fiddling.
    """
    global master

    # Default to __main__ when no module is supplied; a TypeError below
    # is about as good an error as we can give if even that is missing.
    if m is None:
        m = sys.modules.get('__main__')

    # Refuse anything that is not actually a module.
    if not inspect.ismodule(m):
        raise TypeError("testmod: module required; %r" % (m,))

    # Default the test name to the module's own name.
    if name is None:
        name = m.__name__

    # Collect every doctest in the module, then run them all with the
    # appropriate runner (DebugRunner aborts on the first problem).
    doc_finder = DocTestFinder(exclude_empty=exclude_empty)
    runner_class = DebugRunner if raise_on_error else DocTestRunner
    doc_runner = runner_class(verbose=verbose, optionflags=optionflags)

    for doctest_case in doc_finder.find(m, name, globs=globs,
                                        extraglobs=extraglobs):
        doc_runner.run(doctest_case)

    if report:
        doc_runner.summarize()

    # Maintain the backward-compatible global aggregate runner.
    if master is None:
        master = doc_runner
    else:
        master.merge(doc_runner)

    return TestResults(doc_runner.failures, doc_runner.tries)
def testfile(filename, module_relative=True, name=None, package=None,
             globs=None, verbose=None, report=True, optionflags=0,
             extraglobs=None, raise_on_error=False, parser=DocTestParser(),
             encoding=None):
    """
    Test examples in the given file.  Return (#failures, #tests).

    "filename" is interpreted according to "module_relative": when true
    (the default) it is an os-independent, "/"-separated path resolved
    relative to the calling module's directory (or to "package", when
    given) and may not be absolute; when false it is an ordinary
    os-specific path.  Specifying "package" with module_relative=False is
    an error.

    "name" names the test (default: the file's basename).  "globs" gives
    the globals dict for the examples (a copy is used; default {}) and
    "extraglobs" is merged on top of that copy.  "verbose" prints
    everything if true, only failures if false; by default it is true iff
    "-v" is in sys.argv.  "report" controls whether a summary is printed
    at the end, and "optionflags" or's together module option constants
    (DONT_ACCEPT_TRUE_FOR_1, DONT_ACCEPT_BLANKLINE, NORMALIZE_WHITESPACE,
    ELLIPSIS, SKIP, IGNORE_EXCEPTION_DETAIL, REPORT_UDIFF, REPORT_CDIFF,
    REPORT_NDIFF, REPORT_ONLY_FIRST_FAILURE).

    If "raise_on_error" is true, the first unexpected exception or
    failure is raised instead of recorded, so it can be post-mortem
    debugged.  "parser" is the DocTestParser (or subclass) used to
    extract the test, and "encoding" is used to decode the file.

    Results are also merged into the module-global "master" runner;
    invoke doctest.master.summarize(verbose) later to display the
    accumulated summary (useful together with report=0).
    """
    global master

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and load the file, relativizing module-relative paths.
    text, filename = _load_testfile(filename, package, module_relative,
                                    encoding or "utf-8")

    # Default the test name to the file's basename.
    name = os.path.basename(filename) if name is None else name

    # Build the globals the examples execute in: a copy of `globs`
    # (so the caller's dict is never mutated) plus any extras.
    globs = {} if globs is None else globs.copy()
    if extraglobs is not None:
        globs.update(extraglobs)
    globs.setdefault('__name__', '__main__')

    runner_cls = DebugRunner if raise_on_error else DocTestRunner
    runner = runner_cls(verbose=verbose, optionflags=optionflags)

    # Parse the whole file into a single DocTest and run it.
    test = parser.get_doctest(text, globs, name, filename, 0)
    runner.run(test)

    if report:
        runner.summarize()

    # Accumulate results into the module-global master runner.
    if master is None:
        master = runner
    else:
        master.merge(runner)

    return TestResults(runner.failures, runner.tries)
def run_docstring_examples(f, globs, verbose=False, name="NoName",
                           compileflags=None, optionflags=0):
    """
    Run the examples found in the docstring of object `f`, using `globs`
    as the globals.  `name` appears in failure messages; `verbose` forces
    output even when every example passes.  `compileflags` overrides the
    compiler flags used for the examples (by default the future-import
    flags that apply to `globs`), and `optionflags` selects testing and
    output options as documented for testmod().
    """
    # recurse=False: only f's own docstring, not contained objects.
    doctest_finder = DocTestFinder(verbose=verbose, recurse=False)
    example_runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
    for doc_test in doctest_finder.find(f, name, globs=globs):
        example_runner.run(doc_test, compileflags=compileflags)
######################################################################
## 7. Unittest Support
######################################################################
# Default reporting flags folded into a DocTestCase's optionflags by
# runTest() when the user-supplied flags contain no reporting flags;
# changed via set_unittest_reportflags().
_unittest_reportflags = 0
def set_unittest_reportflags(flags):
    """Sets the unittest option flags.

    The old flag is returned so that a runner could restore the old
    value if it wished to:

      >>> import doctest
      >>> old = doctest._unittest_reportflags
      >>> doctest.set_unittest_reportflags(REPORT_NDIFF |
      ...                          REPORT_ONLY_FIRST_FAILURE) == old
      True

      >>> doctest._unittest_reportflags == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True

    Only reporting flags can be set:

      >>> doctest.set_unittest_reportflags(ELLIPSIS)
      Traceback (most recent call last):
      ...
      ValueError: ('Only reporting flags allowed', 8)

      >>> doctest.set_unittest_reportflags(old) == (REPORT_NDIFF |
      ...                                   REPORT_ONLY_FIRST_FAILURE)
      True
    """
    global _unittest_reportflags

    # Any bit outside the reporting-flag mask is rejected.
    if flags & ~REPORTING_FLAGS:
        raise ValueError("Only reporting flags allowed", flags)

    previous = _unittest_reportflags
    _unittest_reportflags = flags
    return previous
class DocTestCase(unittest.TestCase):
    # Adapts one DocTest object to the unittest.TestCase interface so
    # doctests can run inside unittest suites.  All state lives in
    # "_dt_"-prefixed attributes to avoid clashing with TestCase internals.

    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None):
        unittest.TestCase.__init__(self)
        self._dt_optionflags = optionflags
        self._dt_checker = checker
        self._dt_test = test
        self._dt_setUp = setUp
        self._dt_tearDown = tearDown

    def setUp(self):
        # Run the user-supplied set-up hook (if any), passing it the
        # DocTest so it can seed test.globs.
        test = self._dt_test
        if self._dt_setUp is not None:
            self._dt_setUp(test)

    def tearDown(self):
        test = self._dt_test
        if self._dt_tearDown is not None:
            self._dt_tearDown(test)
        # Drop the test's globals so objects created by the examples
        # don't outlive the run.
        test.globs.clear()

    def runTest(self):
        test = self._dt_test
        old = sys.stdout
        new = StringIO()
        optionflags = self._dt_optionflags
        if not (optionflags & REPORTING_FLAGS):
            # The option flags don't include any reporting flags,
            # so add the default reporting flags
            optionflags |= _unittest_reportflags
        runner = DocTestRunner(optionflags=optionflags,
                               checker=self._dt_checker, verbose=False)
        try:
            runner.DIVIDER = "-"*70
            # The runner's report is captured into `new` (via out=),
            # not printed; it becomes the failure message below.
            failures, tries = runner.run(
                test, out=new.write, clear_globs=False)
        finally:
            sys.stdout = old
        if failures:
            raise self.failureException(self.format_failure(new.getvalue()))

    def format_failure(self, err):
        # Build the unittest-style failure message for this doctest,
        # naming the file and (possibly unknown) line number.
        test = self._dt_test
        if test.lineno is None:
            lineno = 'unknown line number'
        else:
            lineno = '%s' % test.lineno
        lname = '.'.join(test.name.split('.')[-1:])
        return ('Failed doctest test for %s\n'
                ' File "%s", line %s, in %s\n\n%s'
                % (test.name, test.filename, lineno, lname, err)
                )

    def debug(self):
        r"""Run the test case without results and without catching exceptions

           The unit test framework includes a debug method on test cases
           and test suites to support post-mortem debugging.  The test code
           is run in such a way that errors are not caught.  This way a
           caller can catch the errors and initiate post-mortem debugging.

           The DocTestCase provides a debug method that raises
           UnexpectedException errors if there is an unexpected
           exception:

             >>> test = DocTestParser().get_doctest('>>> raise KeyError\n42',
             ...                {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)
             >>> try:
             ...     case.debug()
             ... except UnexpectedException as f:
             ...     failure = f

           The UnexpectedException contains the test, the example, and
           the original exception:

             >>> failure.test is test
             True

             >>> failure.example.want
             '42\n'

             >>> exc_info = failure.exc_info
             >>> raise exc_info[1] # Already has the traceback
             Traceback (most recent call last):
             ...
             KeyError

           If the output doesn't match, then a DocTestFailure is raised:

             >>> test = DocTestParser().get_doctest('''
             ...      >>> x = 1
             ...      >>> x
             ...      2
             ...      ''', {}, 'foo', 'foo.py', 0)
             >>> case = DocTestCase(test)

             >>> try:
             ...     case.debug()
             ... except DocTestFailure as f:
             ...     failure = f

           DocTestFailure objects provide access to the test:

             >>> failure.test is test
             True

           As well as to the example:

             >>> failure.example.want
             '2\n'

           and the actual output:

             >>> failure.got
             '1\n'
           """
        self.setUp()
        runner = DebugRunner(optionflags=self._dt_optionflags,
                             checker=self._dt_checker, verbose=False)
        runner.run(self._dt_test, clear_globs=False)
        self.tearDown()

    def id(self):
        return self._dt_test.name

    def __eq__(self, other):
        if type(self) is not type(other):
            return NotImplemented

        return self._dt_test == other._dt_test and \
               self._dt_optionflags == other._dt_optionflags and \
               self._dt_setUp == other._dt_setUp and \
               self._dt_tearDown == other._dt_tearDown and \
               self._dt_checker == other._dt_checker

    def __hash__(self):
        # _dt_test is deliberately excluded from the hash (presumably
        # because DocTest objects are unhashable -- confirm); equal cases
        # still hash equal since __eq__ requires the remaining fields
        # to match.
        return hash((self._dt_optionflags, self._dt_setUp, self._dt_tearDown,
                     self._dt_checker))

    def __repr__(self):
        name = self._dt_test.name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))

    __str__ = __repr__

    def shortDescription(self):
        return "Doctest: " + self._dt_test.name
class SkipDocTestCase(DocTestCase):
    # Placeholder test case added by DocTestSuite when docstrings have
    # been stripped (sys.flags.optimize >= 2), so the suite reports a
    # skip instead of silently containing nothing.

    def __init__(self, module):
        self.module = module
        # No DocTest to wrap -- this case only ever skips.
        DocTestCase.__init__(self, None)

    def setUp(self):
        self.skipTest("DocTestSuite will not work with -O2 and above")

    def test_skip(self):
        pass

    def shortDescription(self):
        return "Skipping tests from %s" % self.module.__name__

    __str__ = shortDescription
class _DocTestSuite(unittest.TestSuite):
    # TestSuite that keeps references to its contained tests instead of
    # letting unittest discard them after running.

    def _removeTestAtIndex(self, index):
        # Intentionally a no-op: overrides TestSuite's test-dropping
        # behavior.  NOTE(review): presumably so the doctest cases remain
        # inspectable after the run -- confirm against unittest internals.
        pass
def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
                 **options):
    """
    Convert doctest tests for a module to a unittest test suite.

    Each documentation string in the module that contains doctest
    examples becomes one unittest test case; a failing example fails the
    case with a message naming the containing file and a (sometimes
    approximate) line number.

    `module` is the module -- or the name of a module -- to scan; the
    calling module is used when omitted.  Keyword options:

    setUp / tearDown
        Hooks called before/after running the tests in each docstring.
        Each receives the DocTest object and may use its `globs`
        attribute to access the test globals.
    globs
        A dictionary of initial global variables for the tests.
    optionflags
        Doctest option flags expressed as an integer.
    """
    if test_finder is None:
        test_finder = DocTestFinder()

    module = _normalize_module(module)
    tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)

    if not tests:
        if sys.flags.optimize >= 2:
            # -O2 strips docstrings, so there is nothing to collect;
            # return a suite containing a single skip placeholder.
            suite = _DocTestSuite()
            suite.addTest(SkipDocTestCase(module))
            return suite
        # Why do we want to do this? Because it reveals a bug that might
        # otherwise be hidden.
        # It is probably a bug that this exception is not also raised if
        # the number of doctest examples in tests is zero (i.e. if no
        # doctest examples were found).  However, we should probably not
        # be raising an exception at all here, though it is too late to
        # make this change for a maintenance release.  See also issue
        # #14649.
        raise ValueError(module, "has no docstrings")

    suite = _DocTestSuite()
    for test in sorted(tests):
        if not test.examples:
            continue
        if not test.filename:
            # Fall back to the module's file, normalizing .pyc/.pyo back
            # to the .py source name.
            filename = module.__file__
            if filename[-4:] in (".pyc", ".pyo"):
                filename = filename[:-1]
            test.filename = filename
        suite.addTest(DocTestCase(test, **options))

    return suite
class DocFileCase(DocTestCase):
    # A DocTestCase whose doctest was extracted from a file rather than a
    # docstring; its identity and failure messages are phrased in terms of
    # the file instead of a dotted object name.

    def id(self):
        return '_'.join(self._dt_test.name.split('.'))

    def __repr__(self):
        return self._dt_test.filename

    __str__ = __repr__

    def format_failure(self, err):
        # Line 0: file-based tests always start at the top of the file.
        return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
                % (self._dt_test.name, self._dt_test.filename, err)
                )
def DocFileTest(path, module_relative=True, package=None,
                globs=None, parser=DocTestParser(),
                encoding=None, **options):
    """Build a DocFileCase for the doctest file at `path`.

    `path` is resolved according to `module_relative`/`package` exactly
    as in testfile().  `globs` seeds the test globals (a copy is used,
    with `__file__` defaulting to the resolved path), `parser` extracts
    the test, `encoding` decodes the file, and any remaining keyword
    `options` are forwarded to DocFileCase.
    """
    globs = {} if globs is None else globs.copy()

    if package and not module_relative:
        raise ValueError("Package may only be specified for module-"
                         "relative paths.")

    # Locate and read the file (relativizing module-relative paths).
    doc, path = _load_testfile(path, package, module_relative,
                               encoding or "utf-8")

    globs.setdefault("__file__", path)

    # Name the test after the file and wrap it in a DocFileCase.
    name = os.path.basename(path)
    test = parser.get_doctest(doc, globs, name, path, 0)
    return DocFileCase(test, **options)
def DocFileSuite(*paths, **kw):
    """A unittest suite for one or more doctest files.

    Each positional argument is a file path whose interpretation depends
    on the keyword arguments below (all optional):

    module_relative / package
        When module_relative is True (the default), paths are
        os-independent, "/"-separated, non-absolute paths resolved
        against `package` (or the calling module's directory when no
        package is given).  When False, paths are os-specific (absolute
        or relative to the current working directory) and `package` may
        not be specified.
    setUp / tearDown
        Hooks called before/after running the tests in each file.  Each
        receives the DocTest object and may use its `globs` attribute to
        access the test globals.
    globs
        A dictionary of initial global variables for the tests.
    optionflags
        Doctest option flags expressed as an integer.
    parser
        A DocTestParser (or subclass) used to extract tests from the
        files.
    encoding
        An encoding used to convert the files to unicode.
    """
    suite = _DocTestSuite()

    # _normalize_module must be called here, not inside DocFileTest:
    # otherwise DocFileTest's caller (this function) would be used to
    # guess the package, which would be wrong.
    if kw.get('module_relative', True):
        kw['package'] = _normalize_module(kw.get('package'))

    for path in paths:
        suite.addTest(DocFileTest(path, **kw))

    return suite
######################################################################
## 8. Debugging Support
######################################################################
def script_from_examples(s):
    r"""Extract script from text with examples.

       Converts text with examples to a Python script.  Example input is
       converted to regular code.  Example output and all other words
       are converted to comments:

       >>> text = '''
       ...       Here are examples of simple math.
       ...
       ...           Python has super accurate integer addition
       ...
       ...           >>> 2 + 2
       ...           5
       ...
       ...           And very friendly error messages:
       ...
       ...           >>> 1/0
       ...           To Infinity
       ...           And
       ...           Beyond
       ...
       ...           You can use logic if you want:
       ...
       ...           >>> if 0:
       ...           ...    blah
       ...           ...    blah
       ...           ...
       ...
       ...           Ho hum
       ...           '''

       >>> print(script_from_examples(text))
       # Here are examples of simple math.
       #
       #     Python has super accurate integer addition
       #
       2 + 2
       # Expected:
       ## 5
       #
       #     And very friendly error messages:
       #
       1/0
       # Expected:
       ## To Infinity
       ## And
       ## Beyond
       #
       #     You can use logic if you want:
       #
       if 0:
          blah
          blah
       #
       #     Ho hum
       <BLANKLINE>
       """
    output = []
    for piece in DocTestParser().parse(s):
        if isinstance(piece, Example):
            # Emit the example's source verbatim (minus the trailing
            # newline), then its expected output as "##" comments.
            output.append(piece.source[:-1])
            want = piece.want
            if want:
                output.append('# Expected:')
                output.extend('## ' + line for line in want.split('\n')[:-1])
        else:
            # Narrative text between examples becomes plain comments.
            output.extend(_comment_line(line)
                          for line in piece.split('\n')[:-1])

    # Trim empty comment lines from both ends.
    while output and output[-1] == '#':
        output.pop()
    while output and output[0] == '#':
        output.pop(0)

    # Add a courtesy newline to prevent exec from choking (see bug #1172785)
    return '\n'.join(output) + '\n'
def testsource(module, name):
    """Extract the doctest examples for one object as a Python script.

    `module` is the module (or dotted module name) containing the test,
    and `name` is the name, within the module, of the object whose
    docstring holds the examples.  Raises ValueError if no matching
    test is found.
    """
    module = _normalize_module(module)
    matches = [t for t in DocTestFinder().find(module) if t.name == name]
    if not matches:
        raise ValueError(name, "not found in tests")
    return script_from_examples(matches[0].docstring)
def debug_src(src, pm=False, globs=None):
    """Debug a single doctest docstring, given as the string `src`."""
    # Convert the docstring's examples to a plain script, then hand off
    # to the pdb-based script debugger.
    testsrc = script_from_examples(src)
    debug_script(testsrc, pm, globs)
def debug_script(src, pm=False, globs=None):
    "Debug a test script. `src` is the script, as a string."
    import pdb

    # Execute against a copy of the supplied globals so the caller's
    # dict is not mutated by the script.
    if globs:
        globs = globs.copy()
    else:
        globs = {}

    if pm:
        # Post-mortem mode: run the script and, if it raises, drop into
        # the debugger at the point of failure.
        try:
            exec(src, globs, globs)
        except:
            # Bare except is deliberate: post-mortem debugging should be
            # reachable for anything the script raises.
            print(sys.exc_info()[1])
            p = pdb.Pdb(nosigint=True)
            p.reset()
            p.interaction(None, sys.exc_info()[2])
    else:
        # Step mode: run the whole script under the debugger from the top.
        pdb.Pdb(nosigint=True).run("exec(%r)" % src, globs, globs)
def debug(module, name, pm=False):
    """Debug a single doctest docstring.

    `module` is the module (or dotted module name) containing the test,
    and `name` is the name, within the module, of the object whose
    docstring holds the examples.  With pm=True the debugger is entered
    post-mortem at the point of failure.
    """
    module = _normalize_module(module)
    debug_script(testsource(module, name), pm, module.__dict__)
######################################################################
## 9. Example Usage
######################################################################
class _TestClass:
"""
A pointless class, for sanity-checking of docstring testing.
Methods:
square()
get()
>>> _TestClass(13).get() + _TestClass(-12).get()
1
>>> hex(_TestClass(13).square().get())
'0xa9'
"""
def __init__(self, val):
"""val -> _TestClass object with associated value val.
>>> t = _TestClass(123)
>>> print(t.get())
123
"""
self.val = val
def square(self):
"""square() -> square TestClass's associated value
>>> _TestClass(13).square().get()
169
"""
self.val = self.val ** 2
return self
def get(self):
"""get() -> return TestClass's associated value.
>>> x = _TestClass(-42)
>>> print(x.get())
-42
"""
return self.val
__test__ = {"_TestClass": _TestClass,
"string": r"""
Example of a string object, searched as-is.
>>> x = 1; y = 2
>>> x + y, x * y
(3, 2)
""",
"bool-int equivalence": r"""
In 2.2, boolean expressions displayed
0 or 1. By default, we still accept
them. This can be disabled by passing
DONT_ACCEPT_TRUE_FOR_1 to the new
optionflags argument.
>>> 4 == 4
1
>>> 4 == 4
True
>>> 4 > 4
0
>>> 4 > 4
False
""",
"blank lines": r"""
Blank lines can be marked with <BLANKLINE>:
>>> print('foo\n\nbar\n')
foo
<BLANKLINE>
bar
<BLANKLINE>
""",
"ellipsis": r"""
If the ellipsis flag is used, then '...' can be used to
elide substrings in the desired output:
>>> print(list(range(1000))) #doctest: +ELLIPSIS
[0, 1, 2, ..., 999]
""",
"whitespace normalization": r"""
If the whitespace normalization flag is used, then
differences in whitespace are ignored.
>>> print(list(range(30))) #doctest: +NORMALIZE_WHITESPACE
[0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14,
15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29]
""",
}
def _test():
    """Command-line entry point for the doctest runner.

    Parses sys.argv for options and one or more file arguments, runs
    testmod() on ".py" files (imported as modules) and testfile() on
    everything else, and returns a process exit status: 0 when all files
    pass, 1 as soon as one file has failures.
    """
    parser = argparse.ArgumentParser(description="doctest runner")
    parser.add_argument('-v', '--verbose', action='store_true', default=False,
                        help='print very verbose output for all tests')
    parser.add_argument('-o', '--option', action='append',
                        choices=OPTIONFLAGS_BY_NAME.keys(), default=[],
                        help=('specify a doctest option flag to apply'
                              ' to the test run; may be specified more'
                              ' than once to apply multiple options'))
    parser.add_argument('-f', '--fail-fast', action='store_true',
                        help=('stop running tests after first failure (this'
                              ' is a shorthand for -o FAIL_FAST, and is'
                              ' in addition to any other -o options)'))
    parser.add_argument('file', nargs='+',
                        help='file containing the tests to run')
    args = parser.parse_args()
    testfiles = args.file
    # Verbose used to be handled by the "inspect argv" magic in DocTestRunner,
    # but since we are using argparse we are passing it manually now.
    verbose = args.verbose
    options = 0
    for option in args.option:
        options |= OPTIONFLAGS_BY_NAME[option]
    if args.fail_fast:
        options |= FAIL_FAST
    for filename in testfiles:
        if filename.endswith(".py"):
            # It is a module -- insert its dir into sys.path and try to
            # import it. If it is part of a package, that possibly
            # won't work because of package imports.
            dirname, filename = os.path.split(filename)
            sys.path.insert(0, dirname)
            try:
                m = __import__(filename[:-3])
            finally:
                # Always restore sys.path, even when the import raises;
                # previously a failed import left dirname on the path.
                del sys.path[0]
            failures, _ = testmod(m, verbose=verbose, optionflags=options)
        else:
            failures, _ = testfile(filename, module_relative=False,
                                   verbose=verbose, optionflags=options)
        if failures:
            return 1
    return 0
if __name__ == "__main__":
    # Run the command-line driver and exit with its status code.
    sys.exit(_test())
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package schema
import (
"fmt"
"log"
"strconv"
"strings"
"sync"
"github.com/hashicorp/terraform/internal/legacy/terraform"
"github.com/mitchellh/mapstructure"
)
// ConfigFieldReader reads fields out of an untyped map[string]string to the
// best of its ability. It also applies defaults from the Schema. (The other
// field readers do not need default handling because they source fully
// populated data structures.)
type ConfigFieldReader struct {
	Config *terraform.ResourceConfig
	Schema map[string]*Schema

	// indexMaps caches, per set address ("a.b.c"), the mapping from a set
	// element's hash code to its list index in the raw config. It is
	// populated lazily by readSet and consumed by readField's set-address
	// rewriting; once guards its initial allocation in ReadField.
	indexMaps map[string]map[string]int
	once      sync.Once
}
// ReadField implements FieldReader. It lazily initializes the index-map
// cache, then delegates to readField with set-address rewriting enabled
// (nested=false).
func (r *ConfigFieldReader) ReadField(address []string) (FieldReadResult, error) {
	r.once.Do(func() { r.indexMaps = make(map[string]map[string]int) })
	return r.readField(address, false)
}
// readField reads the value at address out of the raw config. When nested
// is false, set components addressed by hash code (e.g. "set.50") are first
// rewritten to their list index in the raw config, using the index maps
// built by readSet; nested readers pass nested=true to skip that rewriting
// (see nestedConfigFieldReader), which would otherwise recurse forever.
func (r *ConfigFieldReader) readField(
	address []string, nested bool) (FieldReadResult, error) {
	schemaList := addrToSchema(address, r.Schema)
	if len(schemaList) == 0 {
		return FieldReadResult{}, nil
	}

	if !nested {
		// If we have a set anywhere in the address, then we need to
		// read that set out in order and actually replace that part of
		// the address with the real list index. i.e. set.50 might actually
		// map to set.12 in the config, since it is in list order in the
		// config, not indexed by set value.
		for i, v := range schemaList {
			// Sets are the only thing that cause this issue.
			if v.Type != TypeSet {
				continue
			}

			// If we're at the end of the list, then we don't have to worry
			// about this because we're just requesting the whole set.
			if i == len(schemaList)-1 {
				continue
			}

			// If we're looking for the count, then ignore...
			if address[i+1] == "#" {
				continue
			}

			indexMap, ok := r.indexMaps[strings.Join(address[:i+1], ".")]
			if !ok {
				// Get the set so we can get the index map that tells us the
				// mapping of the hash code to the list index
				_, err := r.readSet(address[:i+1], v)
				if err != nil {
					return FieldReadResult{}, err
				}

				indexMap = r.indexMaps[strings.Join(address[:i+1], ".")]
			}

			index, ok := indexMap[address[i+1]]
			if !ok {
				return FieldReadResult{}, nil
			}

			// NOTE(review): this rewrites the caller's address slice in
			// place -- callers must not rely on the original contents.
			address[i+1] = strconv.FormatInt(int64(index), 10)
		}
	}

	k := strings.Join(address, ".")
	schema := schemaList[len(schemaList)-1]

	// If we're getting the single element of a promoted list, then
	// check to see if we have a single element we need to promote.
	if address[len(address)-1] == "0" && len(schemaList) > 1 {
		lastSchema := schemaList[len(schemaList)-2]
		if lastSchema.Type == TypeList && lastSchema.PromoteSingle {
			k := strings.Join(address[:len(address)-1], ".")
			result, err := r.readPrimitive(k, schema)
			if err == nil {
				return result, nil
			}
		}
	}

	if protoVersion5 {
		switch schema.Type {
		case TypeList, TypeSet, TypeMap, typeObject:
			// Check if the value itself is unknown.
			// The new protocol shims will add unknown values to this list of
			// ComputedKeys. This is the only way we have to indicate that a
			// collection is unknown in the config
			for _, unknown := range r.Config.ComputedKeys {
				if k == unknown {
					log.Printf("[DEBUG] setting computed for %q from ComputedKeys", k)
					return FieldReadResult{Computed: true, Exists: true}, nil
				}
			}
		}
	}

	switch schema.Type {
	case TypeBool, TypeFloat, TypeInt, TypeString:
		return r.readPrimitive(k, schema)
	case TypeList:
		// If we support promotion, first check whether the config holds a
		// lone value that must be promoted into a single-element list.
		if schema.PromoteSingle {
			result, err := r.readPrimitive(k, schema.Elem.(*Schema))
			if err == nil && result.Exists {
				result.Value = []interface{}{result.Value}
				return result, nil
			}
		}

		return readListField(&nestedConfigFieldReader{r}, address, schema)
	case TypeMap:
		return r.readMap(k, schema)
	case TypeSet:
		return r.readSet(address, schema)
	case typeObject:
		return readObjectField(
			&nestedConfigFieldReader{r},
			address, schema.Elem.(map[string]*Schema))
	default:
		panic(fmt.Sprintf("Unknown type: %s", schema.Type))
	}
}
// readMap reads the map value at key k. The Go type of the raw config
// value determines how the map was written (interpolated string, list of
// maps, or inline map); the interpolated value supplies the actual entries.
func (r *ConfigFieldReader) readMap(k string, schema *Schema) (FieldReadResult, error) {
	// We want both the raw value and the interpolated. We use the interpolated
	// to store actual values and we use the raw one to check for
	// computed keys. Actual values are obtained in the switch, depending on
	// the type of the raw value.
	mraw, ok := r.Config.GetRaw(k)
	if !ok {
		// check if this is from an interpolated field by seeing if it exists
		// in the config
		_, ok := r.Config.Get(k)
		if !ok {
			// this really doesn't exist
			return FieldReadResult{}, nil
		}

		// We couldn't fetch the value from a nested data structure, so treat the
		// raw value as an interpolation string. The mraw value is only used
		// for the type switch below.
		mraw = "${INTERPOLATED}"
	}

	result := make(map[string]interface{})
	computed := false
	switch m := mraw.(type) {
	case string:
		// This is a map which has come out of an interpolated variable, so we
		// can just get the value directly from config. Values cannot be computed
		// currently.
		v, _ := r.Config.Get(k)

		// If this isn't a map[string]interface, it must be computed.
		mapV, ok := v.(map[string]interface{})
		if !ok {
			return FieldReadResult{
				Exists:   true,
				Computed: true,
			}, nil
		}

		// Otherwise we can proceed as usual.
		for i, iv := range mapV {
			result[i] = iv
		}
	case []interface{}:
		for i, innerRaw := range m {
			for ik := range innerRaw.(map[string]interface{}) {
				key := fmt.Sprintf("%s.%d.%s", k, i, ik)
				if r.Config.IsComputed(key) {
					// NOTE(review): break exits only this inner key loop;
					// later items are still scanned and may still add
					// entries to result -- confirm this is intended.
					computed = true
					break
				}

				v, _ := r.Config.Get(key)
				result[ik] = v
			}
		}
	case []map[string]interface{}:
		for i, innerRaw := range m {
			for ik := range innerRaw {
				key := fmt.Sprintf("%s.%d.%s", k, i, ik)
				if r.Config.IsComputed(key) {
					computed = true
					break
				}

				v, _ := r.Config.Get(key)
				result[ik] = v
			}
		}
	case map[string]interface{}:
		for ik := range m {
			key := fmt.Sprintf("%s.%s", k, ik)
			if r.Config.IsComputed(key) {
				computed = true
				break
			}

			v, _ := r.Config.Get(key)
			result[ik] = v
		}
	case nil:
		// the map may have been empty on the configuration, so we leave the
		// empty result
	default:
		panic(fmt.Sprintf("unknown type: %#v", mraw))
	}

	err := mapValuesToPrimitive(k, result, schema)
	if err != nil {
		// NOTE(review): the conversion error is discarded and the field is
		// reported as absent (nil error) -- confirm this is intentional.
		return FieldReadResult{}, nil
	}

	var value interface{}
	if !computed {
		value = result
	}

	return FieldReadResult{
		Value:    value,
		Exists:   true,
		Computed: computed,
	}, nil
}
// readPrimitive reads the primitive (bool/int/float/string) value at key k,
// falling back to the schema's default when the config has no entry, and
// converting the result to the schema's type.
func (r *ConfigFieldReader) readPrimitive(
	k string, schema *Schema) (FieldReadResult, error) {
	raw, ok := r.Config.Get(k)
	if !ok {
		// Nothing in config, but we might still have a default from the schema
		var err error
		raw, err = schema.DefaultValue()
		if err != nil {
			return FieldReadResult{}, fmt.Errorf("%s, error loading default: %s", k, err)
		}

		// No default either: the field simply doesn't exist.
		if raw == nil {
			return FieldReadResult{}, nil
		}
	}

	// Coerce whatever we got into a string before the typed conversion.
	var result string
	if err := mapstructure.WeakDecode(raw, &result); err != nil {
		return FieldReadResult{}, err
	}

	computed := r.Config.IsComputed(k)
	returnVal, err := stringToPrimitive(result, computed, schema)
	if err != nil {
		return FieldReadResult{}, err
	}

	return FieldReadResult{
		Value:    returnVal,
		Exists:   true,
		Computed: computed,
	}, nil
}
// readSet reads the set at address by reading the underlying list out of
// the raw config. As a side effect it records in r.indexMaps the mapping
// from each element's hash code to its list index, which readField later
// uses to rewrite set addresses.
func (r *ConfigFieldReader) readSet(
	address []string, schema *Schema) (FieldReadResult, error) {
	indexMap := make(map[string]int)

	// Create the set that will be our result
	set := schema.ZeroValue().(*Set)

	raw, err := readListField(&nestedConfigFieldReader{r}, address, schema)
	if err != nil {
		return FieldReadResult{}, err
	}
	if !raw.Exists {
		return FieldReadResult{Value: set}, nil
	}

	// If the list is computed, the set is necessarily computed
	if raw.Computed {
		return FieldReadResult{
			Value:    set,
			Exists:   true,
			Computed: raw.Computed,
		}, nil
	}

	// Build up the set from the list elements
	for i, v := range raw.Value.([]interface{}) {
		// Check if any of the keys in this item are computed
		computed := r.hasComputedSubKeys(
			fmt.Sprintf("%s.%d", strings.Join(address, "."), i), schema)

		code := set.add(v, computed)
		indexMap[code] = i
	}

	r.indexMaps[strings.Join(address, ".")] = indexMap

	return FieldReadResult{
		Value:  set,
		Exists: true,
	}, nil
}
// hasComputedSubKeys walks through a schema and returns whether or not the
// given key contains any subkeys that are computed. Only *Resource element
// schemas are descended into; any other element type yields false.
func (r *ConfigFieldReader) hasComputedSubKeys(key string, schema *Schema) bool {
	prefix := key + "."

	switch t := schema.Elem.(type) {
	case *Resource:
		for k, schema := range t.Schema {
			// A directly computed subkey, or one nested deeper, both count.
			if r.Config.IsComputed(prefix + k) {
				return true
			}

			if r.hasComputedSubKeys(prefix+k, schema) {
				return true
			}
		}
	}

	return false
}
// nestedConfigFieldReader is a funny little thing that just wraps a
// ConfigFieldReader to call readField when ReadField is called so that
// we don't recalculate the set rewrites in the address, which leads to
// an infinite loop.
type nestedConfigFieldReader struct {
	// Reader is the underlying ConfigFieldReader to delegate to.
	Reader *ConfigFieldReader
}
// ReadField implements FieldReader by delegating to the wrapped reader
// with nested=true, skipping set-address rewriting (see the type comment).
func (r *nestedConfigFieldReader) ReadField(
	address []string) (FieldReadResult, error) {
	return r.Reader.readField(address, true)
}
#-------------------------------------------------------------
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
#-------------------------------------------------------------
"""
Visualization -- Predicting Breast Cancer Proliferation Scores with
Apache SystemML
This module contains functions for visualizing data for the breast
cancer project.
"""
import matplotlib.pyplot as plt
def visualize_tile(tile):
    """
    Plot a tissue tile.

    Args:
        tile: A 3D NumPy array of shape (tile_size, tile_size, channels).

    Returns:
        None
    """
    # Render with matplotlib's default RGB(A) interpretation and block
    # until the figure window is dismissed.
    plt.imshow(tile)
    plt.show()
def visualize_sample(sample, size=256):
    """
    Plot a tissue sample that has been flattened to a 1D vector.

    Args:
        sample: A square sample flattened to a vector of size
            (channels*size_x*size_y).
        size: The width and height of the square sample.

    Returns:
        None
    """
    # Infer the channel count from the flattened length.
    channels = int(sample.shape[0] / (size * size))
    if channels > 1:
        # Multi-channel: reshape to (channels, H, W), then move channels
        # last as imshow expects (H, W, channels).
        image = sample.astype('uint8').reshape((channels, size, size)).transpose(1, 2, 0)
        plt.imshow(image)
    else:
        # Single-channel: render as grayscale, picking the value range
        # based on whether the data looks normalized to [0, 1].
        upper = 1 if sample.max() <= 1 else 255
        plt.imshow(sample.reshape((size, size)), cmap="gray", vmin=0, vmax=upper)
    plt.show()
#!/usr/bin/env python2.4
import sys, os
from cStringIO import StringIO
import re
from Plex import *
from Plex.Traditional import re as Re
class MyScanner(Scanner):
    # Thin wrapper over Plex's Scanner that binds a class-level `lexicon`
    # (subclasses are expected to define it) and provides a hook around
    # lexical-state transitions.
    def __init__(self, info, name='<default>'):
        Scanner.__init__(self, self.lexicon, info, name)

    def begin(self, state_name):
        # Debugging aid (disabled): print the state being entered.
        # if self.state_name == '':
        #     print '<default>'
        # else:
        #     print self.state_name
        Scanner.begin(self, state_name)
def sep_seq(sequence, sep):
    """Build a Plex pattern matching the strings in *sequence* joined by
    the separator pattern *sep*."""
    parts = [Str(item) for item in sequence]
    combined = parts[0]
    for part in parts[1:]:
        combined = combined + (sep + part)
    return combined
def runScanner(data, scanner_class, lexicon=None):
    """Drive *scanner_class* over *data* and collect its output.

    Tokens whose value is None terminate the scan; IGNORE tokens are
    dropped; everything else is written to the output buffer.

    Returns a (output_text, scanner) pair so callers can inspect the
    scanner's final state.
    """
    source = StringIO(data)
    sink = StringIO()
    if lexicon is None:
        scanner = scanner_class(source)
    else:
        scanner = scanner_class(lexicon, source)
    while True:
        value, text = scanner.read()
        if value is None:
            break
        if value is not IGNORE:
            sink.write(value)
    return sink.getvalue(), scanner
class LenSubsScanner(MyScanner):
    """Following clapack, we remove ftnlen arguments, which f2c puts after
    a char * argument to hold the length of the passed string. This is just
    a nuisance in C.
    """
    def __init__(self, info, name='<ftnlen>'):
        MyScanner.__init__(self, info, name)
        # Nesting depth of parentheses while inside the argument list of
        # a call whose ftnlen arguments must be kept (see keep_ftnlen).
        self.paren_count = 0

    def beginArgs(self, text):
        # Entering the argument list of a "keep" call; switch to the
        # 'args' state so ftnlen rules below do not fire.
        if self.paren_count == 0:
            self.begin('args')
        self.paren_count += 1
        return text

    def endArgs(self, text):
        # Leaving a paren level; return to the default state at depth 0.
        self.paren_count -= 1
        if self.paren_count == 0:
            self.begin('')
        return text

    # Lexical building blocks for the lexicon below.
    digits = Re('[0-9]+')
    iofun = Re(r'\([^;]*;')
    decl = Re(r'\([^)]*\)[,;'+'\n]')
    any = Re('[.]*')
    S = Re('[ \t\n]*')   # optional whitespace (incl. newlines)
    cS = Str(',') + S    # comma followed by optional whitespace
    len_ = Re('[a-z][a-z0-9]*_len')

    # f2c I/O helper calls whose argument lists pass through untouched.
    iofunctions = Str("s_cat", "s_copy", "s_stop", "s_cmp",
                      "i_len", "do_fio", "do_lio") + iofun

    # Routines to not scrub the ftnlen argument from
    keep_ftnlen = (Str('ilaenv_') | Str('s_rnge')) + Str('(')

    lexicon = Lexicon([
        (iofunctions, TEXT),
        (keep_ftnlen, beginArgs),
        State('args', [
            (Str(')'), endArgs),
            (Str('('), beginArgs),
            (AnyChar, TEXT),
        ]),
        # Drop trailing string-length literals and ftnlen parameters.
        (cS+Re(r'[1-9][0-9]*L'), IGNORE),
        (cS+Str('ftnlen')+Opt(S+len_), IGNORE),
        (cS+sep_seq(['(', 'ftnlen', ')'], S)+S+digits, IGNORE),
        (Bol+Str('ftnlen ')+len_+Str(';\n'), IGNORE),
        (cS+len_, TEXT),
        (AnyChar, TEXT),
    ])
def scrubFtnlen(source):
    """Strip f2c's ftnlen string-length arguments from C *source*."""
    output, _scanner = runScanner(source, LenSubsScanner)
    return output
def cleanSource(source):
    """Normalize whitespace and drop f2c section comments from C source."""
    transforms = [
        # Strip trailing spaces/tabs before each newline.
        (r'[\t ]+\n', '\n'),
        # Remove comments like ".. Scalar Arguments ..".
        (r'(?m)^[\t ]*/\* *\.\. .*?\n', ''),
        # Collapse runs of three-plus blank lines down to two.
        (r'\n\n\n\n+', '\n\n\n'),
    ]
    for pattern, replacement in transforms:
        source = re.sub(pattern, replacement, source)
    return source
class LineQueue(object):
    """An ordered buffer of text lines that can be flushed or joined."""

    def __init__(self):
        object.__init__(self)
        self._queue = []

    def add(self, line):
        """Append one line to the buffer."""
        self._queue.append(line)

    def clear(self):
        """Discard everything buffered so far."""
        self._queue = []

    def flushTo(self, other_queue):
        """Move every buffered line into *other_queue*, emptying this one."""
        for pending in self._queue:
            other_queue.add(pending)
        self.clear()

    def getValue(self):
        """Return the buffered text as a single string and empty the buffer."""
        # Flush through a scratch queue so subclasses' flushTo overrides
        # (e.g. comment re-wrapping) are honored.
        staging = LineQueue()
        self.flushTo(staging)
        text = ''.join(staging._queue)
        self.clear()
        return text
class CommentQueue(LineQueue):
    """A LineQueue specialized for C comment lines.

    Lines are stored with their '/*' and '*/' delimiters stripped, then
    re-wrapped on flush as either a one-line comment or a block comment.
    """

    def __init__(self):
        LineQueue.__init__(self)

    def add(self, line):
        # Blank comment lines become plain blank lines; otherwise strip
        # the leading '/*' and the trailing '*/\n'.
        if line.strip():
            LineQueue.add(self, ' ' + line[2:-3].rstrip() + '\n')
        else:
            LineQueue.add(self, '\n')

    def flushTo(self, other_queue):
        count = len(self._queue)
        if count == 1:
            # A lone comment line is re-emitted as a one-line comment.
            other_queue.add('/*' + self._queue[0][2:].rstrip() + ' */\n')
        elif count > 1:
            # Multiple lines become a single block comment.
            other_queue.add('/*\n')
            LineQueue.flushTo(self, other_queue)
            other_queue.add('*/\n')
        self.clear()
# This really seems to be about 4x longer than it needs to be
def cleanComments(source):
    """Merge runs of consecutive one-line C comments into block comments.

    Implemented as a state machine where each state is a function that
    consumes one line and returns the next state. Blank lines seen
    between comments are held aside until we know whether they separate
    two comment runs (absorbed into the comment) or a comment from code
    (emitted as-is).
    """
    lines = LineQueue()
    comments = CommentQueue()

    def isCommentLine(line):
        # A complete one-line C comment: starts '/*', ends '*/'.
        return line.startswith('/*') and line.endswith('*/\n')

    blanks = LineQueue()

    def isBlank(line):
        return line.strip() == ''

    def SourceLines(line):
        # Default state: ordinary source lines pass straight through.
        if isCommentLine(line):
            comments.add(line)
            return HaveCommentLines
        else:
            lines.add(line)
            return SourceLines

    def HaveCommentLines(line):
        # Inside a run of comment lines.
        if isBlank(line):
            blanks.add('\n')
            return HaveBlankLines
        elif isCommentLine(line):
            comments.add(line)
            return HaveCommentLines
        else:
            # Run ended at a code line: emit the merged comment first.
            comments.flushTo(lines)
            lines.add(line)
            return SourceLines

    def HaveBlankLines(line):
        # Blank line(s) after a comment run: decide where they belong.
        if isBlank(line):
            blanks.add('\n')
            return HaveBlankLines
        elif isCommentLine(line):
            # Another comment follows: the blanks were internal to it.
            blanks.flushTo(comments)
            comments.add(line)
            return HaveCommentLines
        else:
            # Code follows: comment, then blanks, then the code line.
            comments.flushTo(lines)
            blanks.flushTo(lines)
            lines.add(line)
            return SourceLines

    state = SourceLines
    for line in StringIO(source):
        state = state(line)
    # Flush any trailing comment run at end of input.
    comments.flushTo(lines)
    return lines.getValue()
def removeHeader(source):
    """Drop the f2c-generated banner comment and the f2c.h include line.

    A small state machine: lines pass through until the '-- translated'
    banner opens, the banner is swallowed up to its closing '*/', and
    after that only the '#include "f2c.h"' line is filtered out.
    """
    lines = LineQueue()

    def LookingForHeader(line):
        if re.match(r'/\*[^\n]*-- translated', line):
            return InHeader
        lines.add(line)
        return LookingForHeader

    def InHeader(line):
        if line.startswith('*/'):
            return OutOfHeader
        return InHeader

    def OutOfHeader(line):
        if not line.startswith('#include "f2c.h"'):
            lines.add(line)
        return OutOfHeader

    state = LookingForHeader
    for line in StringIO(source):
        state = state(line)
    return lines.getValue()
def replaceDlamch(source):
    """Replace dlamch_ calls with appropriate macros.

    f2c renders LAPACK's DLAMCH machine-parameter queries as calls like
    dlamch_("Epsilon"); only the first character of the argument is
    significant. These are rewritten to EPSILON/PRECISION/SAFEMINIMUM/
    BASE macros, and the now-unneeded extern declaration for dlamch_ is
    removed.
    """
    def repl(m):
        first = m.group(1)[0]
        return {'E': 'EPSILON', 'P': 'PRECISION',
                'S': 'SAFEMINIMUM', 'B': 'BASE'}[first]
    source = re.sub(r'dlamch_\("(.*?)"\)', repl, source)
    # BUG FIX: the (?m) inline flag was appended at the end of the
    # pattern ('...;$(?m)'); non-leading global flags are an error in
    # Python 3.11+, so it now leads the pattern (same behavior).
    source = re.sub(r'(?m)^\s+extern.*? dlamch_.*?;$', '', source)
    return source
# Top-level driver: run every cleanup stage in order.
def scrubSource(source, nsteps=None, verbose=False):
    """Run the full cleanup pipeline over f2c-generated C *source*.

    Args:
        source: the C source text to clean.
        nsteps: if given, run only the first nsteps pipeline stages
            (handy for debugging an individual stage).
        verbose: if True, print each stage's name as it runs.

    Returns:
        The cleaned source text.
    """
    steps = [
        ('scrubbing ftnlen', scrubFtnlen),
        ('remove header', removeHeader),
        ('clean source', cleanSource),
        ('clean comments', cleanComments),
        ('replace dlamch_() calls', replaceDlamch),
    ]
    if nsteps is not None:
        steps = steps[:nsteps]
    for msg, step in steps:
        if verbose:
            # print(x) with a single argument is valid in Python 2 and 3.
            print(msg)
        source = step(source)
    return source
if __name__ == '__main__':
    # Usage: scrub.py <input-file> <output-dir> [nsteps]
    filename = sys.argv[1]
    outfilename = os.path.join(sys.argv[2], os.path.basename(filename))

    fo = open(filename, 'r')
    source = fo.read()
    fo.close()

    if len(sys.argv) > 3:
        nsteps = int(sys.argv[3])
    else:
        nsteps = None

    # BUG FIX: this previously called scrub_source(), which does not
    # exist anywhere in the file (NameError); the pipeline entry point
    # defined above is scrubSource().
    source = scrubSource(source, nsteps, verbose=True)

    writefo = open(outfilename, 'w')
    writefo.write(source)
    writefo.close()
# Modules User Model
A `module` is the primary unit of code
sharing in Swift. This document describes the experience of using
modules in Swift: what they are and what they provide for the user.
> *Warning:* This document was used in planning Swift 1.0; it has not been kept up to
date.
## High-Level Overview
### A module contains declarations
The primary purpose of a module is to provide declarations of types,
functions, and global variables that are present in a library[^library].
Importing[^import] the module gives
access to these declarations and allows them to be used in your code.
```swift
import Chess
import Foundation
```
You can also selectively import certain declarations from a module:
```swift
import func Chess.createGreedyPlayer
import class Foundation.NSRegularExpression
```
#### Comparison with Other Languages
Importing a module is much like importing a library in Ruby, Python, or
Perl, importing a class in Java, or including a header file in a
C-family language. However, unlike C, module files are not textually
included and must be valid programs on their own, and may not be in a
textual format at all. Unlike Java, declarations in a module are not
visible at all until imported. And unlike the dynamic languages
mentioned, importing a module cannot automatically cause any code to be
run.
### Imported declarations can be accessed with qualified or unqualified lookup
Once a module has been imported, its declarations are available for use
within the current source file. These declarations can be referred to by
name, or by qualifying[^qualified-name] them with the name of the module:
```swift
func playChess(_ blackPlayer : Chess.Player, whitePlayer : Chess.Player) {
var board = Board() // refers to Chess.Board
}
```
### Modules provide a unique context for declarations
A declaration in a module is unique; it is never the same as a
declaration with the same name in another module (with one caveat
described below). This means that two types `Chess.Board` and
`Xiangqi.Board` can exist in the same program, and each can be referred
to as `Board` as long as the other is not visible. If more than one
imported module declares the same name, the full qualified name[^qualified-name]
can be used for disambiguation.
> *Note:* This is accomplished by including the module name in the
mangled name[^mangled-name] of a declaration.
Therefore, it is an ABI-breaking change to change the name of a module
containing a public declaration.
> *Warning:* The one exception to this rule is declarations that must be compatible
with Objective-C. Such declarations follow the usual Objective-C rules
for name conflicts: all classes must have unique names, all protocols
must have unique names, and all constructors, methods, and properties
must have unique names within their class (including inherited methods
and properties).
### Modules may contain code
In addition to declarations, modules may contain implementations of the
functions they define. The compiler may choose to use this information
when optimizing a user's program, usually by inlining the module code
into a caller. In some cases[^1], the compiler may even use a module's
function implementations to produce more effective diagnostics.
Modules can also contain autolinking[^autolinking] information, which
the compiler passes on to the linker. This can be used to specify which
library implements the declarations in the module.
### Modules can "re-export" other modules
> *Warning:* This feature is likely to be modified in the future.
Like any other body of code, a module may depend on other modules in its
implementation. The module implementer may also choose to
re-export[^re-export] these modules, meaning that
anyone who imports the first module will also have access to the
declarations in the re-exported modules.
```swift
@exported import AmericanCheckers
```
As an example, the "Cocoa" framework[^framework] on macOS exists only
to re-export three other frameworks: AppKit, Foundation, and CoreData.
Just as certain declarations can be selectively imported from a module,
so too can they be selectively re-exported, using the same syntax:
```swift
@exported import class AmericanCheckers.Board
```
### Modules are uniquely identified by their name
Module names exist in a global namespace and must be unique. Like type
names, module names are conventionally capitalized.
> *TODO:* While this matches the general convention for Clang, there are
advantages to being able to rename a module for lookup purposes, even if
the ABI name stays the same. It would also be nice to avoid having
people stick prefixes on their module names the way they currently do
for Objective-C classes.
> *Note:* Because access into a module and access into a type look the same, it is
bad style to declare a type with the same name as a top-level module
used in your program:
```swift
// Example 1:
import Foundation
import struct BuildingConstruction.Foundation
var firstSupport = Foundation.SupportType() // from the struct or from the module?
// Example 2:
import Foundation
import BuildingConstruction
Foundation.SupportType() // from the class or from the module?
```
In both cases, the type takes priority over the module, but this should
still be avoided.
> *TODO:* Can we enforce this in the compiler? It seems like there's no way around
Example 2, and indeed Example 2 is probably doing the wrong thing.
## `import`
As shown above, a module is imported using the `import` keyword,
followed by the name of the module:
```swift
import AppKit
```
To import only a certain declaration from the module, you use the
appropriate declaration keyword:
```swift
import class AppKit.NSWindow
import func AppKit.NSApplicationMain
import var AppKit.NSAppKitVersionNumber
import typealias AppKit.NSApplicationPresentationOptions
```
- `import typealias` has slightly special behavior: it will match any
type other than a protocol, regardless of how the type is declared
in the imported module.
- `import class`, `struct`, and `enum` will succeed even if the name
given is a typealias for a type of the appropriate kind.
- `import func` will bring in all overloads of the named function.
- Using a keyword that doesn't match the named declaration is an
error.
> *TODO:* There is currently no way to selectively import extensions or operators.
### Multiple source files
Most programs are broken up into multiple source files, and these files
may depend on each other. To facilitate this design, declarations in
*all* source files in a module (including the "main module" for an
executable) are implicitly visible in each file's context. It is almost
as if all these files had been loaded with `import`, but with a few
important differences:
- The declarations in other files belong to the module being built,
just like those in the current file. Therefore, if you need to refer
to them by qualified name, you need to use the name of the module
being built.
- A module is a fully-contained entity: it may depend on other
modules, but those other modules can't depend on it. Source files
within a module may have mutual dependencies.
> FIXME: This wouldn't belong in the user model at all except for the implicit
visibility thing. Is there a better way to talk about this?
### Ambiguity
Because two different modules can declare the same name, it is sometimes
necessary to use a qualified name[^qualified-name] to
refer to a particular declaration:
```swift
import Chess
import Xiangqi
if userGame == "chess" {
Chess.playGame()
} else if userGame == "xiangqi" {
Xiangqi.playGame()
}
```
Here, both modules declare a function named `playGame` that takes no
arguments, so we have to disambiguate by "qualifying" the function name
with the appropriate module.
These are the rules for resolving name lookup ambiguities:
1. Declarations in the current source file are best.
2. Declarations from other files in the same module are better than
declarations from imports.
3. Declarations from selective imports are better than declarations
from non-selective imports. (This may be used to give priority to a
particular module for a given name.)
4. Every source file implicitly imports the core standard library as a
non-selective import.
5. If the name refers to a function, normal overload resolution may
resolve ambiguities.
### Submodules
> *Warning:* This feature was never implemented, or even fully designed.
For large projects, it is usually desirable to break a single
application or framework into subsystems, which Swift calls
"submodules". A submodule is a development-time construct used for
grouping within a module. By default, declarations within a submodule
are considered "submodule-private", which means they are only visible
within that submodule (rather than across the entire module). These
declarations will not conflict with declarations in other submodules
that may have the same name.
Declarations explicitly marked "whole-module" or "API" are still visible
across the entire module (even if declared within a submodule), and must
have a unique name within that space.
The qualified name of a declaration within a submodule consists of the
top-level module name, followed by the submodule name, followed by thez
declaration.
> *TODO:* We need to decide once and for all whether implicit visibility applies
across submodule boundaries, i.e. "can I access the public
Swift.AST.Module from Swift.Sema without an import, or do I have to say
`import Swift.AST`?"
Advantages of module-wide implicit visibility:
- Better name conflict checking. (The alternative is a linker error,
or worse *no* linker error if the names have different manglings.)
- Less work if things move around.
- Build time performance is consistent whether or not you use this
feature.
Advantages of submodule-only implicit visibility:
- Code completion will include names of public things you don't care
about.
- We haven't actually tested the build time performance of any large
Swift projects, so we don't know if we can actually handle targets
that contain hundreds of files.
- Could be considered desirable to force declaring your internal
dependencies explicitly.
- In this mode, we could allow two "whole-module" declarations to have
the same name, since they won't. (We could allow this in the other
mode too but then the qualified name would always be required.)
Both cases still use "submodule-only" as the default access control, so
this only affects the implicit visibility of whole-module and public
declarations.
### Import Search Paths
> *FIXME:* Write this section. Can source files be self-contained modules?
How does `-i` mode work? Can the "wrong" module be found when looking for a
dependency (i.e. can I substitute my own Foundation and expect AppKit to
work)? How are modules stored on disk? How do hierarchical module names
work?
## Interoperability with Objective-C via Clang
The compiler has the ability to interoperate with C and Objective-C by
importing Clang modules[^clang-module]. This feature of the Clang compiler
was developed to provide a "semantic import" extension to the C family of
languages. The Swift compiler uses this to expose declarations from C and
Objective-C as if they used native Swift types.
In all the examples above, `import AppKit` has been using this
mechanism: the module found with the name "AppKit" is generated from the
Objective-C AppKit framework.
### Clang Submodules
Clang also has a concept of "submodules", which are essentially
hierarchically-named modules. Unlike Swift's [submodules](#submodules),
Clang submodules are visible from outside the module. It is conventional
for a top-level Clang module to re-export all of its submodules, but
sometimes certain submodules are specified to require an explicit import:
```swift
import OpenGL.GL3
```
### Module Overlays
> *Warning:* This feature has mostly been removed from Swift; it's only
in use in the "overlay" libraries bundled with Swift itself.
If a source file in module A includes `import A`, this indicates that
the source file is providing a replacement or overlay for an external
module. In most cases, the source file will
`re-export`{.interpreted-text role="term"} the underlying module, but
add some convenience APIs to make the existing interface more
Swift-friendly.
This replacement syntax (using the current module name in an import)
cannot be used to overlay a Swift module, because
`module-naming`{.interpreted-text role="ref"}.
## Multiple source files, part 2
In migrating from Objective-C to Swift, it is expected that a single
program will contain a mix of sources. The compiler therefore allows
importing a single Objective-C header, exposing its declarations to the
main source file by constructing a sort of "ad hoc" module. These can
then be used like any other declarations imported from C or Objective-C.
> *Note:* This is describing the feature that eventually became "bridging headers"
for app targets.
### Accessing Swift declarations from Objective-C
> *Warning:* This never actually happened; instead, we went with "generated headers"
output by the Swift compiler.
Using the new `@import` syntax, Objective-C translation units can import
Swift modules as well. Swift declarations will be mirrored into
Objective-C and can be called natively, just as Objective-C declarations
are mirrored into Swift for Clang modules[^clang-module]. In this
case, only the declarations compatible with Objective-C will be visible.
> TODO: We need to actually do this, but it requires working on a branch of
Clang, so we're pushing it back in the schedule as far as possible. The
workaround is to manually write header files for imported Swift classes.
> TODO: Importing Swift sources from within the same target is a goal, but there
are many difficulties. How do you name a file to be imported? What if
the file itself depends on another Objective-C header? What if there's a
mutual dependency across the language boundary? (That's a problem in
both directions, since both Clang modules and Swift modules are only
supposed to be exposed once they've been type-checked.)
[^1]: Specifically, code marked with the ``@_transparent`` attribute is
required to be "transparent" to the compiler: it *must* be inlined and
will affect diagnostics.
[^autolinking]: A technique where linking information is included in compiled object
files, so that external dependencies can be recorded without having
to explicitly specify them at link time.
[^clang-module]: A module whose contents are generated from a C-family header or set
of headers. See [Clang's Modules](http://clang.llvm.org/docs/Modules.html) documentation for more
information.
[^framework]: A mechanism for library distribution on OS X. Traditionally
frameworks contain header files describing the library's API, a binary file containing
the implementation, and a directory containing any resources the
library may need. Frameworks are also used on iOS, but as of iOS 7 custom frameworks
cannot be created by users.
[^import]: To locate and read a module, then make its declarations available in the current context.
[^library]: Abstractly, a collection of APIs for a programmer to use, usually
with a common theme. Concretely, the file containing the
implementation of these APIs.
[^mangled-name]: A unique, internal name for a type or value. The term is most
commonly used in C++; [see Wikipedia](https://en.wikipedia.org/wiki/Name_mangling#C.2B.2B)
for some examples. Swift's name mangling scheme is not the same as C++'s but serves a similar
purpose.
[^qualified-name]: A multi-piece name like `Foundation.NSWindow`, which names an entity
within a particular context. This document is concerned with the
case where the context is the name of an imported module.
[^re-export]: To directly expose the API of one module through another module.
Including the latter module in a source file will behave as if the
user had also included the former module. | unknown | github | https://github.com/apple/swift | docs/Modules.md |
# Copyright (c) 2001-2010 Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Tests for L{twisted.python.threadpool}
"""
import pickle, time, weakref, gc, threading
from twisted.trial import unittest, util
from twisted.python import threadpool, threadable, failure, context
from twisted.internet import reactor
from twisted.internet.defer import Deferred
#
# See the end of this module for the remainder of the imports.
#
class Synchronization(object):
    """Records invocations of run() and detects unserialized entry.

    run() is meant to be invoked serially from multiple threads (the
    class is passed through threadable.synchronize() after definition);
    any overlapping entry is tallied in ``failures``.
    """
    failures = 0

    def __init__(self, N, waiting):
        self.N = N
        self.waiting = waiting
        self.lock = threading.Lock()
        self.runs = []

    def run(self):
        # If run() really is serialized, this non-blocking acquire can
        # never fail; failing means another thread is inside run() now.
        if not self.lock.acquire(False):
            self.failures += 1
        else:
            if len(self.runs) % 5 == 0:
                # Empirically-chosen pause to maximize the chance of a
                # quick failure if the serialization is broken.
                time.sleep(0.0002)
            self.lock.release()

        # Wake the test method once the expected number of runs have
        # been recorded; unrelated to the serialization check above.
        self.lock.acquire()
        self.runs.append(None)
        if len(self.runs) == self.N:
            self.waiting.release()
        self.lock.release()

    synchronized = ["run"]
# Wrap the methods named in Synchronization.synchronized (run) so they
# execute under a per-instance lock when threading is enabled.
threadable.synchronize(Synchronization)
class ThreadPoolTestCase(unittest.TestCase):
"""
Test threadpools.
"""
def _waitForLock(self, lock):
for i in xrange(1000000):
if lock.acquire(False):
break
time.sleep(1e-5)
else:
self.fail("A long time passed without succeeding")
def test_attributes(self):
"""
L{ThreadPool.min} and L{ThreadPool.max} are set to the values passed to
L{ThreadPool.__init__}.
"""
pool = threadpool.ThreadPool(12, 22)
self.assertEqual(pool.min, 12)
self.assertEqual(pool.max, 22)
def test_start(self):
"""
L{ThreadPool.start} creates the minimum number of threads specified.
"""
pool = threadpool.ThreadPool(0, 5)
pool.start()
self.addCleanup(pool.stop)
self.assertEqual(len(pool.threads), 0)
pool = threadpool.ThreadPool(3, 10)
self.assertEqual(len(pool.threads), 0)
pool.start()
self.addCleanup(pool.stop)
self.assertEqual(len(pool.threads), 3)
def test_threadCreationArguments(self):
"""
Test that creating threads in the threadpool with application-level
objects as arguments doesn't results in those objects never being
freed, with the thread maintaining a reference to them as long as it
exists.
"""
tp = threadpool.ThreadPool(0, 1)
tp.start()
self.addCleanup(tp.stop)
# Sanity check - no threads should have been started yet.
self.assertEqual(tp.threads, [])
# Here's our function
def worker(arg):
pass
# weakref needs an object subclass
class Dumb(object):
pass
# And here's the unique object
unique = Dumb()
workerRef = weakref.ref(worker)
uniqueRef = weakref.ref(unique)
# Put some work in
tp.callInThread(worker, unique)
# Add an event to wait completion
event = threading.Event()
tp.callInThread(event.set)
event.wait(self.getTimeout())
del worker
del unique
gc.collect()
self.assertEquals(uniqueRef(), None)
self.assertEquals(workerRef(), None)
def test_threadCreationArgumentsCallInThreadWithCallback(self):
"""
As C{test_threadCreationArguments} above, but for
callInThreadWithCallback.
"""
tp = threadpool.ThreadPool(0, 1)
tp.start()
self.addCleanup(tp.stop)
# Sanity check - no threads should have been started yet.
self.assertEqual(tp.threads, [])
# this holds references obtained in onResult
refdict = {} # name -> ref value
onResultWait = threading.Event()
onResultDone = threading.Event()
resultRef = []
# result callback
def onResult(success, result):
onResultWait.wait(self.getTimeout())
refdict['workerRef'] = workerRef()
refdict['uniqueRef'] = uniqueRef()
onResultDone.set()
resultRef.append(weakref.ref(result))
# Here's our function
def worker(arg, test):
return Dumb()
# weakref needs an object subclass
class Dumb(object):
pass
# And here's the unique object
unique = Dumb()
onResultRef = weakref.ref(onResult)
workerRef = weakref.ref(worker)
uniqueRef = weakref.ref(unique)
# Put some work in
tp.callInThreadWithCallback(onResult, worker, unique, test=unique)
del worker
del unique
gc.collect()
# let onResult collect the refs
onResultWait.set()
# wait for onResult
onResultDone.wait(self.getTimeout())
self.assertEquals(uniqueRef(), None)
self.assertEquals(workerRef(), None)
# XXX There's a race right here - has onResult in the worker thread
# returned and the locals in _worker holding it and the result been
# deleted yet?
del onResult
gc.collect()
self.assertEqual(onResultRef(), None)
self.assertEqual(resultRef[0](), None)
def test_persistence(self):
"""
Threadpools can be pickled and unpickled, which should preserve the
number of threads and other parameters.
"""
pool = threadpool.ThreadPool(7, 20)
self.assertEquals(pool.min, 7)
self.assertEquals(pool.max, 20)
# check that unpickled threadpool has same number of threads
copy = pickle.loads(pickle.dumps(pool))
self.assertEquals(copy.min, 7)
self.assertEquals(copy.max, 20)
def _threadpoolTest(self, method):
"""
Test synchronization of calls made with C{method}, which should be
one of the mechanisms of the threadpool to execute work in threads.
"""
# This is a schizophrenic test: it seems to be trying to test
# both the callInThread()/dispatch() behavior of the ThreadPool as well
# as the serialization behavior of threadable.synchronize(). It
# would probably make more sense as two much simpler tests.
N = 10
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
waiting = threading.Lock()
waiting.acquire()
actor = Synchronization(N, waiting)
for i in xrange(N):
method(tp, actor)
self._waitForLock(waiting)
self.failIf(actor.failures, "run() re-entered %d times" %
(actor.failures,))
def test_dispatch(self):
"""
Call C{_threadpoolTest} with C{dispatch}.
"""
return self._threadpoolTest(
lambda tp, actor: tp.dispatch(actor, actor.run))
test_dispatch.suppress = [util.suppress(
message="dispatch\(\) is deprecated since Twisted 8.0, "
"use callInThread\(\) instead",
category=DeprecationWarning)]
def test_callInThread(self):
"""
Call C{_threadpoolTest} with C{callInThread}.
"""
return self._threadpoolTest(
lambda tp, actor: tp.callInThread(actor.run))
def test_callInThreadException(self):
"""
L{ThreadPool.callInThread} logs exceptions raised by the callable it
is passed.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(raiseError)
tp.start()
tp.stop()
errors = self.flushLoggedErrors(NewError)
self.assertEqual(len(errors), 1)
def test_callInThreadWithCallback(self):
"""
L{ThreadPool.callInThreadWithCallback} calls C{onResult} with a
two-tuple of C{(True, result)} where C{result} is the value returned
by the callable supplied.
"""
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
waiter.release()
results.append(success)
results.append(result)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, lambda : "test")
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
self.assertTrue(results[0])
self.assertEqual(results[1], "test")
def test_callInThreadWithCallbackExceptionInCallback(self):
"""
L{ThreadPool.callInThreadWithCallback} calls C{onResult} with a
two-tuple of C{(False, failure)} where C{failure} represents the
exception raised by the callable supplied.
"""
class NewError(Exception):
pass
def raiseError():
raise NewError()
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
waiter.release()
results.append(success)
results.append(result)
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, raiseError)
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
self.assertFalse(results[0])
self.assertTrue(isinstance(results[1], failure.Failure))
self.assertTrue(issubclass(results[1].type, NewError))
def test_callInThreadWithCallbackExceptionInOnResult(self):
"""
L{ThreadPool.callInThreadWithCallback} logs the exception raised by
C{onResult}.
"""
class NewError(Exception):
pass
waiter = threading.Lock()
waiter.acquire()
results = []
def onResult(success, result):
results.append(success)
results.append(result)
raise NewError()
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, lambda : None)
tp.callInThread(waiter.release)
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
errors = self.flushLoggedErrors(NewError)
self.assertEqual(len(errors), 1)
self.assertTrue(results[0])
self.assertEqual(results[1], None)
def test_callbackThread(self):
"""
L{ThreadPool.callInThreadWithCallback} calls the function it is
given and the C{onResult} callback in the same thread.
"""
threadIds = []
import thread
event = threading.Event()
def onResult(success, result):
threadIds.append(thread.get_ident())
event.set()
def func():
threadIds.append(thread.get_ident())
tp = threadpool.ThreadPool(0, 1)
tp.callInThreadWithCallback(onResult, func)
tp.start()
self.addCleanup(tp.stop)
event.wait(self.getTimeout())
self.assertEqual(len(threadIds), 2)
self.assertEqual(threadIds[0], threadIds[1])
    def test_callbackContext(self):
        """
        The context L{ThreadPool.callInThreadWithCallback} is invoked in is
        shared by the context the callable and C{onResult} callback are
        invoked in.
        """
        # Plant a marker in the current (innermost) context dict.
        myctx = context.theContextTracker.currentContext().contexts[-1]
        myctx['testing'] = 'this must be present'
        contexts = []
        event = threading.Event()
        def onResult(success, result):
            ctx = context.theContextTracker.currentContext().contexts[-1]
            contexts.append(ctx)
            event.set()
        def func():
            ctx = context.theContextTracker.currentContext().contexts[-1]
            contexts.append(ctx)
        tp = threadpool.ThreadPool(0, 1)
        tp.callInThreadWithCallback(onResult, func)
        tp.start()
        self.addCleanup(tp.stop)
        # onResult sets the event last, so both contexts are recorded by the
        # time the wait returns.
        event.wait(self.getTimeout())
        self.assertEqual(len(contexts), 2)
        # Both the callable and the callback observed the submitter's context.
        self.assertEqual(myctx, contexts[0])
        self.assertEqual(myctx, contexts[1])
def test_existingWork(self):
"""
Work added to the threadpool before its start should be executed once
the threadpool is started: this is ensured by trying to release a lock
previously acquired.
"""
waiter = threading.Lock()
waiter.acquire()
tp = threadpool.ThreadPool(0, 1)
tp.callInThread(waiter.release) # before start()
tp.start()
try:
self._waitForLock(waiter)
finally:
tp.stop()
def test_dispatchDeprecation(self):
"""
Test for the deprecation of the dispatch method.
"""
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
def cb():
return tp.dispatch(None, lambda: None)
self.assertWarns(DeprecationWarning,
"dispatch() is deprecated since Twisted 8.0, "
"use callInThread() instead",
__file__, cb)
def test_dispatchWithCallbackDeprecation(self):
"""
Test for the deprecation of the dispatchWithCallback method.
"""
tp = threadpool.ThreadPool()
tp.start()
self.addCleanup(tp.stop)
def cb():
return tp.dispatchWithCallback(
None,
lambda x: None,
lambda x: None,
lambda: None)
self.assertWarns(DeprecationWarning,
"dispatchWithCallback() is deprecated since Twisted 8.0, "
"use twisted.internet.threads.deferToThread() instead.",
__file__, cb)
class RaceConditionTestCase(unittest.TestCase):
    def setUp(self):
        # A fresh event and a running pool of up to 10 threads per test.
        self.event = threading.Event()
        self.threadpool = threadpool.ThreadPool(0, 10)
        self.threadpool.start()

    def tearDown(self):
        del self.event
        self.threadpool.stop()
        del self.threadpool

    def test_synchronization(self):
        """
        Test a race condition: ensure that actions run in the pool synchronize
        with actions run in the main thread.
        """
        timeout = self.getTimeout()
        self.threadpool.callInThread(self.event.set)
        self.event.wait(timeout)
        self.event.clear()
        for i in range(3):
            self.threadpool.callInThread(self.event.wait)
        self.threadpool.callInThread(self.event.set)
        self.event.wait(timeout)
        if not self.event.isSet():
            # Re-set the event so the three waiting jobs can finish before
            # tearDown stops the pool, then report the failure.
            self.event.set()
            self.fail("Actions not synchronized")

    def test_singleThread(self):
        """
        The submission of a new job to a thread pool in response to the
        C{onResult} callback does not cause a new thread to be added to the
        thread pool.

        This requires that the thread which calls C{onResult} to have first
        marked itself as available so that when the new job is queued, that
        thread may be considered to run it.  This is desirable so that when
        only N jobs are ever being executed in the thread pool at once only
        N threads will ever be created.
        """
        # Ensure no threads running
        # (use assertEqual, not the deprecated assertEquals alias)
        self.assertEqual(self.threadpool.workers, 0)

        loopDeferred = Deferred()

        def onResult(success, counter):
            # Bounce back to the reactor thread before submitting more work.
            reactor.callFromThread(submit, counter)

        def submit(counter):
            if counter:
                self.threadpool.callInThreadWithCallback(
                    onResult, lambda: counter - 1)
            else:
                loopDeferred.callback(None)

        def cbLoop(ignored):
            # Ensure there is only one thread running.
            self.assertEqual(self.threadpool.workers, 1)

        loopDeferred.addCallback(cbLoop)
        submit(10)
        return loopDeferred
class ThreadSafeListDeprecationTestCase(unittest.TestCase):
    """
    Test deprecation of threadpool.ThreadSafeList in twisted.python.threadpool
    """

    def test_threadSafeList(self):
        """
        Test deprecation of L{threadpool.ThreadSafeList}.
        """
        # Instantiating the class is what triggers the DeprecationWarning.
        threadpool.ThreadSafeList()
        warningsShown = self.flushWarnings([self.test_threadSafeList])
        # Use assertEqual; assertEquals is a deprecated alias and the rest of
        # this file uses the canonical spelling.
        self.assertEqual(len(warningsShown), 1)
        self.assertIdentical(warningsShown[0]['category'], DeprecationWarning)
        self.assertEqual(
            warningsShown[0]['message'],
            "twisted.python.threadpool.ThreadSafeList was deprecated in "
            "Twisted 10.1.0: This was an internal implementation detail of "
            "support for Jython 2.1, which is now obsolete.")
/* Identifiers beginning with a double underscore are reserved for the
 * implementation (C standard 7.1.3), so the include guard must not be
 * spelled __FAST_FLOAT_STRTOD_H__. */
#ifndef FAST_FLOAT_STRTOD_H
#define FAST_FLOAT_STRTOD_H

#if defined(__cplusplus)
extern "C"
{
#endif

/*
 * strtod(3)-style number parser backed by the fast_float library.
 * NOTE(review): expected to mirror strtod's contract, with '*out' receiving
 * the end-of-parse position -- confirm against the implementation.
 */
double fast_float_strtod(const char *in, char **out);

#if defined(__cplusplus)
}
#endif

#endif /* FAST_FLOAT_STRTOD_H */
# This file was created automatically by SWIG.
import __init___
#-------------- SHADOW WRAPPERS ------------------
def gluPickMatrix(x, y, width, height, viewport = None):
    'gluPickMatrix(x, y, width, height, viewport = None) -> None'
    # Thin shadow wrapper forwarding to the SWIG-generated __gluPickMatrix.
    # A viewport of None is passed through as-is (presumably the extension
    # module substitutes the current GL viewport -- confirm in __init___).
    return __gluPickMatrix(x, y, width, height, viewport)
def gluProject(objx, objy, objz, modelMatrix = None, projMatrix = None, viewport = None):
    'gluProject(objx, objy, objz, modelMatrix = None, projMatrix = None, viewport = None) -> (winx, winy, winz)'
    # Shadow wrapper: None matrices/viewport are forwarded unchanged to the
    # SWIG layer (presumably defaulted from current GL state -- confirm).
    return __gluProject(objx, objy, objz, modelMatrix, projMatrix, viewport)
def gluUnProject(winx, winy, winz, modelMatrix = None, projMatrix = None, viewport = None):
    'gluUnProject(winx, winy, winz, modelMatrix = None, projMatrix = None, viewport = None) -> (objx, objy, objz)'
    # Inverse of gluProject; thin forwarder to the SWIG-generated function.
    return __gluUnProject(winx, winy, winz, modelMatrix, projMatrix, viewport)
def gluUnProject4(winx, winy, winz, clipW, modelMatrix = None, projMatrix = None, viewport = None, near = 0.0, far = 1.0):
    'gluUnProject4(winx, winy, winz, clipW, modelMatrix = None, projMatrix = None, viewport = None, near = 0.0, far = 1.0) -> (objx, objy, objz, objw)'
    # GLU 1.3 variant of gluUnProject taking an explicit clip-space w and a
    # near/far depth range; thin forwarder to the SWIG-generated function.
    return __gluUnProject4(winx, winy, winz, clipW, modelMatrix, projMatrix, viewport, near, far)
def __info():
    # Report the queryable string properties of this module.
    # NOTE(review): the trailing codes ('su', 'eu') look like decode hints
    # for whoever consumes this list -- confirm against the consumer.
    # (The previous 'import string' was unused and has been removed.)
    return [('GLU_VERSION', GLU_VERSION, 'su'),
            ('GLU_EXTENSIONS', GLU_EXTENSIONS, 'eu')]
# ---------------------------------------------------------------------------
# Re-exports from the SWIG-generated __init___ extension module.  This table
# is auto-generated; each line simply aliases a name at this module's level.
# ---------------------------------------------------------------------------
# Exception class and module metadata.
GLUerror = __init___.GLUerror
__version__ = __init___.__version__
__date__ = __init___.__date__
__api_version__ = __init___.__api_version__
__author__ = __init___.__author__
__doc__ = __init___.__doc__
# GLU API functions (names with a leading __glu prefix are the raw SWIG
# entry points wrapped by the shadow functions defined above).
gluErrorString = __init___.gluErrorString
gluGetString = __init___.gluGetString
gluCheckExtension = __init___.gluCheckExtension
gluOrtho2D = __init___.gluOrtho2D
gluPerspective = __init___.gluPerspective
__gluPickMatrix = __init___.__gluPickMatrix
gluLookAt = __init___.gluLookAt
__gluProject = __init___.__gluProject
__gluUnProject = __init___.__gluUnProject
__gluUnProject4 = __init___.__gluUnProject4
gluScaleImage = __init___.gluScaleImage
gluScaleImageb = __init___.gluScaleImageb
gluScaleImageub = __init___.gluScaleImageub
gluScaleImages = __init___.gluScaleImages
gluScaleImageus = __init___.gluScaleImageus
gluScaleImagei = __init___.gluScaleImagei
gluScaleImageui = __init___.gluScaleImageui
gluScaleImagef = __init___.gluScaleImagef
gluBuild1DMipmaps = __init___.gluBuild1DMipmaps
gluBuild1DMipmapsb = __init___.gluBuild1DMipmapsb
gluBuild1DMipmapsub = __init___.gluBuild1DMipmapsub
gluBuild1DMipmapss = __init___.gluBuild1DMipmapss
gluBuild1DMipmapsus = __init___.gluBuild1DMipmapsus
gluBuild1DMipmapsi = __init___.gluBuild1DMipmapsi
gluBuild1DMipmapsui = __init___.gluBuild1DMipmapsui
gluBuild1DMipmapsf = __init___.gluBuild1DMipmapsf
gluBuild2DMipmaps = __init___.gluBuild2DMipmaps
gluBuild2DMipmapsb = __init___.gluBuild2DMipmapsb
gluBuild2DMipmapsub = __init___.gluBuild2DMipmapsub
gluBuild2DMipmapss = __init___.gluBuild2DMipmapss
gluBuild2DMipmapsus = __init___.gluBuild2DMipmapsus
gluBuild2DMipmapsi = __init___.gluBuild2DMipmapsi
gluBuild2DMipmapsui = __init___.gluBuild2DMipmapsui
gluBuild2DMipmapsf = __init___.gluBuild2DMipmapsf
gluBuild3DMipmaps = __init___.gluBuild3DMipmaps
gluBuild3DMipmapsb = __init___.gluBuild3DMipmapsb
gluBuild3DMipmapsub = __init___.gluBuild3DMipmapsub
gluBuild3DMipmapss = __init___.gluBuild3DMipmapss
gluBuild3DMipmapsus = __init___.gluBuild3DMipmapsus
gluBuild3DMipmapsi = __init___.gluBuild3DMipmapsi
gluBuild3DMipmapsui = __init___.gluBuild3DMipmapsui
gluBuild3DMipmapsf = __init___.gluBuild3DMipmapsf
gluBuild1DMipmapLevels = __init___.gluBuild1DMipmapLevels
gluBuild1DMipmapLevelsb = __init___.gluBuild1DMipmapLevelsb
gluBuild1DMipmapLevelsub = __init___.gluBuild1DMipmapLevelsub
gluBuild1DMipmapLevelss = __init___.gluBuild1DMipmapLevelss
gluBuild1DMipmapLevelsus = __init___.gluBuild1DMipmapLevelsus
gluBuild1DMipmapLevelsi = __init___.gluBuild1DMipmapLevelsi
gluBuild1DMipmapLevelsui = __init___.gluBuild1DMipmapLevelsui
gluBuild1DMipmapLevelsf = __init___.gluBuild1DMipmapLevelsf
gluBuild2DMipmapLevels = __init___.gluBuild2DMipmapLevels
gluBuild2DMipmapLevelsb = __init___.gluBuild2DMipmapLevelsb
gluBuild2DMipmapLevelsub = __init___.gluBuild2DMipmapLevelsub
gluBuild2DMipmapLevelss = __init___.gluBuild2DMipmapLevelss
gluBuild2DMipmapLevelsus = __init___.gluBuild2DMipmapLevelsus
gluBuild2DMipmapLevelsi = __init___.gluBuild2DMipmapLevelsi
gluBuild2DMipmapLevelsui = __init___.gluBuild2DMipmapLevelsui
gluBuild2DMipmapLevelsf = __init___.gluBuild2DMipmapLevelsf
gluBuild3DMipmapLevels = __init___.gluBuild3DMipmapLevels
gluBuild3DMipmapLevelsb = __init___.gluBuild3DMipmapLevelsb
gluBuild3DMipmapLevelsub = __init___.gluBuild3DMipmapLevelsub
gluBuild3DMipmapLevelss = __init___.gluBuild3DMipmapLevelss
gluBuild3DMipmapLevelsus = __init___.gluBuild3DMipmapLevelsus
gluBuild3DMipmapLevelsi = __init___.gluBuild3DMipmapLevelsi
gluBuild3DMipmapLevelsui = __init___.gluBuild3DMipmapLevelsui
gluBuild3DMipmapLevelsf = __init___.gluBuild3DMipmapLevelsf
gluNewQuadric = __init___.gluNewQuadric
gluDeleteQuadric = __init___.gluDeleteQuadric
gluQuadricNormals = __init___.gluQuadricNormals
gluQuadricTexture = __init___.gluQuadricTexture
gluQuadricOrientation = __init___.gluQuadricOrientation
gluQuadricDrawStyle = __init___.gluQuadricDrawStyle
gluCylinder = __init___.gluCylinder
gluDisk = __init___.gluDisk
gluPartialDisk = __init___.gluPartialDisk
gluSphere = __init___.gluSphere
gluQuadricCallback = __init___.gluQuadricCallback
gluNewTess = __init___.gluNewTess
gluDeleteTess = __init___.gluDeleteTess
gluTessBeginPolygon = __init___.gluTessBeginPolygon
gluBeginPolygon = __init___.gluBeginPolygon
gluTessBeginContour = __init___.gluTessBeginContour
gluTessVertex = __init___.gluTessVertex
gluTessEndContour = __init___.gluTessEndContour
gluNextContour = __init___.gluNextContour
gluTessEndPolygon = __init___.gluTessEndPolygon
gluEndPolygon = __init___.gluEndPolygon
gluTessProperty = __init___.gluTessProperty
gluTessNormal = __init___.gluTessNormal
gluTessCallback = __init___.gluTessCallback
gluGetTessProperty = __init___.gluGetTessProperty
gluNewNurbsRenderer = __init___.gluNewNurbsRenderer
gluDeleteNurbsRenderer = __init___.gluDeleteNurbsRenderer
gluBeginSurface = __init___.gluBeginSurface
gluBeginCurve = __init___.gluBeginCurve
gluEndCurve = __init___.gluEndCurve
gluEndSurface = __init___.gluEndSurface
gluBeginTrim = __init___.gluBeginTrim
gluEndTrim = __init___.gluEndTrim
gluPwlCurve = __init___.gluPwlCurve
gluNurbsCurve = __init___.gluNurbsCurve
gluNurbsSurface = __init___.gluNurbsSurface
gluLoadSamplingMatrices = __init___.gluLoadSamplingMatrices
gluNurbsProperty = __init___.gluNurbsProperty
gluGetNurbsProperty = __init___.gluGetNurbsProperty
gluNurbsCallback = __init___.gluNurbsCallback
gluNurbsCallbackData = __init___.gluNurbsCallbackData
__gluNurbsCallbackDataEXT = __init___.__gluNurbsCallbackDataEXT
__gluInitNurbsTessellatorEXT = __init___.__gluInitNurbsTessellatorEXT
# GLU constants.
GLU_VERSION_1_1 = __init___.GLU_VERSION_1_1
GLU_VERSION_1_2 = __init___.GLU_VERSION_1_2
GLU_VERSION_1_3 = __init___.GLU_VERSION_1_3
GLU_INVALID_ENUM = __init___.GLU_INVALID_ENUM
GLU_INVALID_VALUE = __init___.GLU_INVALID_VALUE
GLU_OUT_OF_MEMORY = __init___.GLU_OUT_OF_MEMORY
GLU_INCOMPATIBLE_GL_VERSION = __init___.GLU_INCOMPATIBLE_GL_VERSION
GLU_VERSION = __init___.GLU_VERSION
GLU_EXTENSIONS = __init___.GLU_EXTENSIONS
GLU_SMOOTH = __init___.GLU_SMOOTH
GLU_FLAT = __init___.GLU_FLAT
GLU_NONE = __init___.GLU_NONE
GLU_POINT = __init___.GLU_POINT
GLU_LINE = __init___.GLU_LINE
GLU_FILL = __init___.GLU_FILL
GLU_SILHOUETTE = __init___.GLU_SILHOUETTE
GLU_OUTSIDE = __init___.GLU_OUTSIDE
GLU_INSIDE = __init___.GLU_INSIDE
GLU_TESS_MAX_COORD = __init___.GLU_TESS_MAX_COORD
GLU_TESS_WINDING_RULE = __init___.GLU_TESS_WINDING_RULE
GLU_TESS_BOUNDARY_ONLY = __init___.GLU_TESS_BOUNDARY_ONLY
GLU_TESS_TOLERANCE = __init___.GLU_TESS_TOLERANCE
GLU_TESS_WINDING_ODD = __init___.GLU_TESS_WINDING_ODD
GLU_TESS_WINDING_NONZERO = __init___.GLU_TESS_WINDING_NONZERO
GLU_TESS_WINDING_POSITIVE = __init___.GLU_TESS_WINDING_POSITIVE
GLU_TESS_WINDING_NEGATIVE = __init___.GLU_TESS_WINDING_NEGATIVE
GLU_TESS_WINDING_ABS_GEQ_TWO = __init___.GLU_TESS_WINDING_ABS_GEQ_TWO
GLU_TESS_BEGIN = __init___.GLU_TESS_BEGIN
GLU_TESS_VERTEX = __init___.GLU_TESS_VERTEX
GLU_TESS_END = __init___.GLU_TESS_END
GLU_TESS_ERROR = __init___.GLU_TESS_ERROR
GLU_TESS_EDGE_FLAG = __init___.GLU_TESS_EDGE_FLAG
GLU_TESS_COMBINE = __init___.GLU_TESS_COMBINE
GLU_TESS_BEGIN_DATA = __init___.GLU_TESS_BEGIN_DATA
GLU_TESS_VERTEX_DATA = __init___.GLU_TESS_VERTEX_DATA
GLU_TESS_END_DATA = __init___.GLU_TESS_END_DATA
GLU_TESS_ERROR_DATA = __init___.GLU_TESS_ERROR_DATA
GLU_TESS_EDGE_FLAG_DATA = __init___.GLU_TESS_EDGE_FLAG_DATA
GLU_TESS_COMBINE_DATA = __init___.GLU_TESS_COMBINE_DATA
GLU_TESS_ERROR1 = __init___.GLU_TESS_ERROR1
GLU_TESS_ERROR2 = __init___.GLU_TESS_ERROR2
GLU_TESS_ERROR3 = __init___.GLU_TESS_ERROR3
GLU_TESS_ERROR4 = __init___.GLU_TESS_ERROR4
GLU_TESS_ERROR5 = __init___.GLU_TESS_ERROR5
GLU_TESS_ERROR6 = __init___.GLU_TESS_ERROR6
GLU_TESS_ERROR7 = __init___.GLU_TESS_ERROR7
GLU_TESS_ERROR8 = __init___.GLU_TESS_ERROR8
GLU_TESS_MISSING_BEGIN_POLYGON = __init___.GLU_TESS_MISSING_BEGIN_POLYGON
GLU_TESS_MISSING_BEGIN_CONTOUR = __init___.GLU_TESS_MISSING_BEGIN_CONTOUR
GLU_TESS_MISSING_END_POLYGON = __init___.GLU_TESS_MISSING_END_POLYGON
GLU_TESS_MISSING_END_CONTOUR = __init___.GLU_TESS_MISSING_END_CONTOUR
GLU_TESS_COORD_TOO_LARGE = __init___.GLU_TESS_COORD_TOO_LARGE
GLU_TESS_NEED_COMBINE_CALLBACK = __init___.GLU_TESS_NEED_COMBINE_CALLBACK
GLU_AUTO_LOAD_MATRIX = __init___.GLU_AUTO_LOAD_MATRIX
GLU_CULLING = __init___.GLU_CULLING
GLU_SAMPLING_TOLERANCE = __init___.GLU_SAMPLING_TOLERANCE
GLU_DISPLAY_MODE = __init___.GLU_DISPLAY_MODE
GLU_PARAMETRIC_TOLERANCE = __init___.GLU_PARAMETRIC_TOLERANCE
GLU_SAMPLING_METHOD = __init___.GLU_SAMPLING_METHOD
GLU_U_STEP = __init___.GLU_U_STEP
GLU_V_STEP = __init___.GLU_V_STEP
GLU_PATH_LENGTH = __init___.GLU_PATH_LENGTH
GLU_PARAMETRIC_ERROR = __init___.GLU_PARAMETRIC_ERROR
GLU_DOMAIN_DISTANCE = __init___.GLU_DOMAIN_DISTANCE
GLU_MAP1_TRIM_2 = __init___.GLU_MAP1_TRIM_2
GLU_MAP1_TRIM_3 = __init___.GLU_MAP1_TRIM_3
GLU_OUTLINE_POLYGON = __init___.GLU_OUTLINE_POLYGON
GLU_OUTLINE_PATCH = __init___.GLU_OUTLINE_PATCH
GLU_NURBS_ERROR1 = __init___.GLU_NURBS_ERROR1
GLU_NURBS_ERROR2 = __init___.GLU_NURBS_ERROR2
GLU_NURBS_ERROR3 = __init___.GLU_NURBS_ERROR3
GLU_NURBS_ERROR4 = __init___.GLU_NURBS_ERROR4
GLU_NURBS_ERROR5 = __init___.GLU_NURBS_ERROR5
GLU_NURBS_ERROR6 = __init___.GLU_NURBS_ERROR6
GLU_NURBS_ERROR7 = __init___.GLU_NURBS_ERROR7
GLU_NURBS_ERROR8 = __init___.GLU_NURBS_ERROR8
GLU_NURBS_ERROR9 = __init___.GLU_NURBS_ERROR9
GLU_NURBS_ERROR10 = __init___.GLU_NURBS_ERROR10
GLU_NURBS_ERROR11 = __init___.GLU_NURBS_ERROR11
GLU_NURBS_ERROR12 = __init___.GLU_NURBS_ERROR12
GLU_NURBS_ERROR13 = __init___.GLU_NURBS_ERROR13
GLU_NURBS_ERROR14 = __init___.GLU_NURBS_ERROR14
GLU_NURBS_ERROR15 = __init___.GLU_NURBS_ERROR15
GLU_NURBS_ERROR16 = __init___.GLU_NURBS_ERROR16
GLU_NURBS_ERROR17 = __init___.GLU_NURBS_ERROR17
GLU_NURBS_ERROR18 = __init___.GLU_NURBS_ERROR18
GLU_NURBS_ERROR19 = __init___.GLU_NURBS_ERROR19
GLU_NURBS_ERROR20 = __init___.GLU_NURBS_ERROR20
GLU_NURBS_ERROR21 = __init___.GLU_NURBS_ERROR21
GLU_NURBS_ERROR22 = __init___.GLU_NURBS_ERROR22
GLU_NURBS_ERROR23 = __init___.GLU_NURBS_ERROR23
GLU_NURBS_ERROR24 = __init___.GLU_NURBS_ERROR24
GLU_NURBS_ERROR25 = __init___.GLU_NURBS_ERROR25
GLU_NURBS_ERROR26 = __init___.GLU_NURBS_ERROR26
GLU_NURBS_ERROR27 = __init___.GLU_NURBS_ERROR27
GLU_NURBS_ERROR28 = __init___.GLU_NURBS_ERROR28
GLU_NURBS_ERROR29 = __init___.GLU_NURBS_ERROR29
GLU_NURBS_ERROR30 = __init___.GLU_NURBS_ERROR30
GLU_NURBS_ERROR31 = __init___.GLU_NURBS_ERROR31
GLU_NURBS_ERROR32 = __init___.GLU_NURBS_ERROR32
GLU_NURBS_ERROR33 = __init___.GLU_NURBS_ERROR33
GLU_NURBS_ERROR34 = __init___.GLU_NURBS_ERROR34
GLU_NURBS_ERROR35 = __init___.GLU_NURBS_ERROR35
GLU_NURBS_ERROR36 = __init___.GLU_NURBS_ERROR36
GLU_NURBS_ERROR37 = __init___.GLU_NURBS_ERROR37
GLU_CW = __init___.GLU_CW
GLU_CCW = __init___.GLU_CCW
GLU_INTERIOR = __init___.GLU_INTERIOR
GLU_EXTERIOR = __init___.GLU_EXTERIOR
GLU_UNKNOWN = __init___.GLU_UNKNOWN
GLU_BEGIN = __init___.GLU_BEGIN
GLU_VERTEX = __init___.GLU_VERTEX
GLU_END = __init___.GLU_END
GLU_ERROR = __init___.GLU_ERROR
GLU_EDGE_FLAG = __init___.GLU_EDGE_FLAG
GLU_NURBS_MODE = __init___.GLU_NURBS_MODE
GLU_NURBS_TESSELLATOR = __init___.GLU_NURBS_TESSELLATOR
GLU_NURBS_RENDERER = __init___.GLU_NURBS_RENDERER
GLU_NURBS_BEGIN = __init___.GLU_NURBS_BEGIN
GLU_NURBS_VERTEX = __init___.GLU_NURBS_VERTEX
GLU_NURBS_NORMAL = __init___.GLU_NURBS_NORMAL
GLU_NURBS_COLOR = __init___.GLU_NURBS_COLOR
GLU_NURBS_TEXTURE_COORD = __init___.GLU_NURBS_TEXTURE_COORD
GLU_NURBS_END = __init___.GLU_NURBS_END
GLU_NURBS_BEGIN_DATA = __init___.GLU_NURBS_BEGIN_DATA
GLU_NURBS_VERTEX_DATA = __init___.GLU_NURBS_VERTEX_DATA
GLU_NURBS_NORMAL_DATA = __init___.GLU_NURBS_NORMAL_DATA
GLU_NURBS_COLOR_DATA = __init___.GLU_NURBS_COLOR_DATA
GLU_NURBS_TEXTURE_COORD_DATA = __init___.GLU_NURBS_TEXTURE_COORD_DATA
GLU_NURBS_END_DATA = __init___.GLU_NURBS_END_DATA
GLU_OBJECT_PARAMETRIC_ERROR = __init___.GLU_OBJECT_PARAMETRIC_ERROR
GLU_OBJECT_PATH_LENGTH = __init___.GLU_OBJECT_PATH_LENGTH
from typing import TYPE_CHECKING, Any
from langchain_classic._api import create_importer
if TYPE_CHECKING:
from langchain_community.document_loaders import JoplinLoader
# Create a way to dynamically look up deprecated imports.
# Used to consolidate logic for raising deprecation warnings and
# handling optional imports.
DEPRECATED_LOOKUP = {"JoplinLoader": "langchain_community.document_loaders"}
# Lazy importer: resolves the names above from their new module, emitting a
# deprecation warning for the old import path.
_import_attribute = create_importer(__package__, deprecated_lookups=DEPRECATED_LOOKUP)
def __getattr__(name: str) -> Any:
    """Look up attributes dynamically.

    Module-level ``__getattr__`` (PEP 562): invoked for any attribute not
    found in this module's namespace; delegates to the deprecation-aware
    importer above.
    """
    return _import_attribute(name)
# Public API re-exported (lazily) by this shim module.
__all__ = [
    "JoplinLoader",
]
# this is based on jsarray.py
# todo check everything :)
from ..base import *
# numpy is an optional dependency; code using it must cope with its absence.
# Catch only ImportError: the previous bare 'except:' would also have
# swallowed SystemExit/KeyboardInterrupt and unrelated startup errors.
try:
    import numpy
except ImportError:
    pass
@Js
def ArrayBuffer():
    # Emulation of the JS ArrayBuffer(length) constructor; the backing
    # store is a Python bytearray of zero bytes.
    a = arguments[0]
    if isinstance(a, PyJsNumber):
        length = a.to_uint32()
        if length!=a.value:
            # ToUint32 changed the value, so it was not a valid array
            # length (negative, fractional, or too large).
            raise MakeError('RangeError', 'Invalid array length')
        temp = Js(bytearray([0]*length))
        return temp
    # Non-numeric (or missing) length argument.
    # NOTE(review): this yields a 1-byte buffer, while JS `new ArrayBuffer()`
    # produces a 0-length one -- confirm whether bytearray([0]) is intended.
    return Js(bytearray([0]))
# Calling and constructing ArrayBuffer behave the same here: the function
# itself is used as its own [[Construct]] target.
ArrayBuffer.create = ArrayBuffer
# NOTE(review): the constructor's 'length' own property is set to Js(None);
# confirm this is the intended arity value.
ArrayBuffer.own['length']['value'] = Js(None)
# Wire constructor <-> prototype.  'prototype' is non-enumerable,
# non-writable and non-configurable; 'constructor' on the prototype is
# non-enumerable and non-writable but configurable.
ArrayBuffer.define_own_property('prototype', {'value': ArrayBufferPrototype,
                                              'enumerable': False,
                                              'writable': False,
                                              'configurable': False})
ArrayBufferPrototype.define_own_property('constructor', {'value': ArrayBuffer,
                                                         'enumerable': False,
                                                         'writable': False,
                                                         'configurable': True})
/*-------------------------------------------------------------------------
*
* resowner.c
* POSTGRES resource owner management code.
*
* Query-lifespan resources are tracked by associating them with
* ResourceOwner objects. This provides a simple mechanism for ensuring
* that such resources are freed at the right time.
* See utils/resowner/README for more info on how to use it.
*
* The implementation consists of a small fixed-size array and a hash table.
* New entries are inserted to the fixed-size array, and when the array
* fills up, all the entries are moved to the hash table. This way, the
* array always contains a few most recently remembered references. To find
* a particular reference, you need to search both the array and the hash
* table.
*
* The most frequent usage is that a resource is remembered, and forgotten
* shortly thereafter. For example, pin a buffer, read one tuple from it,
* release the pin. Linearly scanning the small array handles that case
* efficiently. However, some resources are held for a longer time, and
* sometimes a lot of resources need to be held simultaneously. The hash
* table handles those cases.
*
* When it's time to release the resources, we sort them according to the
* release-priority of each resource, and release them in that order.
*
* Local lock references are special, they are not stored in the array or
* the hash table. Instead, each resource owner has a separate small cache
* of locks it owns. The lock manager has the same information in its local
* lock hash table, and we fall back on that if the cache overflows, but
* traversing the hash table is slower when there are a lot of locks
* belonging to other resource owners. This is to speed up bulk releasing
* or reassigning locks from a resource owner to its parent.
*
*
* Portions Copyright (c) 1996-2026, PostgreSQL Global Development Group
* Portions Copyright (c) 1994, Regents of the University of California
*
*
* IDENTIFICATION
* src/backend/utils/resowner/resowner.c
*
*-------------------------------------------------------------------------
*/
#include "postgres.h"
#include "common/hashfn.h"
#include "common/int.h"
#include "lib/ilist.h"
#include "storage/aio.h"
#include "storage/ipc.h"
#include "storage/predicate.h"
#include "storage/proc.h"
#include "utils/memutils.h"
#include "utils/resowner.h"
/*
 * ResourceElem represents a reference associated with a resource owner.
 *
 * All objects managed by this code are required to fit into a Datum,
 * which is fine since they are generally pointers or integers.
 */
typedef struct ResourceElem
{
	Datum		item;			/* the resource reference itself (pointer or
								 * integer squeezed into a Datum) */
	const ResourceOwnerDesc *kind;	/* NULL indicates a free hash table slot */
} ResourceElem;
/*
 * Size of the fixed-size array to hold most-recently remembered resources.
 * (Must fit in the uint8 'narr' counter of ResourceOwnerData.)
 */
#define RESOWNER_ARRAY_SIZE 32

/*
 * Initially allocated size of a ResourceOwner's hash table. Must be power of
 * two because we use (capacity - 1) as mask for hashing.
 */
#define RESOWNER_HASH_INIT_SIZE 64

/*
 * How many items may be stored in a hash table of given capacity. When this
 * number is reached, we must resize.
 *
 * The hash table must always have enough free space that we can copy the
 * entries from the array to it, in ResourceOwnerSort. We also insist that
 * the initial size is large enough that we don't hit the max size immediately
 * when it's created. Aside from those limitations, 0.75 is a reasonable fill
 * factor.
 */
#define RESOWNER_HASH_MAX_ITEMS(capacity) \
	Min(capacity - RESOWNER_ARRAY_SIZE, (capacity)/4 * 3)

StaticAssertDecl(RESOWNER_HASH_MAX_ITEMS(RESOWNER_HASH_INIT_SIZE) >= RESOWNER_ARRAY_SIZE,
				 "initial hash size too small compared to array size");

/*
 * MAX_RESOWNER_LOCKS is the size of the per-resource owner locks cache. It's
 * chosen based on some testing with pg_dump with a large schema. When the
 * tests were done (on 9.2), resource owners in a pg_dump run contained up
 * to 9 locks, regardless of the schema size, except for the top resource
 * owner which contained much more (overflowing the cache). 15 seems like a
 * nice round number that's somewhat higher than what pg_dump needs. Note that
 * making this number larger is not free - the bigger the cache, the slower
 * it is to release locks (in retail), when a resource owner holds many locks.
 */
#define MAX_RESOWNER_LOCKS 15
/*
* ResourceOwner objects look like this
*/
struct ResourceOwnerData
{
	ResourceOwner parent;		/* NULL if no parent (toplevel owner) */
	ResourceOwner firstchild;	/* head of linked list of children */
	ResourceOwner nextchild;	/* next child of same parent */
	const char *name;			/* name (just for debugging) */

	/*
	 * When ResourceOwnerRelease is called, we sort the 'hash' and 'arr' by
	 * the release priority. After that, no new resources can be remembered
	 * or forgotten in retail. We have separate flags because
	 * ResourceOwnerReleaseAllOfKind() temporarily sets 'releasing' without
	 * sorting the arrays.
	 */
	bool		releasing;
	bool		sorted;			/* are 'hash' and 'arr' sorted by priority? */

	/*
	 * Number of items in the locks cache, array, and hash table respectively.
	 * (These are packed together to avoid padding in the struct.  Note the
	 * uint8 types bound MAX_RESOWNER_LOCKS and RESOWNER_ARRAY_SIZE.)
	 */
	uint8		nlocks;			/* number of owned locks */
	uint8		narr;			/* how many items are stored in the array */
	uint32		nhash;			/* how many items are stored in the hash */

	/*
	 * The fixed-size array for recent resources.
	 *
	 * If 'sorted' is set, the contents are sorted by release priority.
	 */
	ResourceElem arr[RESOWNER_ARRAY_SIZE];

	/*
	 * The hash table. Uses open-addressing. 'nhash' is the number of items
	 * present; if it would exceed 'grow_at', we enlarge it and re-hash.
	 * 'grow_at' should be rather less than 'capacity' so that we don't waste
	 * too much time searching for empty slots.
	 *
	 * If 'sorted' is set, the contents are no longer hashed, but sorted by
	 * release priority. The first 'nhash' elements are occupied, the rest
	 * are empty.
	 */
	ResourceElem *hash;
	uint32		capacity;		/* allocated length of hash[] */
	uint32		grow_at;		/* grow hash when reach this */

	/* The local locks cache. */
	LOCALLOCK  *locks[MAX_RESOWNER_LOCKS];	/* list of owned locks */

	/*
	 * AIO handles need be registered in critical sections and therefore
	 * cannot use the normal ResourceElem mechanism.
	 */
	dlist_head	aio_handles;
};
/*****************************************************************************
* GLOBAL MEMORY *
*****************************************************************************/
/*
 * Well-known resource owners.  NOTE(review): their lifecycle (who sets and
 * clears them) is managed outside this file -- see resowner.h callers.
 */
ResourceOwner CurrentResourceOwner = NULL;
ResourceOwner CurTransactionResourceOwner = NULL;
ResourceOwner TopTransactionResourceOwner = NULL;
ResourceOwner AuxProcessResourceOwner = NULL;

/* Uncomment to count array vs. hash lookups (see counters just below). */
/* #define RESOWNER_STATS */

#ifdef RESOWNER_STATS
static int	narray_lookups = 0;
static int	nhash_lookups = 0;
#endif

/*
 * List of add-on callbacks for resource releasing
 */
typedef struct ResourceReleaseCallbackItem
{
	struct ResourceReleaseCallbackItem *next;
	ResourceReleaseCallback callback;
	void	   *arg;
} ResourceReleaseCallbackItem;

static ResourceReleaseCallbackItem *ResourceRelease_callbacks = NULL;


/* Internal routines */
static inline uint32 hash_resource_elem(Datum value, const ResourceOwnerDesc *kind);
static void ResourceOwnerAddToHash(ResourceOwner owner, Datum value,
								   const ResourceOwnerDesc *kind);
static int	resource_priority_cmp(const void *a, const void *b);
static void ResourceOwnerSort(ResourceOwner owner);
static void ResourceOwnerReleaseAll(ResourceOwner owner,
									ResourceReleasePhase phase,
									bool printLeakWarnings);
static void ResourceOwnerReleaseInternal(ResourceOwner owner,
										 ResourceReleasePhase phase,
										 bool isCommit,
										 bool isTopLevel);
static void ReleaseAuxProcessResourcesCallback(int code, Datum arg);
/*****************************************************************************
* INTERNAL ROUTINES *
*****************************************************************************/
/*
 * Compute the hash table position for a (value, kind) pair.
 */
static inline uint32
hash_resource_elem(Datum value, const ResourceOwnerDesc *kind)
{
	uint64		value_hash = murmurhash64((uint64) value);

	/*
	 * A pointer-valued resource is nearly unique on its own, but some kinds
	 * store small integers (e.g. Files and Buffers), which would collide
	 * heavily across kinds.  Folding the kind pointer in with
	 * hash_combine64() is a cheap way to keep those apart; with only a
	 * handful of resource kinds, nothing stronger is needed.
	 */
	return hash_combine64(value_hash, (uint64) (uintptr_t) kind);
}
/*
 * Insert 'value' of resource kind 'kind' into the owner's hash table.
 * The caller must have ensured there is at least one free slot.
 */
static void
ResourceOwnerAddToHash(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
{
	uint32		mask = owner->capacity - 1;
	uint32		slot;

	Assert(kind != NULL);

	/* Open addressing with linear probing: take the first free slot. */
	slot = hash_resource_elem(value, kind) & mask;
	while (owner->hash[slot].kind != NULL)
		slot = (slot + 1) & mask;

	owner->hash[slot].item = value;
	owner->hash[slot].kind = kind;
	owner->nhash++;
}
/*
 * qsort() comparator: orders ResourceElems by (release_phase,
 * release_priority) in *descending* order, so that release can walk the
 * sorted array from its end.
 */
static int
resource_priority_cmp(const void *a, const void *b)
{
	const ResourceElem *ea = (const ResourceElem *) a;
	const ResourceElem *eb = (const ResourceElem *) b;

	/* Different phases: the later phase sorts earlier (reverse order). */
	if (ea->kind->release_phase != eb->kind->release_phase)
		return (ea->kind->release_phase > eb->kind->release_phase) ? -1 : 1;

	/* Same phase: higher priority sorts earlier (note swapped operands). */
	return pg_cmp_u32(eb->kind->release_priority, ea->kind->release_priority);
}
/*
 * Sort resources in reverse release priority.
 *
 * If the hash table is in use, all the elements from the fixed-size array are
 * moved to the hash table, and then the hash table is sorted. If there is no
 * hash table, then the fixed-size array is sorted directly. In either case,
 * the result is one sorted array that contains all the resources.
 */
static void
ResourceOwnerSort(ResourceOwner owner)
{
	ResourceElem *items;
	uint32		nitems;

	if (owner->nhash == 0)
	{
		/* Everything lives in the fixed-size array; sort it in place. */
		items = owner->arr;
		nitems = owner->narr;
	}
	else
	{
		/*
		 * Compact the hash table, so that all the elements are in the
		 * beginning of the 'hash' array, with no empty elements.
		 */
		uint32		dst = 0;	/* next destination slot while compacting */

		for (int idx = 0; idx < owner->capacity; idx++)
		{
			if (owner->hash[idx].kind != NULL)
			{
				if (dst != idx)
					owner->hash[dst] = owner->hash[idx];
				dst++;
			}
		}

		/*
		 * Move all entries from the fixed-size array to 'hash'.
		 *
		 * RESOWNER_HASH_MAX_ITEMS is defined so that there is always enough
		 * free space to move all the elements from the fixed-size array to
		 * the hash.
		 */
		Assert(dst + owner->narr <= owner->capacity)
		;
		for (int idx = 0; idx < owner->narr; idx++)
		{
			owner->hash[dst] = owner->arr[idx];
			dst++;
		}
		Assert(dst == owner->nhash + owner->narr);
		/* Everything is now in 'hash'; update the counters to match. */
		owner->narr = 0;
		owner->nhash = dst;

		items = owner->hash;
		nitems = owner->nhash;
	}

	qsort(items, nitems, sizeof(ResourceElem), resource_priority_cmp);
}
/*
 * Call the ReleaseResource callback on entries with given 'phase'.
 *
 * If 'printLeakWarnings' is true, a WARNING is emitted for every resource
 * released here (the caller expects them to have been closed already).
 */
static void
ResourceOwnerReleaseAll(ResourceOwner owner, ResourceReleasePhase phase,
						bool printLeakWarnings)
{
	ResourceElem *items;
	uint32		nitems;

	/*
	 * ResourceOwnerSort must've been called already. All the resources are
	 * either in the array or the hash.
	 */
	Assert(owner->releasing);
	Assert(owner->sorted);
	if (owner->nhash == 0)
	{
		items = owner->arr;
		nitems = owner->narr;
	}
	else
	{
		Assert(owner->narr == 0);
		items = owner->hash;
		nitems = owner->nhash;
	}

	/*
	 * The resources are sorted in reverse priority order. Release them
	 * starting from the end, until we hit the end of the phase that we are
	 * releasing now. We will continue from there when called again for the
	 * next phase.
	 */
	while (nitems > 0)
	{
		uint32		idx = nitems - 1;
		Datum		value = items[idx].item;
		const ResourceOwnerDesc *kind = items[idx].kind;

		if (kind->release_phase > phase)
			break;
		Assert(kind->release_phase == phase);

		if (printLeakWarnings)
		{
			char	   *res_str;

			/* Prefer the kind's own debug formatter, if it provides one. */
			res_str = kind->DebugPrint ?
				kind->DebugPrint(value)
				: psprintf("%s %p", kind->name, DatumGetPointer(value));
			elog(WARNING, "resource was not closed: %s", res_str);
			pfree(res_str);
		}
		kind->ReleaseResource(value);
		nitems--;
	}
	/* Record how many (earlier-phase) entries remain for later calls. */
	if (owner->nhash == 0)
		owner->narr = nitems;
	else
		owner->nhash = nitems;
}
/*****************************************************************************
 *                              EXPORTED ROUTINES                            *
 *****************************************************************************/

/*
 * ResourceOwnerCreate
 *		Allocate and initialize a new, empty ResourceOwner.
 *
 * All ResourceOwner objects live in TopMemoryContext: they must only be
 * freed explicitly (via ResourceOwnerDelete), never by context reset.
 */
ResourceOwner
ResourceOwnerCreate(ResourceOwner parent, const char *name)
{
    ResourceOwner newowner;

    newowner = (ResourceOwner) MemoryContextAllocZero(TopMemoryContext,
                                                      sizeof(struct ResourceOwnerData));
    newowner->name = name;
    dlist_init(&newowner->aio_handles);

    /* Link ourselves at the head of the parent's child list, if any. */
    if (parent != NULL)
    {
        newowner->parent = parent;
        newowner->nextchild = parent->firstchild;
        parent->firstchild = newowner;
    }

    return newowner;
}
/*
 * Make sure there is room for at least one more resource in an array.
 *
 * This is separate from actually inserting a resource because if we run out
 * of memory, it's critical to do so *before* acquiring the resource.
 *
 * NB: Make sure there are no unrelated ResourceOwnerRemember() calls between
 * your ResourceOwnerEnlarge() call and the ResourceOwnerRemember() call that
 * you reserved the space for!
 */
void
ResourceOwnerEnlarge(ResourceOwner owner)
{
    /*
     * Mustn't try to remember more resources after we have already started
     * releasing
     */
    if (owner->releasing)
        elog(ERROR, "ResourceOwnerEnlarge called after release started");

    /* Fast path: the fixed-size array still has a free slot. */
    if (owner->narr < RESOWNER_ARRAY_SIZE)
        return;                 /* no work needed */

    /*
     * Is there space in the hash? If not, enlarge it.
     */
    if (owner->narr + owner->nhash >= owner->grow_at)
    {
        uint32      i,
                    oldcap,
                    newcap;
        ResourceElem *oldhash;
        ResourceElem *newhash;

        oldhash = owner->hash;
        oldcap = owner->capacity;

        /* Double the capacity (it must stay a power of 2!) */
        newcap = (oldcap > 0) ? oldcap * 2 : RESOWNER_HASH_INIT_SIZE;
        newhash = (ResourceElem *) MemoryContextAllocZero(TopMemoryContext,
                                                          newcap * sizeof(ResourceElem));

        /*
         * We assume we can't fail below this point, so OK to scribble on the
         * owner
         */
        owner->hash = newhash;
        owner->capacity = newcap;
        owner->grow_at = RESOWNER_HASH_MAX_ITEMS(newcap);
        owner->nhash = 0;

        if (oldhash != NULL)
        {
            /*
             * Transfer any pre-existing entries into the new hash table; they
             * don't necessarily go where they were before, so this simple
             * logic is the best way.
             */
            for (i = 0; i < oldcap; i++)
            {
                if (oldhash[i].kind != NULL)
                    ResourceOwnerAddToHash(owner, oldhash[i].item, oldhash[i].kind);
            }

            /* And release old hash table. */
            pfree(oldhash);
        }
    }

    /* Move items from the array to the hash */
    for (int i = 0; i < owner->narr; i++)
        ResourceOwnerAddToHash(owner, owner->arr[i].item, owner->arr[i].kind);
    owner->narr = 0;

    Assert(owner->nhash <= owner->grow_at);
}
/*
 * Remember that an object is owned by a ResourceOwner
 *
 * Caller must have previously done ResourceOwnerEnlarge()
 */
void
ResourceOwnerRemember(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
{
    uint32      slot;

    /* Sanity-check the ResourceOwnerDesc: both fields must be filled in. */
    Assert(kind->release_phase != 0);
    Assert(kind->release_priority != 0);

    /*
     * Remembering new resources is forbidden once release has started; the
     * same condition was already checked (with elog) in ResourceOwnerEnlarge.
     */
    Assert(!owner->releasing);
    Assert(!owner->sorted);

    /* An overfull array means the caller skipped ResourceOwnerEnlarge. */
    if (owner->narr >= RESOWNER_ARRAY_SIZE)
        elog(ERROR, "ResourceOwnerRemember called but array was full");

    /* Append to the fixed-size array. */
    slot = owner->narr++;
    owner->arr[slot].item = value;
    owner->arr[slot].kind = kind;
}
/*
 * Forget that an object is owned by a ResourceOwner
 *
 * Note: If same resource ID is associated with the ResourceOwner more than
 * once, one instance is removed.
 *
 * Note: Forgetting a resource does not guarantee that there is room to
 * remember a new resource. One exception is when you forget the most
 * recently remembered resource; that does make room for a new remember call.
 * Some code callers rely on that exception.
 */
void
ResourceOwnerForget(ResourceOwner owner, Datum value, const ResourceOwnerDesc *kind)
{
    /*
     * Mustn't call this after we have already started releasing resources.
     * (Release callback functions are not allowed to release additional
     * resources.)
     */
    if (owner->releasing)
        elog(ERROR, "ResourceOwnerForget called for %s after release started", kind->name);
    Assert(!owner->sorted);

    /* Search through all items in the array first. */
    for (int i = owner->narr - 1; i >= 0; i--)
    {
        if (owner->arr[i].item == value &&
            owner->arr[i].kind == kind)
        {
            /* Found it: fill the hole with the last array entry. */
            owner->arr[i] = owner->arr[owner->narr - 1];
            owner->narr--;

#ifdef RESOWNER_STATS
            narray_lookups++;
#endif

            return;
        }
    }

    /* Search hash */
    if (owner->nhash > 0)
    {
        uint32      mask = owner->capacity - 1;
        uint32      idx;

        /* Linear probing from the element's hash position. */
        idx = hash_resource_elem(value, kind) & mask;
        for (uint32 i = 0; i < owner->capacity; i++)
        {
            if (owner->hash[idx].item == value &&
                owner->hash[idx].kind == kind)
            {
                /*
                 * Just NULL out the slot.  No tombstone is needed because
                 * this search loop visits up to 'capacity' slots and does
                 * not stop at empty entries.
                 */
                owner->hash[idx].item = (Datum) 0;
                owner->hash[idx].kind = NULL;
                owner->nhash--;

#ifdef RESOWNER_STATS
                nhash_lookups++;
#endif

                return;
            }
            idx = (idx + 1) & mask;
        }
    }

    /*
     * Use %p to print the reference, since most objects tracked by a resource
     * owner are pointers. It's a bit misleading if it's not a pointer, but
     * this is a programmer error, anyway.
     */
    elog(ERROR, "%s %p is not owned by resource owner %s",
         kind->name, DatumGetPointer(value), owner->name);
}
/*
 * ResourceOwnerRelease
 *		Release all resources owned by a ResourceOwner and its descendants,
 *		but don't delete the owner objects themselves.
 *
 * Note that this executes just one phase of release, and so typically
 * must be called three times. We do it this way because (a) we want to
 * do all the recursion separately for each phase, thereby preserving
 * the needed order of operations; and (b) xact.c may have other operations
 * to do between the phases.
 *
 * phase: release phase to execute
 * isCommit: true for successful completion of a query or transaction,
 *			false for unsuccessful
 * isTopLevel: true if completing a main transaction, else false
 *
 * isCommit is passed because some modules may expect that their resources
 * were all released already if the transaction or portal finished normally.
 * If so it is reasonable to give a warning (NOT an error) should any
 * unreleased resources be present. When isCommit is false, such warnings
 * are generally inappropriate.
 *
 * isTopLevel is passed when we are releasing TopTransactionResourceOwner
 * at completion of a main transaction. This generally means that *all*
 * resources will be released, and so we can optimize things a bit.
 *
 * NOTE: After starting the release process, by calling this function, no new
 * resources can be remembered in the resource owner. You also cannot call
 * ResourceOwnerForget on any previously remembered resources to release
 * resources "in retail" after that, you must let the bulk release take care
 * of them.
 */
void
ResourceOwnerRelease(ResourceOwner owner,
                     ResourceReleasePhase phase,
                     bool isCommit,
                     bool isTopLevel)
{
    /* There's not currently any setup needed before recursing */
    ResourceOwnerReleaseInternal(owner, phase, isCommit, isTopLevel);

#ifdef RESOWNER_STATS
    /* Report and reset the lookup counters once per top-level transaction. */
    if (isTopLevel)
    {
        elog(LOG, "RESOWNER STATS: lookups: array %d, hash %d",
             narray_lookups, nhash_lookups);
        narray_lookups = 0;
        nhash_lookups = 0;
    }
#endif
}
static void
ResourceOwnerReleaseInternal(ResourceOwner owner,
                             ResourceReleasePhase phase,
                             bool isCommit,
                             bool isTopLevel)
{
    ResourceOwner child;
    ResourceOwner save;
    ResourceReleaseCallbackItem *item;
    ResourceReleaseCallbackItem *next;

    /* Recurse to handle descendants */
    for (child = owner->firstchild; child != NULL; child = child->nextchild)
        ResourceOwnerReleaseInternal(child, phase, isCommit, isTopLevel);

    /*
     * To release the resources in the right order, sort them by phase and
     * priority.
     *
     * The ReleaseResource callback functions are not allowed to remember or
     * forget any other resources after this. Otherwise we lose track of where
     * we are in processing the hash/array.
     */
    if (!owner->releasing)
    {
        /* First call for this owner must be the first phase. */
        Assert(phase == RESOURCE_RELEASE_BEFORE_LOCKS);
        Assert(!owner->sorted);
        owner->releasing = true;
    }
    else
    {
        /*
         * Phase is normally > RESOURCE_RELEASE_BEFORE_LOCKS, if this is not
         * the first call to ResourceOwnerRelease. But if an error happens
         * between the release phases, we might get called again for the same
         * ResourceOwner from AbortTransaction.
         */
    }
    if (!owner->sorted)
    {
        ResourceOwnerSort(owner);
        owner->sorted = true;
    }

    /*
     * Make CurrentResourceOwner point to me, so that the release callback
     * functions know which resource owner is being released.
     */
    save = CurrentResourceOwner;
    CurrentResourceOwner = owner;
    if (phase == RESOURCE_RELEASE_BEFORE_LOCKS)
    {
        /*
         * Release all resources that need to be released before the locks.
         *
         * During a commit, there shouldn't be any remaining resources ---
         * that would indicate failure to clean up the executor correctly ---
         * so issue warnings. In the abort case, just clean up quietly.
         */
        ResourceOwnerReleaseAll(owner, phase, isCommit);

        /*
         * Drain the AIO handle list.  NOTE(review): this loop assumes
         * pgaio_io_release_resowner delinks the node from aio_handles;
         * otherwise it would never terminate -- confirm against aio code.
         */
        while (!dlist_is_empty(&owner->aio_handles))
        {
            dlist_node *node = dlist_head_node(&owner->aio_handles);

            pgaio_io_release_resowner(node, !isCommit);
        }
    }
    else if (phase == RESOURCE_RELEASE_LOCKS)
    {
        if (isTopLevel)
        {
            /*
             * For a top-level xact we are going to release all locks (or at
             * least all non-session locks), so just do a single lmgr call at
             * the top of the recursion.
             */
            if (owner == TopTransactionResourceOwner)
            {
                ProcReleaseLocks(isCommit);
                ReleasePredicateLocks(isCommit, false);
            }
        }
        else
        {
            /*
             * Release locks retail. Note that if we are committing a
             * subtransaction, we do NOT release its locks yet, but transfer
             * them to the parent.
             */
            LOCALLOCK **locks;
            int         nlocks;

            Assert(owner->parent != NULL);

            /*
             * Pass the list of locks owned by this resource owner to the lock
             * manager, unless it has overflowed.
             */
            if (owner->nlocks > MAX_RESOWNER_LOCKS)
            {
                locks = NULL;
                nlocks = 0;
            }
            else
            {
                locks = owner->locks;
                nlocks = owner->nlocks;
            }
            if (isCommit)
                LockReassignCurrentOwner(locks, nlocks);
            else
                LockReleaseCurrentOwner(locks, nlocks);
        }
    }
    else if (phase == RESOURCE_RELEASE_AFTER_LOCKS)
    {
        /*
         * Release all resources that need to be released after the locks.
         */
        ResourceOwnerReleaseAll(owner, phase, isCommit);
    }

    /* Let add-on modules get a chance too */
    for (item = ResourceRelease_callbacks; item; item = next)
    {
        /* allow callbacks to unregister themselves when called */
        next = item->next;
        item->callback(phase, isCommit, isTopLevel, item->arg);
    }

    CurrentResourceOwner = save;
}
/*
 * ResourceOwnerReleaseAllOfKind
 *		Release all resources of a certain type held by this owner.
 *
 * The ReleaseResource callback is invoked for every matching entry, after
 * the entry has already been removed from the owner's bookkeeping.
 */
void
ResourceOwnerReleaseAllOfKind(ResourceOwner owner, const ResourceOwnerDesc *kind)
{
    /*
     * Mustn't call this after we have already started releasing resources.
     * (The message used to say "ResourceOwnerForget", copy-pasted from that
     * function, which was misleading in error reports.)
     */
    if (owner->releasing)
        elog(ERROR, "ResourceOwnerReleaseAllOfKind called for %s after release started",
             kind->name);
    Assert(!owner->sorted);

    /*
     * Temporarily set 'releasing', to prevent calls to ResourceOwnerRemember
     * while we're scanning the owner. Enlarging the hash would cause us to
     * lose track of the point we're scanning.
     */
    owner->releasing = true;

    /* Array first: compact by moving the last entry into the hole. */
    for (int i = 0; i < owner->narr; i++)
    {
        if (owner->arr[i].kind == kind)
        {
            Datum       value = owner->arr[i].item;

            owner->arr[i] = owner->arr[owner->narr - 1];
            owner->narr--;
            i--;                /* re-examine the entry we just moved here */

            kind->ReleaseResource(value);
        }
    }

    /* Then hash: NULLing the slot suffices, searches don't stop at empties. */
    for (int i = 0; i < owner->capacity; i++)
    {
        if (owner->hash[i].kind == kind)
        {
            Datum       value = owner->hash[i].item;

            owner->hash[i].item = (Datum) 0;
            owner->hash[i].kind = NULL;
            owner->nhash--;

            kind->ReleaseResource(value);
        }
    }
    owner->releasing = false;
}
/*
 * ResourceOwnerDelete
 *		Delete an owner object and its descendants.
 *
 * The caller must have already released all resources in the object tree.
 */
void
ResourceOwnerDelete(ResourceOwner owner)
{
    /* We had better not be deleting CurrentResourceOwner ... */
    Assert(owner != CurrentResourceOwner);

    /* And it better not own any resources, either */
    Assert(owner->narr == 0);
    Assert(owner->nhash == 0);
    /* nlocks is either empty or the "overflowed" marker (MAX + 1) */
    Assert(owner->nlocks == 0 || owner->nlocks == MAX_RESOWNER_LOCKS + 1);

    /*
     * Delete children. The recursive call will delink the child from me, so
     * just iterate as long as there is a child.
     */
    while (owner->firstchild != NULL)
        ResourceOwnerDelete(owner->firstchild);

    /*
     * We delink the owner from its parent before deleting it, so that if
     * there's an error we won't have deleted/busted owners still attached to
     * the owner tree. Better a leak than a crash.
     */
    ResourceOwnerNewParent(owner, NULL);

    /* And free the object. */
    if (owner->hash)
        pfree(owner->hash);
    pfree(owner);
}
/*
 * Fetch parent of a ResourceOwner (returns NULL if top-level owner)
 *
 * Plain accessor; the parent link is maintained by ResourceOwnerCreate and
 * ResourceOwnerNewParent.
 */
ResourceOwner
ResourceOwnerGetParent(ResourceOwner owner)
{
    return owner->parent;
}
/*
 * Reassign a ResourceOwner to have a new parent
 *
 * Passing newparent == NULL detaches the owner from the tree entirely.
 */
void
ResourceOwnerNewParent(ResourceOwner owner,
                       ResourceOwner newparent)
{
    ResourceOwner oldparent = owner->parent;

    /* Splice the owner out of its old parent's child list, if any. */
    if (oldparent != NULL)
    {
        ResourceOwner *link = &oldparent->firstchild;

        while (*link != NULL)
        {
            if (*link == owner)
            {
                *link = owner->nextchild;
                break;
            }
            link = &(*link)->nextchild;
        }
    }

    if (newparent != NULL)
    {
        Assert(owner != newparent);
        owner->parent = newparent;
        owner->nextchild = newparent->firstchild;
        newparent->firstchild = owner;
    }
    else
    {
        owner->parent = NULL;
        owner->nextchild = NULL;
    }
}
/*
 * Register or deregister callback functions for resource cleanup
 *
 * These functions can be used by dynamically loaded modules. They used to be
 * the only way for an extension to register custom resource types with a
 * resource owner, but nowadays it is easier to define a new
 * ResourceOwnerDesc with custom callbacks.
 */
void
RegisterResourceReleaseCallback(ResourceReleaseCallback callback, void *arg)
{
    ResourceReleaseCallbackItem *cb;

    cb = (ResourceReleaseCallbackItem *)
        MemoryContextAlloc(TopMemoryContext,
                           sizeof(ResourceReleaseCallbackItem));
    cb->callback = callback;
    cb->arg = arg;

    /* Push onto the head of the global callback list. */
    cb->next = ResourceRelease_callbacks;
    ResourceRelease_callbacks = cb;
}
void
UnregisterResourceReleaseCallback(ResourceReleaseCallback callback, void *arg)
{
    ResourceReleaseCallbackItem **link;

    /* Find the first registration matching both callback and arg. */
    for (link = &ResourceRelease_callbacks; *link != NULL; link = &(*link)->next)
    {
        ResourceReleaseCallbackItem *entry = *link;

        if (entry->callback == callback && entry->arg == arg)
        {
            /* Unlink and free it; only one registration is removed. */
            *link = entry->next;
            pfree(entry);
            break;
        }
    }
}
/*
 * Establish an AuxProcessResourceOwner for the current process.
 */
void
CreateAuxProcessResourceOwner(void)
{
    /* The asserts ensure this runs before any other owner exists. */
    Assert(AuxProcessResourceOwner == NULL);
    Assert(CurrentResourceOwner == NULL);
    AuxProcessResourceOwner = ResourceOwnerCreate(NULL, "AuxiliaryProcess");
    CurrentResourceOwner = AuxProcessResourceOwner;

    /*
     * Register a shmem-exit callback for cleanup of aux-process resource
     * owner. (This needs to run after, e.g., ShutdownXLOG.)
     */
    on_shmem_exit(ReleaseAuxProcessResourcesCallback, 0);
}
/*
 * Convenience routine to release all resources tracked in
 * AuxProcessResourceOwner (but that resowner is not destroyed here).
 * Warn about leaked resources if isCommit is true.
 */
void
ReleaseAuxProcessResources(bool isCommit)
{
    /*
     * At this writing, the only thing that could actually get released is
     * buffer pins; but we may as well run the full three-phase protocol.
     */
    static const ResourceReleasePhase phases[] = {
        RESOURCE_RELEASE_BEFORE_LOCKS,
        RESOURCE_RELEASE_LOCKS,
        RESOURCE_RELEASE_AFTER_LOCKS
    };

    for (int i = 0; i < (int) (sizeof(phases) / sizeof(phases[0])); i++)
        ResourceOwnerRelease(AuxProcessResourceOwner,
                             phases[i],
                             isCommit, true);

    /* Reset state so the owner can be reused. */
    AuxProcessResourceOwner->releasing = false;
    AuxProcessResourceOwner->sorted = false;
}
/*
 * Shmem-exit callback for the same.
 * Warn about leaked resources if process exit code is zero (ie normal).
 */
static void
ReleaseAuxProcessResourcesCallback(int code, Datum arg)
{
    /* code == 0 means a normal exit, where leftover resources are leaks */
    bool        isCommit = (code == 0);

    ReleaseAuxProcessResources(isCommit);
}
/*
 * Remember that a Local Lock is owned by a ResourceOwner
 *
 * Unlike the generic ResourceOwnerRemember, the per-owner list of locks is
 * only a lossy cache: it holds at most MAX_RESOWNER_LOCKS entries, and once
 * it overflows we stop tracking locks altogether.  The cap exists so that
 * ResourceOwnerForgetLock never has to scan a huge array to find an entry.
 */
void
ResourceOwnerRememberLock(ResourceOwner owner, LOCALLOCK *locallock)
{
    Assert(locallock != NULL);

    /* Already overflowed: lock tracking is disabled for this owner. */
    if (owner->nlocks > MAX_RESOWNER_LOCKS)
        return;

    if (owner->nlocks == MAX_RESOWNER_LOCKS)
    {
        /* This entry pushes us into overflow; mark that and give up. */
        owner->nlocks++;
        return;
    }

    owner->locks[owner->nlocks++] = locallock;
}
/*
 * Forget that a Local Lock is owned by a ResourceOwner
 */
void
ResourceOwnerForgetLock(ResourceOwner owner, LOCALLOCK *locallock)
{
    int         slot;

    /* If the cache overflowed, individual locks are no longer tracked. */
    if (owner->nlocks > MAX_RESOWNER_LOCKS)
        return;
    Assert(owner->nlocks > 0);

    /* Scan from the newest entry backwards. */
    slot = owner->nlocks;
    while (--slot >= 0)
    {
        if (owner->locks[slot] == locallock)
        {
            /* Fill the hole with the last entry and shrink the array. */
            owner->locks[slot] = owner->locks[--owner->nlocks];
            return;
        }
    }
    elog(ERROR, "lock reference %p is not owned by resource owner %s",
         locallock, owner->name);
}
/*
 * Remember that an AIO handle is owned by this ResourceOwner.
 *
 * AIO handles are tracked via their embedded dlist node rather than through
 * the generic array/hash machinery; they are drained by the aio_handles loop
 * in ResourceOwnerReleaseInternal.
 */
void
ResourceOwnerRememberAioHandle(ResourceOwner owner, struct dlist_node *ioh_node)
{
    dlist_push_tail(&owner->aio_handles, ioh_node);
}
/*
 * Forget that an AIO handle is owned by this ResourceOwner.
 */
void
ResourceOwnerForgetAioHandle(ResourceOwner owner, struct dlist_node *ioh_node)
{
    dlist_delete_from(&owner->aio_handles, ioh_node);
}
# -*- coding: utf-8 -*-
#
# ===================================================================
# The contents of this file are dedicated to the public domain. To
# the extent that dedication to the public domain is not available,
# everyone is granted a worldwide, perpetual, royalty-free,
# non-exclusive license to exercise all rights associated with the
# contents of this file for any purpose whatsoever.
# No rights are reserved.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# ===================================================================
from Cryptodome.Util.py3compat import bord
from Cryptodome.Util._raw_api import (load_pycryptodome_raw_lib,
VoidPointer, SmartPointer,
create_string_buffer,
get_raw_buffer, c_size_t,
c_uint8_ptr)
from Cryptodome.Hash.keccak import _raw_keccak_lib
class SHA3_384_Hash(object):
    """A SHA3-384 hash object.
    Do not instantiate directly.
    Use the :func:`new` function.

    :ivar oid: ASN.1 Object ID
    :vartype oid: string

    :ivar digest_size: the size in bytes of the resulting hash
    :vartype digest_size: integer
    """

    # The size of the resulting hash in bytes.
    digest_size = 48
    # ASN.1 Object ID
    oid = "2.16.840.1.101.3.4.2.9"

    def __init__(self, data, update_after_digest):
        self._update_after_digest = update_after_digest
        self._digest_done = False

        state = VoidPointer()
        # Capacity is twice the digest size; 0x06 is the SHA-3
        # domain-separation/padding byte (per FIPS 202).
        result = _raw_keccak_lib.keccak_init(state.address_of(),
                                             c_size_t(self.digest_size * 2),
                                             0x06)
        if result:
            raise ValueError("Error %d while instantiating SHA-3/384"
                             % result)
        # SmartPointer ensures keccak_destroy runs when this object is
        # garbage-collected, so the C state cannot leak.
        self._state = SmartPointer(state.get(),
                                   _raw_keccak_lib.keccak_destroy)
        if data:
            self.update(data)

    def update(self, data):
        """Continue hashing of a message by consuming the next chunk of data.
        Args:
            data (byte string/byte array/memoryview): The next chunk of the message being hashed.

        Raises:
            TypeError: if :meth:`digest` was already called and this object
                was created with ``update_after_digest=False``.
        """
        if self._digest_done and not self._update_after_digest:
            raise TypeError("You can only call 'digest' or 'hexdigest' on this object")

        result = _raw_keccak_lib.keccak_absorb(self._state.get(),
                                               c_uint8_ptr(data),
                                               c_size_t(len(data)))
        if result:
            raise ValueError("Error %d while updating SHA-3/384"
                             % result)
        return self

    def digest(self):
        """Return the **binary** (non-printable) digest of the message that has been hashed so far.

        :return: The hash digest, computed over the data processed so far.
                 Binary form.
        :rtype: byte string
        """
        self._digest_done = True

        bfr = create_string_buffer(self.digest_size)
        result = _raw_keccak_lib.keccak_digest(self._state.get(),
                                               bfr,
                                               c_size_t(self.digest_size))
        if result:
            # Fixed message: it previously claimed "instantiating",
            # copy-pasted from __init__, which misled error reports.
            raise ValueError("Error %d while computing digest of SHA-3/384"
                             % result)

        self._digest_value = get_raw_buffer(bfr)
        return self._digest_value

    def hexdigest(self):
        """Return the **printable** digest of the message that has been hashed so far.

        :return: The hash digest, computed over the data processed so far.
                 Hexadecimal encoded.
        :rtype: string
        """
        return "".join(["%02x" % bord(x) for x in self.digest()])

    def new(self):
        """Create a fresh SHA3-384 hash object."""
        return type(self)(None, self._update_after_digest)
def new(*args, **kwargs):
    """Create a new hash object.

    Args:
      data (byte string/byte array/memoryview):
        Optional. The very first chunk of the message to hash.
        It is equivalent to an early call to :meth:`update`.
      update_after_digest (boolean):
        Whether :meth:`digest` can be followed by another :meth:`update`
        (default: ``False``).

    :Return: A :class:`SHA3_384_Hash` hash object
    """
    initial_data = kwargs.pop("data", None)
    update_after_digest = kwargs.pop("update_after_digest", False)

    # The initial data may be given positionally or as the 'data' keyword,
    # but not both.
    if len(args) == 1:
        if initial_data:
            raise ValueError("Initial data for hash specified twice")
        initial_data = args[0]

    if kwargs:
        raise TypeError("Unknown parameters: " + str(kwargs))

    return SHA3_384_Hash(initial_data, update_after_digest)
# Module-level alias of SHA3_384_Hash.digest_size: the size of the resulting
# hash in bytes (48).
digest_size = SHA3_384_Hash.digest_size
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import argparse
from pyspark.sql import SparkSession
def set_common_options(spark_source,
                       url='localhost:5432',
                       jdbc_table='default.default',
                       user='root',
                       password='root',
                       driver='driver'):
    """Apply the JDBC connection options shared by reads and writes.

    :param spark_source: a DataFrameReader or DataFrameWriter to configure
    :param url: JDBC connection URL (host:port)
    :param jdbc_table: fully qualified JDBC table name
    :param user: JDBC connection user
    :param password: JDBC connection password
    :param driver: JDBC driver class name
    :return: the same reader/writer, configured for the 'jdbc' format
    """
    configured = spark_source.format('jdbc')
    for option_name, option_value in (('url', url),
                                      ('dbtable', jdbc_table),
                                      ('user', user),
                                      ('password', password),
                                      ('driver', driver)):
        configured = configured.option(option_name, option_value)
    return configured
def spark_write_to_jdbc(spark, url, user, password, metastore_table, jdbc_table, driver,
                        truncate, save_mode, batch_size, num_partitions,
                        create_table_column_types):
    """Copy a metastore (Hive) table out to an external JDBC table.

    :param spark: active SparkSession
    :param url: JDBC connection URL
    :param user: JDBC user name
    :param password: JDBC password
    :param metastore_table: source table in the metastore
    :param jdbc_table: destination JDBC table
    :param driver: JDBC driver class name
    :param truncate: if truthy, passed through as the writer's 'truncate' option
    :param save_mode: Spark save mode (e.g. 'append', 'overwrite')
    :param batch_size: JDBC write batch size, applied only if truthy
    :param num_partitions: 'numPartitions' writer option, applied only if truthy
    :param create_table_column_types: explicit column DDL types, if given
    """
    # Note: the chained expression previously ended with a dangling
    # backslash right before a comment line, which only parsed by accident
    # of tokenizer line-joining.  A plain expression avoids that landmine.
    writer = spark.table(metastore_table).write

    # first set common options
    writer = set_common_options(writer, url, jdbc_table, user, password, driver)

    # now set write-specific options
    if truncate:
        writer = writer.option('truncate', truncate)
    if batch_size:
        writer = writer.option('batchsize', batch_size)
    if num_partitions:
        writer = writer.option('numPartitions', num_partitions)
    if create_table_column_types:
        writer = writer.option("createTableColumnTypes", create_table_column_types)

    writer.save(mode=save_mode)
def spark_read_from_jdbc(spark, url, user, password, metastore_table, jdbc_table, driver,
                         save_mode, save_format, fetch_size, num_partitions,
                         partition_column, lower_bound, upper_bound):
    """Load an external JDBC table and save it as a metastore table.

    Read-tuning options (fetch size, partitioning) are applied only when
    their arguments are truthy; partition bounds require all three of
    partition_column, lower_bound and upper_bound.
    """
    # Shared JDBC connection options first.
    reader = set_common_options(spark.read, url, jdbc_table, user, password, driver)

    # Read-specific options, applied only when provided.
    if fetch_size:
        reader = reader.option('fetchsize', fetch_size)
    if num_partitions:
        reader = reader.option('numPartitions', num_partitions)
    if partition_column and lower_bound and upper_bound:
        reader = (reader
                  .option('partitionColumn', partition_column)
                  .option('lowerBound', lower_bound)
                  .option('upperBound', upper_bound))

    dataframe = reader.load()
    dataframe.write.saveAsTable(metastore_table, format=save_format, mode=save_mode)
if __name__ == "__main__":  # pragma: no cover
    # Command-line interface: every option is registered with an explicit
    # dest; all values arrive as strings (no type= conversions are applied).
    parser = argparse.ArgumentParser(description='Spark-JDBC')
    parser.add_argument('-cmdType', dest='cmd_type', action='store')
    parser.add_argument('-url', dest='url', action='store')
    parser.add_argument('-user', dest='user', action='store')
    parser.add_argument('-password', dest='password', action='store')
    parser.add_argument('-metastoreTable', dest='metastore_table', action='store')
    parser.add_argument('-jdbcTable', dest='jdbc_table', action='store')
    parser.add_argument('-jdbcDriver', dest='jdbc_driver', action='store')
    parser.add_argument('-jdbcTruncate', dest='truncate', action='store')
    parser.add_argument('-saveMode', dest='save_mode', action='store')
    parser.add_argument('-saveFormat', dest='save_format', action='store')
    parser.add_argument('-batchsize', dest='batch_size', action='store')
    parser.add_argument('-fetchsize', dest='fetch_size', action='store')
    parser.add_argument('-name', dest='name', action='store')
    parser.add_argument('-numPartitions', dest='num_partitions', action='store')
    parser.add_argument('-partitionColumn', dest='partition_column', action='store')
    parser.add_argument('-lowerBound', dest='lower_bound', action='store')
    parser.add_argument('-upperBound', dest='upper_bound', action='store')
    parser.add_argument('-createTableColumnTypes',
                        dest='create_table_column_types', action='store')
    arguments = parser.parse_args()

    # Build the SparkSession; Hive support is required for metastore tables.
    # NOTE(review): an earlier comment here claimed dynamic allocation is
    # disabled, but no such setting appears in this block -- presumably it
    # is configured by the caller (e.g. spark-submit); confirm.
    spark = SparkSession.builder \
        .appName(arguments.name) \
        .enableHiveSupport() \
        .getOrCreate()

    # Dispatch on transfer direction; any other cmd_type is silently ignored.
    if arguments.cmd_type == "spark_to_jdbc":
        spark_write_to_jdbc(spark,
                            arguments.url,
                            arguments.user,
                            arguments.password,
                            arguments.metastore_table,
                            arguments.jdbc_table,
                            arguments.jdbc_driver,
                            arguments.truncate,
                            arguments.save_mode,
                            arguments.batch_size,
                            arguments.num_partitions,
                            arguments.create_table_column_types)
    elif arguments.cmd_type == "jdbc_to_spark":
        spark_read_from_jdbc(spark,
                             arguments.url,
                             arguments.user,
                             arguments.password,
                             arguments.metastore_table,
                             arguments.jdbc_table,
                             arguments.jdbc_driver,
                             arguments.save_mode,
                             arguments.save_format,
                             arguments.fetch_size,
                             arguments.num_partitions,
                             arguments.partition_column,
                             arguments.lower_bound,
                             arguments.upper_bound)
# Copyright (c) 2004 Python Software Foundation.
# All rights reserved.
# Written by Eric Price <eprice at tjhsst.edu>
# and Facundo Batista <facundo at taniquetil.com.ar>
# and Raymond Hettinger <python at rcn.com>
# and Aahz <aahz at pobox.com>
# and Tim Peters
# This module should be kept in sync with the latest updates of the
# IBM specification as it evolves. Those updates will be treated
# as bug fixes (deviation from the spec is a compatibility, usability
# bug) and will be backported. At this point the spec is stabilizing
# and the updates are becoming fewer, smaller, and less significant.
"""
This is an implementation of decimal floating point arithmetic based on
the General Decimal Arithmetic Specification:
http://speleotrove.com/decimal/decarith.html
and IEEE standard 854-1987:
http://en.wikipedia.org/wiki/IEEE_854-1987
Decimal floating point has finite precision with arbitrarily large bounds.
The purpose of this module is to support arithmetic using familiar
"schoolhouse" rules and to avoid some of the tricky representation
issues associated with binary floating point. The package is especially
useful for financial applications or for contexts where users have
expectations that are at odds with binary floating point (for instance,
in binary floating point, 1.00 % 0.1 gives 0.09999999999999995 instead
of 0.0; Decimal('1.00') % Decimal('0.1') returns the expected
Decimal('0.00')).
Here are some examples of using the decimal module:
>>> from decimal import *
>>> setcontext(ExtendedContext)
>>> Decimal(0)
Decimal('0')
>>> Decimal('1')
Decimal('1')
>>> Decimal('-.0123')
Decimal('-0.0123')
>>> Decimal(123456)
Decimal('123456')
>>> Decimal('123.45e12345678')
Decimal('1.2345E+12345680')
>>> Decimal('1.33') + Decimal('1.27')
Decimal('2.60')
>>> Decimal('12.34') + Decimal('3.87') - Decimal('18.41')
Decimal('-2.20')
>>> dig = Decimal(1)
>>> print(dig / Decimal(3))
0.333333333
>>> getcontext().prec = 18
>>> print(dig / Decimal(3))
0.333333333333333333
>>> print(dig.sqrt())
1
>>> print(Decimal(3).sqrt())
1.73205080756887729
>>> print(Decimal(3) ** 123)
4.85192780976896427E+58
>>> inf = Decimal(1) / Decimal(0)
>>> print(inf)
Infinity
>>> neginf = Decimal(-1) / Decimal(0)
>>> print(neginf)
-Infinity
>>> print(neginf + inf)
NaN
>>> print(neginf * inf)
-Infinity
>>> print(dig / 0)
Infinity
>>> getcontext().traps[DivisionByZero] = 1
>>> print(dig / 0)
Traceback (most recent call last):
...
...
...
decimal.DivisionByZero: x / 0
>>> c = Context()
>>> c.traps[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.divide(Decimal(0), Decimal(0))
Decimal('NaN')
>>> c.traps[InvalidOperation] = 1
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> print(c.divide(Decimal(0), Decimal(0)))
Traceback (most recent call last):
...
...
...
decimal.InvalidOperation: 0 / 0
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> c.traps[InvalidOperation] = 0
>>> print(c.divide(Decimal(0), Decimal(0)))
NaN
>>> print(c.flags[InvalidOperation])
1
>>>
"""
# Public API of the module.
__all__ = [
    # Two major classes
    'Decimal', 'Context',

    # Contexts
    'DefaultContext', 'BasicContext', 'ExtendedContext',

    # Exceptions
    'DecimalException', 'Clamped', 'InvalidOperation', 'DivisionByZero',
    'Inexact', 'Rounded', 'Subnormal', 'Overflow', 'Underflow',
    'FloatOperation',

    # Constants for use in setting up contexts
    'ROUND_DOWN', 'ROUND_HALF_UP', 'ROUND_HALF_EVEN', 'ROUND_CEILING',
    'ROUND_FLOOR', 'ROUND_UP', 'ROUND_HALF_DOWN', 'ROUND_05UP',

    # Functions for manipulating contexts
    'setcontext', 'getcontext', 'localcontext',

    # Limits for the C version for compatibility
    'MAX_PREC',  'MAX_EMAX', 'MIN_EMIN', 'MIN_ETINY',

    # C version: compile time choice that enables the thread local context
    'HAVE_THREADS'
]

__version__ = '1.70'    # Highest version of the spec this complies with
                        # See http://speleotrove.com/decimal/

import copy as _copy
import math as _math
import numbers as _numbers
import sys

try:
    # Named-tuple type returned by Decimal.as_tuple(); degrade gracefully to
    # a plain tuple on platforms whose collections lacks namedtuple.
    from collections import namedtuple as _namedtuple
    DecimalTuple = _namedtuple('DecimalTuple', 'sign digits exponent')
except ImportError:
    DecimalTuple = lambda *args: args

# Rounding mode constants (string values, matching the C implementation).
ROUND_DOWN = 'ROUND_DOWN'
ROUND_HALF_UP = 'ROUND_HALF_UP'
ROUND_HALF_EVEN = 'ROUND_HALF_EVEN'
ROUND_CEILING = 'ROUND_CEILING'
ROUND_FLOOR = 'ROUND_FLOOR'
ROUND_UP = 'ROUND_UP'
ROUND_HALF_DOWN = 'ROUND_HALF_DOWN'
ROUND_05UP = 'ROUND_05UP'

# Compatibility with the C version
HAVE_THREADS = True
if sys.maxsize == 2**63-1:
    # 64-bit platform limits
    MAX_PREC = 999999999999999999
    MAX_EMAX = 999999999999999999
    MIN_EMIN = -999999999999999999
else:
    # 32-bit platform limits
    MAX_PREC = 425000000
    MAX_EMAX = 425000000
    MIN_EMIN = -425000000

MIN_ETINY = MIN_EMIN - (MAX_PREC-1)
# Errors
class DecimalException(ArithmeticError):
    """Base exception class.

    Used exceptions derive from this.
    If an exception derives from another exception besides this (such as
    Underflow (Inexact, Rounded, Subnormal) that indicates that it is only
    called if the others are present. This isn't actually used for
    anything, though.

    handle -- Called when context._raise_error is called and the
              trap_enabler is not set. First argument is self, second is the
              context. More arguments can be given, those being after
              the explanation in _raise_error (For example,
              context._raise_error(NewError, '(-x)!', self._sign) would
              call NewError().handle(context, self._sign).)

    To define a new exception, it should be sufficient to have it derive
    from DecimalException.
    """
    def handle(self, context, *args):
        # Default substitute result for an untrapped signal: None.
        # Subclasses override this to return the spec-mandated value.
        pass
class Clamped(DecimalException):
    """Exponent of a 0 changed to fit bounds.

    This occurs and signals clamped if the exponent of a result has been
    altered in order to fit the constraints of a specific concrete
    representation. This may occur when the exponent of a zero result would
    be outside the bounds of a representation, or when a large normal
    number would have an encoded exponent that cannot be represented. In
    this latter case, the exponent is reduced to fit and the corresponding
    number of zero digits are appended to the coefficient ("fold-down").
    """
    # brython fixme: no handle() override here -- the inherited no-op
    # DecimalException.handle is used when this signal is untrapped.
    pass
class InvalidOperation(DecimalException):
    """An invalid operation was performed.

    Signaled for operations with no meaningful result, for example:

        something creates a signaling NaN
        -INF + INF
        0 * (+-)INF
        (+-)INF / (+-)INF
        x % 0
        (+-)INF % x
        x._rescale(non-integer)
        sqrt(-x), x > 0
        0 ** 0
        x ** (non-integer)
        x ** (+-)INF
        an operand is invalid

    When untrapped, the result is a quiet positive NaN -- except when the
    cause was a signaling NaN, in which case the quiet NaN keeps the
    original sign and any diagnostic payload.
    """
    def handle(self, context, *args):
        # No payload: plain quiet NaN.
        if not args:
            return _NaN
        # Payload present: quiet the signaling NaN, preserving its sign
        # and diagnostic digits, then clamp the payload to the context.
        quiet = _dec_from_triple(args[0]._sign, args[0]._int, 'n', True)
        return quiet._fix_nan(context)
class ConversionSyntax(InvalidOperation):
    """Trying to convert badly formed string.

    This occurs and signals invalid-operation if an string is being
    converted to a number and it does not conform to the numeric string
    syntax. The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class DivisionByZero(DecimalException, ZeroDivisionError):
    """Division by 0.

    This occurs and signals division-by-zero if division of a finite number
    by zero was attempted (during a divide-integer or divide operation, or a
    power operation with negative right-hand operand), and the dividend was
    not zero.

    The result of the operation is [sign,inf], where sign is the exclusive
    or of the signs of the operands for divide, or is 1 for an odd power of
    -0, for power.
    """
    def handle(self, context, sign, *args):
        # Untrapped result: infinity carrying the computed sign.
        return _SignedInfinity[sign]
class DivisionImpossible(InvalidOperation):
    """Cannot perform the division adequately.

    This occurs and signals invalid-operation if the integer result of a
    divide-integer or remainder operation had too many digits (would be
    longer than precision). The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class DivisionUndefined(InvalidOperation, ZeroDivisionError):
    """Undefined result of division.

    This occurs and signals invalid-operation if division by zero was
    attempted (during a divide-integer, divide, or remainder operation), and
    the dividend is also zero. The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN (0/0 has no defined value).
        return _NaN
class Inexact(DecimalException):
    """Had to round, losing information.

    This occurs and signals inexact whenever the result of an operation is
    not exact (that is, it needed to be rounded and any discarded digits
    were non-zero), or if an overflow or underflow condition occurs. The
    result in all cases is unchanged.

    The inexact signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) was inexact.
    """
    # brython fix me: no handle() override -- an untrapped Inexact leaves
    # the result unchanged, so the inherited no-op handler suffices.
    pass
class InvalidContext(InvalidOperation):
    """Invalid context. Unknown rounding, for example.

    This occurs and signals invalid-operation if an invalid context was
    detected during an operation. This can occur if contexts are not checked
    on creation and either the precision exceeds the capability of the
    underlying concrete representation or an unknown or unsupported rounding
    was specified. These aspects of the context need only be checked when
    the values are required to be used. The result is [0,qNaN].
    """
    def handle(self, context, *args):
        # Untrapped result: a quiet NaN.
        return _NaN
class Rounded(DecimalException):
    """Number got rounded (not necessarily changed during rounding).

    This occurs and signals rounded whenever the result of an operation is
    rounded (that is, some zero or non-zero digits were discarded from the
    coefficient), or if an overflow or underflow condition occurs. The
    result in all cases is unchanged.

    The rounded signal may be tested (or trapped) to determine if a given
    operation (or sequence of operations) caused a loss of precision.
    """
    # brython fix me: no handle() override -- the result is unchanged, so
    # the inherited no-op handler suffices.
    pass
class Subnormal(DecimalException):
    """Exponent < Emin before rounding.

    This occurs and signals subnormal whenever the result of a conversion or
    operation is subnormal (that is, its adjusted exponent is less than
    Emin, before any rounding). The result in all cases is unchanged.

    The subnormal signal may be tested (or trapped) to determine if a given
    or operation (or sequence of operations) yielded a subnormal result.
    """
    # brython fix me: no handle() override -- the result is unchanged, so
    # the inherited no-op handler suffices.
    pass
class Overflow(Inexact, Rounded):
    """Numerical overflow.

    Signaled when the adjusted exponent of a rounded result (from a
    conversion, or an operation that is not an attempted division by zero)
    would exceed Emax.

    The substituted result depends on the rounding mode: the half-way modes
    and round-up give [sign,inf]; round-down gives the largest finite
    number representable at the current precision, with the sign of the
    intermediate result; round-ceiling gives infinity when that sign is 0
    and the largest finite number when it is 1; round-floor is the mirror
    image. Inexact and Rounded are always signaled as well.
    """
    def handle(self, context, sign, *args):
        rounding = context.rounding
        # All modes that round away from or toward the nearest value
        # substitute a signed infinity.
        if rounding in (ROUND_HALF_UP, ROUND_HALF_EVEN,
                        ROUND_HALF_DOWN, ROUND_UP):
            return _SignedInfinity[sign]
        # Directed modes give infinity only on the side they round toward.
        if (sign == 0 and rounding == ROUND_CEILING) or \
           (sign == 1 and rounding == ROUND_FLOOR):
            return _SignedInfinity[sign]
        # Otherwise: the largest finite magnitude at the current precision.
        if sign in (0, 1):
            return _dec_from_triple(sign, '9' * context.prec,
                                    context.Emax - context.prec + 1)
class Underflow(Inexact, Rounded, Subnormal):
    """Numerical underflow with result rounded to 0.

    This occurs and signals underflow if a result is inexact and the
    adjusted exponent of the result would be smaller (more negative) than
    the smallest value that can be handled by the implementation (the value
    Emin). That is, the result is both inexact and subnormal.

    The result after an underflow will be a subnormal number rounded, if
    necessary, so that its exponent is not less than Etiny. This may result
    in 0 with the sign of the intermediate result and an exponent of Etiny.

    In all cases, Inexact, Rounded, and Subnormal will also be raised.
    """
    # brython fix me: no handle() override -- the rounded subnormal result
    # is produced by the caller, so the inherited no-op handler suffices.
    pass
class FloatOperation(DecimalException, TypeError):
    """Enable stricter semantics for mixing floats and Decimals.

    If the signal is not trapped (default), mixing floats and Decimals is
    permitted in the Decimal() constructor, context.create_decimal() and
    all comparison operators. Both conversion and comparisons are exact.
    Any occurrence of a mixed operation is silently recorded by setting
    FloatOperation in the context flags. Explicit conversions with
    Decimal.from_float() or context.create_decimal_from_float() do not
    set the flag.

    Otherwise (the signal is trapped), only equality comparisons and explicit
    conversions are silent. All other mixed operations raise FloatOperation.
    """
    # brython fix me: no handle() override -- untrapped occurrences only
    # set the flag; the inherited no-op handler suffices.
    pass
# List of public traps and flags
# (the keys of Context.flags / Context.traps, in a stable order).
_signals = [Clamped, DivisionByZero, Inexact, Overflow, Rounded,
            Underflow, InvalidOperation, Subnormal, FloatOperation]

# Map conditions (per the spec) to signals
# Internal conditions that surface to users as InvalidOperation.
_condition_map = {ConversionSyntax:InvalidOperation,
                  DivisionImpossible:InvalidOperation,
                  DivisionUndefined:InvalidOperation,
                  InvalidContext:InvalidOperation}

# Valid rounding modes
_rounding_modes = (ROUND_DOWN, ROUND_HALF_UP, ROUND_HALF_EVEN, ROUND_CEILING,
                   ROUND_FLOOR, ROUND_UP, ROUND_HALF_DOWN, ROUND_05UP)
##### Context Functions ##################################################

# The getcontext() and setcontext() function manage access to a thread-local
# current context. Py2.4 offers direct support for thread locals. If that
# is not available, use threading.current_thread() which is slower but will
# work for older Pythons. If threads are not part of the build, create a
# mock threading object with threading.local() returning the module namespace.

try:
    import threading
except ImportError:
    # Python was compiled without threads; create a mock object instead
    class MockThreading(object):
        # Stand-in for threading.local(): there is only one "thread", so
        # the module namespace itself serves as the context storage slot.
        def local(self, sys=sys):
            return sys.modules[__name__]
    threading = MockThreading()
    del MockThreading

try:
    threading.local

except AttributeError:
    # Old Python without threading.local: store the context directly on the
    # current thread object.
    # To fix reloading, force it to create a new context
    # Old contexts have different exceptions in their dicts, making problems.
    if hasattr(threading.current_thread(), '__decimal_context__'):
        del threading.current_thread().__decimal_context__

    def setcontext(context):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Never install a shared prototype context directly; hand out a
            # fresh copy so flag mutations don't leak between threads.
            context = context.copy()
            context.clear_flags()
        threading.current_thread().__decimal_context__ = context

    def getcontext():
        """Returns this thread's context.

        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return threading.current_thread().__decimal_context__
        except AttributeError:
            context = Context()
            threading.current_thread().__decimal_context__ = context
            return context

else:
    # Normal path: threading.local() gives per-thread storage.
    local = threading.local()
    if hasattr(local, '__decimal_context__'):
        del local.__decimal_context__

    def getcontext(_local=local):
        """Returns this thread's context.

        If this thread does not yet have a context, returns
        a new context and sets this thread's context.
        New contexts are copies of DefaultContext.
        """
        try:
            return _local.__decimal_context__
        except AttributeError:
            context = Context()
            _local.__decimal_context__ = context
            return context

    def setcontext(context, _local=local):
        """Set this thread's context to context."""
        if context in (DefaultContext, BasicContext, ExtendedContext):
            # Install a copy so the shared prototypes stay pristine.
            context = context.copy()
            context.clear_flags()
        _local.__decimal_context__ = context

    del threading, local        # Don't contaminate the namespace
def localcontext(ctx=None):
    """Return a context manager that installs a copy of the supplied context.

    A copy of the current context is used when no context is specified.
    Entering the ``with`` block makes the copy the active context; the
    previous context is restored on exit:

        def sin(x):
            with localcontext() as ctx:
                ctx.prec += 2
                # Rest of sin calculation algorithm
                # uses a precision 2 greater than normal
            return +s # Convert result to normal precision

        def sin(x):
            with localcontext(ExtendedContext):
                # Rest of sin calculation algorithm
                # uses the Extended Context from the
                # General Decimal Arithmetic Specification
            return +s # Convert result to normal context

    >>> setcontext(DefaultContext)
    >>> print(getcontext().prec)
    28
    >>> with localcontext():
    ...     ctx = getcontext()
    ...     ctx.prec += 2
    ...     print(ctx.prec)
    ...
    30
    >>> with localcontext(ExtendedContext):
    ...     print(getcontext().prec)
    ...
    9
    >>> print(getcontext().prec)
    28
    """
    return _ContextManager(getcontext() if ctx is None else ctx)
##### Decimal class #######################################################
# Do not subclass Decimal from numbers.Real and do not register it as such
# (because Decimals are not interoperable with floats). See the notes in
# numbers.py for more detail.
class Decimal(object):
"""Floating point class for decimal arithmetic."""
__slots__ = ('_exp','_int','_sign', '_is_special')
# Generally, the value of the Decimal instance is given by
# (-1)**_sign * _int * 10**_exp
# Special values are signified by _is_special == True
# We're immutable, so use __new__ not __init__
def __new__(cls, value="0", context=None):
    """Create a decimal point instance.

    >>> Decimal('3.14') # string input
    Decimal('3.14')
    >>> Decimal((0, (3, 1, 4), -2)) # tuple (sign, digit_tuple, exponent)
    Decimal('3.14')
    >>> Decimal(314) # int
    Decimal('314')
    >>> Decimal(Decimal(314)) # another decimal instance
    Decimal('314')
    >>> Decimal(' 3.14 \\n') # leading and trailing whitespace okay
    Decimal('3.14')
    """
    # Note that the coefficient, self._int, is actually stored as
    # a string rather than as a tuple of digits. This speeds up
    # the "digits to integer" and "integer to digits" conversions
    # that are used in almost every arithmetic operation on
    # Decimals. This is an internal detail: the as_tuple function
    # and the Decimal constructor still deal with tuples of
    # digits.
    self = object.__new__(cls)
    # From a string
    # REs insist on real strings, so we can too.
    if isinstance(value, str):
        # Brython rewrite of CPython's _parser-based string parsing:
        # normalize, strip the sign, then handle the special spellings
        # before pattern-matching the remainder.
        value=value.strip().lower()
        if value.startswith("-"):
            self._sign = 1
            value=value[1:]
        else:
            self._sign = 0
        if value in ('', 'nan'):
            # Empty string and 'nan' both become a quiet NaN.
            self._is_special = True
            self._int = ''
            #if m.group('signal'): #figure out what a signaling NaN is later
            #   self._exp = 'N'
            #else:
            #   self._exp = 'n'
            self._exp='n'
            return self
        if value in ('inf', 'infinity'):
            self._int = '0'
            self._exp = 'F'
            self._is_special = True
            return self
        # Brython-specific: match via the JavaScript regex bridge.
        # NOTE(review): the pattern only allows an optional '+' in the
        # exponent, so strings like '1e-5' fall into the NaN branch below
        # -- confirm whether that limitation is intended.
        import _jsre as re
        _m=re.match("^\d*\.?\d*(e\+?\d*)?$", value)
        if not _m:
            # Not a recognized numeric string: quiet NaN (no
            # ConversionSyntax is signaled on this Brython path).
            self._is_special = True
            self._int = ''
            self._exp='n'
            return self
        if '.' in value:
            # Fractional form, optionally with an exponent suffix.
            intpart, fracpart=value.split('.')
            if 'e' in fracpart:
                fracpart, exp=fracpart.split('e')
                exp=int(exp)
            else:
                exp=0
            #self._int = str(int(intpart+fracpart))
            self._int = intpart+fracpart
            self._exp = exp - len(fracpart)
            self._is_special = False
            return self
        else:
            #is this a pure int?
            self._is_special = False
            if 'e' in value:
                self._int, _exp=value.split('e')
                self._exp=int(_exp)
                #print(self._int, self._exp)
            else:
                self._int = value
                self._exp = 0
            return self
        # NOTE(review): everything below in the string branch is
        # unreachable -- both branches above return. Kept from the
        # original CPython code.
        #m = _parser(value.strip())
        #if m is None:
        if context is None:
            context = getcontext()
        return context._raise_error(ConversionSyntax,
                        "Invalid literal for Decimal: %r" % value)
        #if m.group('sign') == "-":
        #    self._sign = 1
        #else:
        #    self._sign = 0
        #intpart = m.group('int')
        #if intpart is not None:
        #    # finite number
        #    fracpart = m.group('frac') or ''
        #    exp = int(m.group('exp') or '0')
        #    self._int = str(int(intpart+fracpart))
        #    self._exp = exp - len(fracpart)
        #    self._is_special = False
        #else:
        #    diag = m.group('diag')
        #    if diag is not None:
        #        # NaN
        #        self._int = str(int(diag or '0')).lstrip('0')
        #        if m.group('signal'):
        #            self._exp = 'N'
        #        else:
        #            self._exp = 'n'
        #    else:
        #        # infinity
        #        self._int = '0'
        #        self._exp = 'F'
        #    self._is_special = True
        #return self
    # From an integer
    if isinstance(value, int):
        if value >= 0:
            self._sign = 0
        else:
            self._sign = 1
        self._exp = 0
        self._int = str(abs(value))
        self._is_special = False
        return self
    # From another decimal
    if isinstance(value, Decimal):
        self._exp = value._exp
        self._sign = value._sign
        self._int = value._int
        self._is_special = value._is_special
        return self
    # From an internal working value
    if isinstance(value, _WorkRep):
        self._sign = value.sign
        self._int = str(value.int)
        self._exp = int(value.exp)
        self._is_special = False
        return self
    # tuple/list conversion (possibly from as_tuple())
    if isinstance(value, (list,tuple)):
        if len(value) != 3:
            raise ValueError('Invalid tuple size in creation of Decimal '
                             'from list or tuple. The list or tuple '
                             'should have exactly three elements.')
        # process sign. The isinstance test rejects floats
        if not (isinstance(value[0], int) and value[0] in (0,1)):
            raise ValueError("Invalid sign. The first value in the tuple "
                             "should be an integer; either 0 for a "
                             "positive number or 1 for a negative number.")
        self._sign = value[0]
        if value[2] == 'F':
            # infinity: value[1] is ignored
            self._int = '0'
            self._exp = value[2]
            self._is_special = True
        else:
            # process and validate the digits in value[1]
            digits = []
            for digit in value[1]:
                if isinstance(digit, int) and 0 <= digit <= 9:
                    # skip leading zeros
                    if digits or digit != 0:
                        digits.append(digit)
                else:
                    raise ValueError("The second value in the tuple must "
                                     "be composed of integers in the range "
                                     "0 through 9.")
            if value[2] in ('n', 'N'):
                # NaN: digits form the diagnostic
                self._int = ''.join(map(str, digits))
                self._exp = value[2]
                self._is_special = True
            elif isinstance(value[2], int):
                # finite number: digits give the coefficient
                self._int = ''.join(map(str, digits or [0]))
                self._exp = value[2]
                self._is_special = False
            else:
                raise ValueError("The third value in the tuple must "
                                 "be an integer, or one of the "
                                 "strings 'F', 'n', 'N'.")
        return self
    if isinstance(value, float):
        # Mixing floats into the constructor records (or, if trapped,
        # raises) FloatOperation, then converts exactly.
        if context is None:
            context = getcontext()
        context._raise_error(FloatOperation,
            "strict semantics for mixing floats and Decimals are "
            "enabled")
        value = Decimal.from_float(value)
        self._exp = value._exp
        self._sign = value._sign
        self._int = value._int
        self._is_special = value._is_special
        return self
    raise TypeError("Cannot convert %r to Decimal" % value)
# @classmethod, but @decorator is not valid Python 2.3 syntax, so
# don't use it (see notes on Py2.3 compatibility at top of file)
def from_float(cls, f):
"""Converts a float to a decimal number, exactly.
Note that Decimal.from_float(0.1) is not the same as Decimal('0.1').
Since 0.1 is not exactly representable in binary floating point, the
value is stored as the nearest representable value which is
0x1.999999999999ap-4. The exact equivalent of the value in decimal
is 0.1000000000000000055511151231257827021181583404541015625.
>>> Decimal.from_float(0.1)
Decimal('0.1000000000000000055511151231257827021181583404541015625')
>>> Decimal.from_float(float('nan'))
Decimal('NaN')
>>> Decimal.from_float(float('inf'))
Decimal('Infinity')
>>> Decimal.from_float(-float('inf'))
Decimal('-Infinity')
>>> Decimal.from_float(-0.0)
Decimal('-0')
"""
if isinstance(f, int): # handle integer inputs
return cls(f)
if not isinstance(f, float):
raise TypeError("argument must be int or float.")
if _math.isinf(f) or _math.isnan(f):
return cls(repr(f))
if _math.copysign(1.0, f) == 1.0:
sign = 0
else:
sign = 1
n, d = abs(f).as_integer_ratio()
k = d.bit_length() - 1
result = _dec_from_triple(sign, str(n*5**k), -k)
if cls is Decimal:
return result
else:
return cls(result)
from_float = classmethod(from_float)
def _isnan(self):
"""Returns whether the number is not actually one.
0 if a number
1 if NaN
2 if sNaN
"""
if self._is_special:
exp = self._exp
if exp == 'n':
return 1
elif exp == 'N':
return 2
return 0
def _isinfinity(self):
"""Returns whether the number is infinite
0 if finite or not a number
1 if +INF
-1 if -INF
"""
if self._exp == 'F':
if self._sign:
return -1
return 1
return 0
def _check_nans(self, other=None, context=None):
"""Returns whether the number is not actually one.
if self, other are sNaN, signal
if self, other are NaN return nan
return 0
Done before operations.
"""
self_is_nan = self._isnan()
if other is None:
other_is_nan = False
else:
other_is_nan = other._isnan()
if self_is_nan or other_is_nan:
if context is None:
context = getcontext()
if self_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
self)
if other_is_nan == 2:
return context._raise_error(InvalidOperation, 'sNaN',
other)
if self_is_nan:
return self._fix_nan(context)
return other._fix_nan(context)
return 0
def _compare_check_nans(self, other, context):
    """Version of _check_nans used for the signaling comparisons
    compare_signal, __le__, __lt__, __ge__, __gt__.

    Signal InvalidOperation if either self or other is a (quiet
    or signaling) NaN. Signaling NaNs take precedence over quiet
    NaNs.

    Return 0 if neither operand is a NaN.
    """
    if context is None:
        context = getcontext()
    if self._is_special or other._is_special:
        # Order matters: sNaN in self, then sNaN in other, then qNaNs.
        if self.is_snan():
            return context._raise_error(InvalidOperation,
                                        'comparison involving sNaN',
                                        self)
        elif other.is_snan():
            return context._raise_error(InvalidOperation,
                                        'comparison involving sNaN',
                                        other)
        elif self.is_qnan():
            return context._raise_error(InvalidOperation,
                                        'comparison involving NaN',
                                        self)
        elif other.is_qnan():
            return context._raise_error(InvalidOperation,
                                        'comparison involving NaN',
                                        other)
    return 0
def __bool__(self):
"""Return True if self is nonzero; otherwise return False.
NaNs and infinities are considered nonzero.
"""
return self._is_special or self._int != '0'
def _cmp(self, other):
    """Compare the two non-NaN decimal instances self and other.

    Returns -1 if self < other, 0 if self == other and 1
    if self > other. This routine is for internal use only."""
    if self._is_special or other._is_special:
        # Only infinities reach here (NaNs are excluded by the caller);
        # _isinfinity() gives -1/0/1 which orders -INF < finite < +INF.
        self_inf = self._isinfinity()
        other_inf = other._isinfinity()
        if self_inf == other_inf:
            return 0
        elif self_inf < other_inf:
            return -1
        else:
            return 1
    # check for zeros; Decimal('0') == Decimal('-0')
    if not self:
        if not other:
            return 0
        else:
            # Sign of the nonzero operand decides: (-1)**sign is +1 or -1.
            return -((-1)**other._sign)
    if not other:
        return (-1)**self._sign
    # If different signs, neg one is less
    if other._sign < self._sign:
        return -1
    if self._sign < other._sign:
        return 1
    # Same sign: compare magnitudes via adjusted exponents, then by
    # zero-padding the coefficients to a common exponent.
    self_adjusted = self.adjusted()
    other_adjusted = other.adjusted()
    if self_adjusted == other_adjusted:
        self_padded = self._int + '0'*(self._exp - other._exp)
        other_padded = other._int + '0'*(other._exp - self._exp)
        if self_padded == other_padded:
            return 0
        elif self_padded < other_padded:
            return -(-1)**self._sign
        else:
            return (-1)**self._sign
    elif self_adjusted > other_adjusted:
        return (-1)**self._sign
    else: # self_adjusted < other_adjusted
        return -((-1)**self._sign)
# Note: The Decimal standard doesn't cover rich comparisons for
# Decimals. In particular, the specification is silent on the
# subject of what should happen for a comparison involving a NaN.
# We take the following approach:
#
# == comparisons involving a quiet NaN always return False
# != comparisons involving a quiet NaN always return True
# == or != comparisons involving a signaling NaN signal
# InvalidOperation, and return False or True as above if the
# InvalidOperation is not trapped.
# <, >, <= and >= comparisons involving a (quiet or signaling)
# NaN signal InvalidOperation, and return False if the
# InvalidOperation is not trapped.
#
# This behavior is designed to conform as closely as possible to
# that specified by IEEE 754.
def __eq__(self, other, context=None):
    """self == other (quiet NaNs compare unequal; sNaNs signal)."""
    self, other = _convert_for_comparison(self, other, equality_op=True)
    if other is NotImplemented:
        return other
    if self._check_nans(other, context):
        return False
    return not self._cmp(other)
def __ne__(self, other, context=None):
    """self != other (any NaN operand makes this True; sNaNs signal)."""
    self, other = _convert_for_comparison(self, other, equality_op=True)
    if other is NotImplemented:
        return other
    if self._check_nans(other, context):
        return True
    return bool(self._cmp(other))
def __lt__(self, other, context=None):
    """self < other (signaling comparison: NaN operands signal)."""
    self, other = _convert_for_comparison(self, other)
    if other is NotImplemented:
        return other
    if self._compare_check_nans(other, context):
        return False
    return self._cmp(other) < 0
def __le__(self, other, context=None):
    """self <= other (signaling comparison: NaN operands signal)."""
    self, other = _convert_for_comparison(self, other)
    if other is NotImplemented:
        return other
    if self._compare_check_nans(other, context):
        return False
    return self._cmp(other) <= 0
def __gt__(self, other, context=None):
    """self > other (signaling comparison: NaN operands signal)."""
    self, other = _convert_for_comparison(self, other)
    if other is NotImplemented:
        return other
    if self._compare_check_nans(other, context):
        return False
    return self._cmp(other) > 0
def __ge__(self, other, context=None):
    """self >= other (signaling comparison: NaN operands signal)."""
    self, other = _convert_for_comparison(self, other)
    if other is NotImplemented:
        return other
    if self._compare_check_nans(other, context):
        return False
    return self._cmp(other) >= 0
def compare(self, other, context=None):
    """Compares one to another.

    -1 => a < b
    0 => a = b
    1 => a > b
    NaN => one is NaN
    Like __cmp__, but returns Decimal instances.
    """
    other = _convert_other(other, raiseit=True)
    # Compare(NaN, NaN) = NaN
    if (self._is_special or other and other._is_special):
        ans = self._check_nans(other, context)
        if ans:
            return ans
    # Wrap the -1/0/1 from _cmp back up as a Decimal.
    return Decimal(self._cmp(other))
def __hash__(self):
    """x.__hash__() <==> hash(x)"""
    # In order to make sure that the hash of a Decimal instance
    # agrees with the hash of a numerically equal integer, float
    # or Fraction, we follow the rules for numeric hashes outlined
    # in the documentation. (See library docs, 'Built-in Types').
    if self._is_special:
        if self.is_snan():
            raise TypeError('Cannot hash a signaling NaN value.')
        elif self.is_nan():
            return _PyHASH_NAN
        else:
            if self._sign:
                return -_PyHASH_INF
            else:
                return _PyHASH_INF
    # Finite case: hash(m * 10**e) == m * 10**e mod _PyHASH_MODULUS.
    # _PyHASH_* constants are module-level; _PyHASH_10INV is presumably
    # the modular inverse of 10 -- defined elsewhere in this file.
    if self._exp >= 0:
        exp_hash = pow(10, self._exp, _PyHASH_MODULUS)
    else:
        exp_hash = pow(_PyHASH_10INV, -self._exp, _PyHASH_MODULUS)
    hash_ = int(self._int) * exp_hash % _PyHASH_MODULUS
    ans = hash_ if self >= 0 else -hash_
    # CPython reserves -1 as an error marker for hash functions.
    return -2 if ans == -1 else ans
def as_tuple(self):
    """Represents the number as a triple tuple.

    To show the internals exactly as they are.
    """
    digits = tuple(int(digit) for digit in self._int)
    return DecimalTuple(self._sign, digits, self._exp)
def __repr__(self):
    """Represents the number as an instance of Decimal."""
    # Invariant: eval(repr(d)) == d
    return "Decimal('%s')" % str(self)
def __str__(self, eng=False, context=None):
    """Return string representation of the number in scientific notation.

    Captures all of the information in the underlying representation.
    With eng=True, uses engineering notation (exponent a multiple of 3).
    The context supplies only the 'capitals' flag for the exponent letter.
    """
    sign = ['', '-'][self._sign]
    if self._is_special:
        # Specials: 'Infinity', 'NaN'/'sNaN' with optional diagnostic digits.
        if self._exp == 'F':
            return sign + 'Infinity'
        elif self._exp == 'n':
            return sign + 'NaN' + self._int
        else: # self._exp == 'N'
            return sign + 'sNaN' + self._int
    # number of digits of self._int to left of decimal point
    leftdigits = self._exp + len(self._int)
    # dotplace is number of digits of self._int to the left of the
    # decimal point in the mantissa of the output string (that is,
    # after adjusting the exponent)
    if self._exp <= 0 and leftdigits > -6:
        # no exponent required
        dotplace = leftdigits
    elif not eng:
        # usual scientific notation: 1 digit on left of the point
        dotplace = 1
    elif self._int == '0':
        # engineering notation, zero
        dotplace = (leftdigits + 1) % 3 - 1
    else:
        # engineering notation, nonzero
        dotplace = (leftdigits - 1) % 3 + 1
    # Split the coefficient around the decimal point, padding with
    # zeros where the point falls outside the stored digits.
    if dotplace <= 0:
        intpart = '0'
        fracpart = '.' + '0'*(-dotplace) + self._int
    elif dotplace >= len(self._int):
        intpart = self._int+'0'*(dotplace-len(self._int))
        fracpart = ''
    else:
        intpart = self._int[:dotplace]
        fracpart = '.' + self._int[dotplace:]
    if leftdigits == dotplace:
        exp = ''
    else:
        # Only here is the context consulted (for 'e' vs 'E').
        if context is None:
            context = getcontext()
        exp = ['e', 'E'][context.capitals] + "%+d" % (leftdigits-dotplace)
    return sign + intpart + fracpart + exp
def to_eng_string(self, context=None):
    """Convert to engineering-type string.

    Engineering notation has an exponent which is a multiple of 3, so there
    are up to 3 digits left of the decimal place.

    Same rules for when in exponential and when as a value as in __str__.
    """
    # Thin wrapper: __str__ does all the work with eng=True.
    return self.__str__(eng=True, context=context)
def __neg__(self, context=None):
    """Returns a copy with the sign switched, rounded to the context.

    -Decimal('0') is Decimal('0'), not Decimal('-0'), except in
    ROUND_FLOOR rounding mode.
    """
    if self._is_special:
        nan = self._check_nans(context=context)
        if nan:
            return nan
    if context is None:
        context = getcontext()
    if not self and context.rounding != ROUND_FLOOR:
        negated = self.copy_abs()
    else:
        negated = self.copy_negate()
    return negated._fix(context)
def __pos__(self, context=None):
    """Returns a copy, unless it is a sNaN.

    Rounds the number (if more then precision digits).
    +(-0) gives 0, except in ROUND_FLOOR rounding mode.
    """
    if self._is_special:
        nan = self._check_nans(context=context)
        if nan:
            return nan
    if context is None:
        context = getcontext()
    if not self and context.rounding != ROUND_FLOOR:
        result = self.copy_abs()
    else:
        result = Decimal(self)
    return result._fix(context)
def __abs__(self, round=True, context=None):
    """Returns the absolute value of self.

    If the keyword argument 'round' is false, do not round. The
    expression self.__abs__(round=False) is equivalent to
    self.copy_abs().
    """
    if not round:
        return self.copy_abs()
    if self._is_special:
        nan = self._check_nans(context=context)
        if nan:
            return nan
    # Delegate to the unary operators, which round and normalize zeros.
    if self._sign:
        return self.__neg__(context=context)
    return self.__pos__(context=context)
def __add__(self, other, context=None):
    """Returns self + other.

    -INF + INF (or the reverse) cause InvalidOperation errors.
    """
    other = _convert_other(other)
    if other is NotImplemented:
        return other
    if context is None:
        context = getcontext()
    if self._is_special or other._is_special:
        ans = self._check_nans(other, context)
        if ans:
            return ans
        if self._isinfinity():
            # If both INF, same sign => same as both, opposite => error.
            if self._sign != other._sign and other._isinfinity():
                return context._raise_error(InvalidOperation, '-INF + INF')
            return Decimal(self)
        if other._isinfinity():
            return Decimal(other) # Can't both be infinity here
    exp = min(self._exp, other._exp)
    negativezero = 0
    if context.rounding == ROUND_FLOOR and self._sign != other._sign:
        # If the answer is 0, the sign should be negative, in this case.
        negativezero = 1
    if not self and not other:
        # 0 + 0: spec-mandated sign handling for signed zeros.
        sign = min(self._sign, other._sign)
        if negativezero:
            sign = 1
        ans = _dec_from_triple(sign, '0', exp)
        ans = ans._fix(context)
        return ans
    if not self:
        # 0 + x: just rescale/round the other operand. Clamping exp
        # keeps the rescale cheap when the exponents are far apart.
        exp = max(exp, other._exp - context.prec-1)
        ans = other._rescale(exp, context.rounding)
        ans = ans._fix(context)
        return ans
    if not other:
        # x + 0: symmetric case.
        exp = max(exp, self._exp - context.prec-1)
        ans = self._rescale(exp, context.rounding)
        ans = ans._fix(context)
        return ans
    # General case: align the operands and add/subtract coefficients.
    op1 = _WorkRep(self)
    op2 = _WorkRep(other)
    op1, op2 = _normalize(op1, op2, context.prec)
    result = _WorkRep()
    if op1.sign != op2.sign:
        # Equal and opposite
        if op1.int == op2.int:
            ans = _dec_from_triple(negativezero, '0', exp)
            ans = ans._fix(context)
            return ans
        if op1.int < op2.int:
            op1, op2 = op2, op1
            # OK, now abs(op1) > abs(op2)
        if op1.sign == 1:
            result.sign = 1
            op1.sign, op2.sign = op2.sign, op1.sign
        else:
            result.sign = 0
            # So we know the sign, and op1 > 0.
    elif op1.sign == 1:
        result.sign = 1
        op1.sign, op2.sign = (0, 0)
    else:
        result.sign = 0
        # Now, op1 > abs(op2) > 0
    if op2.sign == 0:
        result.int = op1.int + op2.int
    else:
        result.int = op1.int - op2.int
    result.exp = op1.exp
    ans = Decimal(result)
    ans = ans._fix(context)
    return ans
__radd__ = __add__
def __sub__(self, other, context=None):
    """Return self - other"""
    other = _convert_other(other)
    if other is NotImplemented:
        return other
    # Screen NaNs up front so a quiet/signaling NaN is reported for the
    # subtraction itself rather than for the negation below.
    if self._is_special or other._is_special:
        nan = self._check_nans(other, context=context)
        if nan:
            return nan
    # self - other is computed as self + other.copy_negate()
    return self.__add__(other.copy_negate(), context=context)
def __rsub__(self, other, context=None):
    """Return other - self (reflected subtraction)."""
    left = _convert_other(other)
    if left is NotImplemented:
        return left
    return left.__sub__(self, context=context)
def __mul__(self, other, context=None):
    """Return self * other.

    (+-) INF * 0 (or its reverse) raise InvalidOperation.
    """
    other = _convert_other(other)
    if other is NotImplemented:
        return other
    if context is None:
        context = getcontext()
    # Sign of a product is the XOR of the operand signs.
    resultsign = self._sign ^ other._sign
    if self._is_special or other._is_special:
        ans = self._check_nans(other, context)
        if ans:
            return ans
        if self._isinfinity():
            if not other:
                return context._raise_error(InvalidOperation, '(+-)INF * 0')
            return _SignedInfinity[resultsign]
        if other._isinfinity():
            if not self:
                return context._raise_error(InvalidOperation, '0 * (+-)INF')
            return _SignedInfinity[resultsign]
    resultexp = self._exp + other._exp
    # Special case for multiplying by zero
    if not self or not other:
        ans = _dec_from_triple(resultsign, '0', resultexp)
        # Fixing in case the exponent is out of bounds
        ans = ans._fix(context)
        return ans
    # Special case for multiplying by power of 10
    if self._int == '1':
        ans = _dec_from_triple(resultsign, other._int, resultexp)
        ans = ans._fix(context)
        return ans
    if other._int == '1':
        ans = _dec_from_triple(resultsign, self._int, resultexp)
        ans = ans._fix(context)
        return ans
    # General case: integer-multiply the coefficients, then round.
    op1 = _WorkRep(self)
    op2 = _WorkRep(other)
    ans = _dec_from_triple(resultsign, str(op1.int * op2.int), resultexp)
    ans = ans._fix(context)
    return ans
__rmul__ = __mul__
    def __truediv__(self, other, context=None):
        """Return self / other."""
        other = _convert_other(other)
        if other is NotImplemented:
            return NotImplemented

        if context is None:
            context = getcontext()

        sign = self._sign ^ other._sign

        if self._is_special or other._is_special:
            ans = self._check_nans(other, context)
            if ans:
                return ans

            if self._isinfinity() and other._isinfinity():
                return context._raise_error(InvalidOperation, '(+-)INF/(+-)INF')

            if self._isinfinity():
                return _SignedInfinity[sign]

            if other._isinfinity():
                # finite / infinity underflows all the way to a signed zero
                # at the smallest representable exponent.
                context._raise_error(Clamped, 'Division by infinity')
                return _dec_from_triple(sign, '0', context.Etiny())

        # Special cases for zeroes
        if not other:
            if not self:
                return context._raise_error(DivisionUndefined, '0 / 0')
            return context._raise_error(DivisionByZero, 'x / 0', sign)

        if not self:
            exp = self._exp - other._exp
            coeff = 0
        else:
            # OK, so neither = 0, INF or NaN
            # Shift so the integer quotient has prec+1 digits.
            shift = len(other._int) - len(self._int) + context.prec + 1
            exp = self._exp - other._exp - shift
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if shift >= 0:
                coeff, remainder = divmod(op1.int * 10**shift, op2.int)
            else:
                coeff, remainder = divmod(op1.int, op2.int * 10**-shift)
            if remainder:
                # result is not exact; adjust to ensure correct rounding
                # Nudging a last digit of 0 or 5 to 1 or 6 guarantees that
                # _fix rounds the truncated quotient the right way.
                if coeff % 5 == 0:
                    coeff += 1
            else:
                # result is exact; get as close to ideal exponent as possible
                ideal_exp = self._exp - other._exp
                while exp < ideal_exp and coeff % 10 == 0:
                    coeff //= 10
                    exp += 1

        ans = _dec_from_triple(sign, str(coeff), exp)
        return ans._fix(context)
    def _divide(self, other, context):
        """Return (self // other, self % other), to context.prec precision.

        Assumes that neither self nor other is a NaN, that self is not
        infinite and that other is nonzero.
        """
        sign = self._sign ^ other._sign
        if other._isinfinity():
            ideal_exp = self._exp
        else:
            ideal_exp = min(self._exp, other._exp)

        expdiff = self.adjusted() - other.adjusted()
        # Quotient is certainly 0 when self is 0, the divisor is infinite,
        # or self is smaller than other by two or more orders of magnitude.
        if not self or other._isinfinity() or expdiff <= -2:
            return (_dec_from_triple(sign, '0', 0),
                    self._rescale(ideal_exp, context.rounding))
        if expdiff <= context.prec:
            # Align both coefficients to the smaller exponent, then use
            # integer divmod to get quotient and remainder exactly.
            op1 = _WorkRep(self)
            op2 = _WorkRep(other)
            if op1.exp >= op2.exp:
                op1.int *= 10**(op1.exp - op2.exp)
            else:
                op2.int *= 10**(op2.exp - op1.exp)
            q, r = divmod(op1.int, op2.int)
            if q < 10**context.prec:
                return (_dec_from_triple(sign, str(q), 0),
                        _dec_from_triple(self._sign, str(r), ideal_exp))

        # Here the quotient is too large to be representable
        ans = context._raise_error(DivisionImpossible,
                                   'quotient too large in //, % or divmod')
        return ans, ans
def __rtruediv__(self, other, context=None):
"""Swaps self/other and returns __truediv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__truediv__(self, context=context)
    def __divmod__(self, other, context=None):
        """
        Return (self // other, self % other)
        """
        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        # A NaN operand poisons both components of the result.
        ans = self._check_nans(other, context)
        if ans:
            return (ans, ans)

        sign = self._sign ^ other._sign
        if self._isinfinity():
            if other._isinfinity():
                ans = context._raise_error(InvalidOperation, 'divmod(INF, INF)')
                return ans, ans
            else:
                # INF // finite is a signed infinity; INF % x is invalid.
                return (_SignedInfinity[sign],
                        context._raise_error(InvalidOperation, 'INF % x'))

        if not other:
            if not self:
                ans = context._raise_error(DivisionUndefined, 'divmod(0, 0)')
                return ans, ans
            else:
                return (context._raise_error(DivisionByZero, 'x // 0', sign),
                        context._raise_error(InvalidOperation, 'x % 0'))

        quotient, remainder = self._divide(other, context)
        # Only the remainder needs rounding; _divide already bounds the
        # quotient to context precision.
        remainder = remainder._fix(context)
        return quotient, remainder
def __rdivmod__(self, other, context=None):
"""Swaps self/other and returns __divmod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__divmod__(self, context=context)
def __mod__(self, other, context=None):
"""
self % other
"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
return context._raise_error(InvalidOperation, 'INF % x')
elif not other:
if self:
return context._raise_error(InvalidOperation, 'x % 0')
else:
return context._raise_error(DivisionUndefined, '0 % 0')
remainder = self._divide(other, context)[1]
remainder = remainder._fix(context)
return remainder
def __rmod__(self, other, context=None):
"""Swaps self/other and returns __mod__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__mod__(self, context=context)
    def remainder_near(self, other, context=None):
        """
        Remainder nearest to 0-  abs(remainder-near) <= other/2
        """
        if context is None:
            context = getcontext()

        other = _convert_other(other, raiseit=True)

        ans = self._check_nans(other, context)
        if ans:
            return ans

        # self == +/-infinity -> InvalidOperation
        if self._isinfinity():
            return context._raise_error(InvalidOperation,
                                        'remainder_near(infinity, x)')

        # other == 0 -> either InvalidOperation or DivisionUndefined
        if not other:
            if self:
                return context._raise_error(InvalidOperation,
                                            'remainder_near(x, 0)')
            else:
                return context._raise_error(DivisionUndefined,
                                            'remainder_near(0, 0)')

        # other = +/-infinity -> remainder = self
        if other._isinfinity():
            ans = Decimal(self)
            return ans._fix(context)

        # self = 0 -> remainder = self, with ideal exponent
        ideal_exponent = min(self._exp, other._exp)
        if not self:
            ans = _dec_from_triple(self._sign, '0', ideal_exponent)
            return ans._fix(context)

        # catch most cases of large or small quotient
        expdiff = self.adjusted() - other.adjusted()
        if expdiff >= context.prec + 1:
            # expdiff >= prec+1 => abs(self/other) > 10**prec
            return context._raise_error(DivisionImpossible)
        if expdiff <= -2:
            # expdiff <= -2 => abs(self/other) < 0.1
            ans = self._rescale(ideal_exponent, context.rounding)
            return ans._fix(context)

        # adjust both arguments to have the same exponent, then divide
        op1 = _WorkRep(self)
        op2 = _WorkRep(other)
        if op1.exp >= op2.exp:
            op1.int *= 10**(op1.exp - op2.exp)
        else:
            op2.int *= 10**(op2.exp - op1.exp)
        q, r = divmod(op1.int, op2.int)
        # remainder is r*10**ideal_exponent; other is +/-op2.int *
        # 10**ideal_exponent.   Apply correction to ensure that
        # abs(remainder) <= abs(other)/2
        # The (q&1) term applies the correction on an exact half only when
        # q is odd, so ties leave the quotient even (round-half-even).
        if 2*r + (q&1) > op2.int:
            r -= op2.int
            q += 1

        if q >= 10**context.prec:
            return context._raise_error(DivisionImpossible)

        # result has same sign as self unless r is negative
        sign = self._sign
        if r < 0:
            sign = 1-sign
            r = -r

        ans = _dec_from_triple(sign, str(r), ideal_exponent)
        return ans._fix(context)
def __floordiv__(self, other, context=None):
"""self // other"""
other = _convert_other(other)
if other is NotImplemented:
return other
if context is None:
context = getcontext()
ans = self._check_nans(other, context)
if ans:
return ans
if self._isinfinity():
if other._isinfinity():
return context._raise_error(InvalidOperation, 'INF // INF')
else:
return _SignedInfinity[self._sign ^ other._sign]
if not other:
if self:
return context._raise_error(DivisionByZero, 'x // 0',
self._sign ^ other._sign)
else:
return context._raise_error(DivisionUndefined, '0 // 0')
return self._divide(other, context)[0]
def __rfloordiv__(self, other, context=None):
"""Swaps self/other and returns __floordiv__."""
other = _convert_other(other)
if other is NotImplemented:
return other
return other.__floordiv__(self, context=context)
def __float__(self):
"""Float representation."""
if self._isnan():
if self.is_snan():
raise ValueError("Cannot convert signaling NaN to float")
s = "-nan" if self._sign else "nan"
else:
s = str(self)
return float(s)
def __int__(self):
"""Converts self to an int, truncating if necessary."""
if self._is_special:
if self._isnan():
raise ValueError("Cannot convert NaN to integer")
elif self._isinfinity():
raise OverflowError("Cannot convert infinity to integer")
s = (-1)**self._sign
if self._exp >= 0:
return s*int(self._int)*10**self._exp
else:
return s*int(self._int[:self._exp] or '0')
__trunc__ = __int__
    def real(self):
        # The real component of a Decimal is the value itself.
        return self
    real = property(real)
    def imag(self):
        # A Decimal is a real number, so its imaginary part is exactly 0.
        return Decimal(0)
    imag = property(imag)
    def conjugate(self):
        """Return self; the conjugate of a real number is itself."""
        return self
    def __complex__(self):
        # Goes through float(), so a signaling NaN raises ValueError there.
        return complex(float(self))
def _fix_nan(self, context):
"""Decapitate the payload of a NaN to fit the context"""
payload = self._int
# maximum length of payload is precision if clamp=0,
# precision-1 if clamp=1.
max_payload_len = context.prec - context.clamp
if len(payload) > max_payload_len:
payload = payload[len(payload)-max_payload_len:].lstrip('0')
return _dec_from_triple(self._sign, payload, self._exp, True)
return Decimal(self)
    def _fix(self, context):
        """Round if it is necessary to keep self within prec precision.

        Rounds and fixes the exponent.  Does not raise on a sNaN.

        Arguments:
        self - Decimal instance
        context - context used.
        """

        if self._is_special:
            if self._isnan():
                # decapitate payload if necessary
                return self._fix_nan(context)
            else:
                # self is +/-Infinity; return unaltered
                return Decimal(self)

        # if self is zero then exponent should be between Etiny and
        # Emax if clamp==0, and between Etiny and Etop if clamp==1.
        Etiny = context.Etiny()
        Etop = context.Etop()
        if not self:
            exp_max = [context.Emax, Etop][context.clamp]
            new_exp = min(max(self._exp, Etiny), exp_max)
            if new_exp != self._exp:
                context._raise_error(Clamped)
                return _dec_from_triple(self._sign, '0', new_exp)
            else:
                return Decimal(self)

        # exp_min is the smallest allowable exponent of the result,
        # equal to max(self.adjusted()-context.prec+1, Etiny)
        exp_min = len(self._int) + self._exp - context.prec
        if exp_min > Etop:
            # overflow: exp_min > Etop iff self.adjusted() > Emax
            ans = context._raise_error(Overflow, 'above Emax', self._sign)
            context._raise_error(Inexact)
            context._raise_error(Rounded)
            return ans

        self_is_subnormal = exp_min < Etiny
        if self_is_subnormal:
            exp_min = Etiny

        # round if self has too many digits
        if self._exp < exp_min:
            # digits = number of coefficient digits that survive rounding
            digits = len(self._int) + self._exp - exp_min
            if digits < 0:
                # value is smaller than one unit in the last place of the
                # result; substitute a tiny stand-in so rounding sees a
                # nonzero discarded part.
                self = _dec_from_triple(self._sign, '1', exp_min-1)
                digits = 0
            # look up the unbound rounding helper for the context's mode;
            # it is called explicitly with self as first argument.
            rounding_method = self._pick_rounding_function[context.rounding]
            changed = rounding_method(self, digits)
            coeff = self._int[:digits] or '0'
            if changed > 0:
                coeff = str(int(coeff)+1)
                if len(coeff) > context.prec:
                    # rounding up carried into an extra digit (e.g. 999->1000)
                    coeff = coeff[:-1]
                    exp_min += 1

            # check whether the rounding pushed the exponent out of range
            if exp_min > Etop:
                ans = context._raise_error(Overflow, 'above Emax', self._sign)
            else:
                ans = _dec_from_triple(self._sign, coeff, exp_min)

            # raise the appropriate signals, taking care to respect
            # the precedence described in the specification
            if changed and self_is_subnormal:
                context._raise_error(Underflow)
            if self_is_subnormal:
                context._raise_error(Subnormal)
            if changed:
                context._raise_error(Inexact)
            context._raise_error(Rounded)
            if not ans:
                # raise Clamped on underflow to 0
                context._raise_error(Clamped)
            return ans

        if self_is_subnormal:
            context._raise_error(Subnormal)

        # fold down if clamp == 1 and self has too few digits
        if context.clamp == 1 and self._exp > Etop:
            context._raise_error(Clamped)
            self_padded = self._int + '0'*(self._exp - Etop)
            return _dec_from_triple(self._sign, self_padded, Etop)

        # here self was representable to begin with; return unchanged
        return Decimal(self)
# for each of the rounding functions below:
# self is a finite, nonzero Decimal
# prec is an integer satisfying 0 <= prec < len(self._int)
#
# each function returns either -1, 0, or 1, as follows:
# 1 indicates that self should be rounded up (away from zero)
# 0 indicates that self should be truncated, and that all the
# digits to be truncated are zeros (so the value is unchanged)
# -1 indicates that there are nonzero digits to be truncated
def _round_down(self, prec):
"""Also known as round-towards-0, truncate."""
if _all_zeros(self._int, prec):
return 0
else:
return -1
    def _round_up(self, prec):
        """Rounds away from 0."""
        # Mirror image of truncation: a nonzero discarded part (-1 from
        # _round_down) becomes +1, i.e. round away from zero.
        return -self._round_down(prec)
def _round_half_up(self, prec):
"""Rounds 5 up (away from 0)"""
if self._int[prec] in '56789':
return 1
elif _all_zeros(self._int, prec):
return 0
else:
return -1
def _round_half_down(self, prec):
"""Round 5 down"""
if _exact_half(self._int, prec):
return -1
else:
return self._round_half_up(prec)
def _round_half_even(self, prec):
"""Round 5 to even, rest to nearest."""
if _exact_half(self._int, prec) and \
(prec == 0 or self._int[prec-1] in '02468'):
return -1
else:
return self._round_half_up(prec)
def _round_ceiling(self, prec):
"""Rounds up (not away from 0 if negative.)"""
if self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_floor(self, prec):
"""Rounds down (not towards 0 if negative)"""
if not self._sign:
return self._round_down(prec)
else:
return -self._round_down(prec)
def _round_05up(self, prec):
"""Round down unless digit prec-1 is 0 or 5."""
if prec and self._int[prec-1] not in '05':
return self._round_down(prec)
else:
return -self._round_down(prec)
_pick_rounding_function = dict(
ROUND_DOWN = _round_down,
ROUND_UP = _round_up,
ROUND_HALF_UP = _round_half_up,
ROUND_HALF_DOWN = _round_half_down,
ROUND_HALF_EVEN = _round_half_even,
ROUND_CEILING = _round_ceiling,
ROUND_FLOOR = _round_floor,
ROUND_05UP = _round_05up,
)
    def __round__(self, n=None):
        """Round self to the nearest integer, or to a given precision.

        If only one argument is supplied, round a finite Decimal
        instance self to the nearest integer.  If self is infinite or
        a NaN then a Python exception is raised.  If self is finite
        and lies exactly halfway between two integers then it is
        rounded to the integer with even last digit.

        >>> round(Decimal('123.456'))
        123
        >>> round(Decimal('-456.789'))
        -457
        >>> round(Decimal('-3.0'))
        -3
        >>> round(Decimal('2.5'))
        2
        >>> round(Decimal('3.5'))
        4
        >>> round(Decimal('Inf'))
        Traceback (most recent call last):
          ...
        OverflowError: cannot round an infinity
        >>> round(Decimal('NaN'))
        Traceback (most recent call last):
          ...
        ValueError: cannot round a NaN

        If a second argument n is supplied, self is rounded to n
        decimal places using the rounding mode for the current
        context.

        For an integer n, round(self, -n) is exactly equivalent to
        self.quantize(Decimal('1En')).

        >>> round(Decimal('123.456'), 0)
        Decimal('123')
        >>> round(Decimal('123.456'), 2)
        Decimal('123.46')
        >>> round(Decimal('123.456'), -2)
        Decimal('1E+2')
        >>> round(Decimal('-Infinity'), 37)
        Decimal('NaN')
        >>> round(Decimal('sNaN123'), 0)
        Decimal('NaN123')

        """
        if n is not None:
            # two-argument form: use the equivalent quantize call
            if not isinstance(n, int):
                raise TypeError('Second argument to round should be integral')
            # quantize against 1E-n; special values flow through quantize's
            # own NaN/infinity handling (see the doctests above).
            exp = _dec_from_triple(0, '1', -n)
            return self.quantize(exp)

        # one-argument form
        if self._is_special:
            if self.is_nan():
                raise ValueError("cannot round a NaN")
            else:
                raise OverflowError("cannot round an infinity")
        # rescale to exponent 0 with banker's rounding, then convert exactly
        return int(self._rescale(0, ROUND_HALF_EVEN))
def __floor__(self):
"""Return the floor of self, as an integer.
For a finite Decimal instance self, return the greatest
integer n such that n <= self. If self is infinite or a NaN
then a Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_FLOOR))
def __ceil__(self):
"""Return the ceiling of self, as an integer.
For a finite Decimal instance self, return the least integer n
such that n >= self. If self is infinite or a NaN then a
Python exception is raised.
"""
if self._is_special:
if self.is_nan():
raise ValueError("cannot round a NaN")
else:
raise OverflowError("cannot round an infinity")
return int(self._rescale(0, ROUND_CEILING))
    def fma(self, other, third, context=None):
        """Fused multiply-add.

        Returns self*other+third with no rounding of the intermediate
        product self*other.

        self and other are multiplied together, with no rounding of
        the result.  The third operand is then added to the result,
        and a single final rounding is performed.
        """

        other = _convert_other(other, raiseit=True)
        third = _convert_other(third, raiseit=True)

        # compute product; raise InvalidOperation if either operand is
        # a signaling NaN or if the product is zero times infinity.
        # The _exp codes checked below: 'N' marks a signaling NaN (raises
        # InvalidOperation), 'n' a NaN that propagates, 'F' an infinity.
        if self._is_special or other._is_special:
            if context is None:
                context = getcontext()
            if self._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', self)
            if other._exp == 'N':
                return context._raise_error(InvalidOperation, 'sNaN', other)
            if self._exp == 'n':
                product = self
            elif other._exp == 'n':
                product = other
            elif self._exp == 'F':
                if not other:
                    return context._raise_error(InvalidOperation,
                                                'INF * 0 in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
            elif other._exp == 'F':
                if not self:
                    return context._raise_error(InvalidOperation,
                                                '0 * INF in fma')
                product = _SignedInfinity[self._sign ^ other._sign]
        else:
            # exact product via integer arithmetic; the single rounding
            # happens inside the final __add__.
            product = _dec_from_triple(self._sign ^ other._sign,
                                       str(int(self._int) * int(other._int)),
                                       self._exp + other._exp)

        return product.__add__(third, context)
    def _power_modulo(self, other, modulo, context=None):
        """Three argument version of __pow__"""

        other = _convert_other(other)
        if other is NotImplemented:
            return other
        modulo = _convert_other(modulo)
        if modulo is NotImplemented:
            return modulo

        if context is None:
            context = getcontext()

        # deal with NaNs: if there are any sNaNs then first one wins,
        # (i.e. behaviour for NaNs is identical to that of fma)
        # (_isnan() returns 2 for a signaling NaN here, judged by the
        # sNaN errors raised below, and a truthy value for any NaN.)
        self_is_nan = self._isnan()
        other_is_nan = other._isnan()
        modulo_is_nan = modulo._isnan()
        if self_is_nan or other_is_nan or modulo_is_nan:
            if self_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            self)
            if other_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            other)
            if modulo_is_nan == 2:
                return context._raise_error(InvalidOperation, 'sNaN',
                                            modulo)
            if self_is_nan:
                return self._fix_nan(context)
            if other_is_nan:
                return other._fix_nan(context)
            return modulo._fix_nan(context)

        # check inputs: we apply same restrictions as Python's pow()
        if not (self._isinteger() and
                other._isinteger() and
                modulo._isinteger()):
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument not allowed '
                                        'unless all arguments are integers')
        if other < 0:
            return context._raise_error(InvalidOperation,
                                        'pow() 2nd argument cannot be '
                                        'negative when 3rd argument specified')
        if not modulo:
            return context._raise_error(InvalidOperation,
                                        'pow() 3rd argument cannot be 0')

        # additional restriction for decimal: the modulus must be less
        # than 10**prec in absolute value
        if modulo.adjusted() >= context.prec:
            return context._raise_error(InvalidOperation,
                                        'insufficient precision: pow() 3rd '
                                        'argument must not have more than '
                                        'precision digits')

        # define 0**0 == NaN, for consistency with two-argument pow
        # (even though it hurts!)
        if not other and not self:
            return context._raise_error(InvalidOperation,
                                        'at least one of pow() 1st argument '
                                        'and 2nd argument must be nonzero ;'
                                        '0**0 is not defined')

        # compute sign of result
        if other._iseven():
            sign = 0
        else:
            sign = self._sign

        # convert modulo to a Python integer, and self and other to
        # Decimal integers (i.e. force their exponents to be >= 0)
        modulo = abs(int(modulo))
        base = _WorkRep(self.to_integral_value())
        exponent = _WorkRep(other.to_integral_value())

        # compute result using integer pow()
        # first fold base's power-of-ten exponent into the residue ...
        base = (base.int % modulo * pow(10, base.exp, modulo)) % modulo
        # ... then apply the exponent's own power-of-ten factor by
        # repeatedly raising to the 10th power, all modulo `modulo`.
        for i in range(exponent.exp):
            base = pow(base, 10, modulo)
        base = pow(base, exponent.int, modulo)

        return _dec_from_triple(sign, str(base), 0)
    def _power_exact(self, other, p):
        """Attempt to compute self**other exactly.

        Given Decimals self and other and an integer p, attempt to
        compute an exact result for the power self**other, with p
        digits of precision.  Return None if self**other is not
        exactly representable in p digits.

        Assumes that elimination of special cases has already been
        performed: self and other must both be nonspecial; self must
        be positive and not numerically equal to 1; other must be
        nonzero.  For efficiency, other._exp should not be too large,
        so that 10**abs(other._exp) is a feasible calculation."""

        # In the comments below, we write x for the value of self and y for the
        # value of other.  Write x = xc*10**xe and abs(y) = yc*10**ye, with xc
        # and yc positive integers not divisible by 10.

        # The main purpose of this method is to identify the *failure*
        # of x**y to be exactly representable with as little effort as
        # possible.  So we look for cheap and easy tests that
        # eliminate the possibility of x**y being exact.  Only if all
        # these tests are passed do we go on to actually compute x**y.

        # Here's the main idea.  Express y as a rational number m/n, with m and
        # n relatively prime and n>0.  Then for x**y to be exactly
        # representable (at *any* precision), xc must be the nth power of a
        # positive integer and xe must be divisible by n.  If y is negative
        # then additionally xc must be a power of either 2 or 5, hence a power
        # of 2**n or 5**n.
        #
        # There's a limit to how small |y| can be: if y=m/n as above
        # then:
        #
        #  (1) if xc != 1 then for the result to be representable we
        #      need xc**(1/n) >= 2, and hence also xc**|y| >= 2.  So
        #      if |y| <= 1/nbits(xc) then xc < 2**nbits(xc) <=
        #      2**(1/|y|), hence xc**|y| < 2 and the result is not
        #      representable.
        #
        #  (2) if xe != 0, |xe|*(1/n) >= 1, so |xe|*|y| >= 1.  Hence if
        #      |y| < 1/|xe| then the result is not representable.
        #
        # Note that since x is not equal to 1, at least one of (1) and
        # (2) must apply.  Now |y| < 1/nbits(xc) iff |yc|*nbits(xc) <
        # 10**-ye iff len(str(|yc|*nbits(xc)) <= -ye.
        #
        # There's also a limit to how large y can be, at least if it's
        # positive: the normalized result will have coefficient xc**y,
        # so if it's representable then xc**y < 10**p, and y <
        # p/log10(xc).  Hence if y*log10(xc) >= p then the result is
        # not exactly representable.

        # if len(str(abs(yc*xe)) <= -ye then abs(yc*xe) < 10**-ye,
        # so |y| < 1/xe and the result is not representable.
        # Similarly, len(str(abs(yc)*xc_bits)) <= -ye implies |y|
        # < 1/nbits(xc).

        # normalize both operands: strip factors of 10 out of the
        # coefficients and into the exponents.
        x = _WorkRep(self)
        xc, xe = x.int, x.exp
        while xc % 10 == 0:
            xc //= 10
            xe += 1

        y = _WorkRep(other)
        yc, ye = y.int, y.exp
        while yc % 10 == 0:
            yc //= 10
            ye += 1

        # case where xc == 1: result is 10**(xe*y), with xe*y
        # required to be an integer
        if xc == 1:
            xe *= yc
            # result is now 10**(xe * 10**ye);  xe * 10**ye must be integral
            while xe % 10 == 0:
                xe //= 10
                ye += 1
            if ye < 0:
                return None
            exponent = xe * 10**ye
            if y.sign == 1:
                exponent = -exponent
            # if other is a nonnegative integer, use ideal exponent
            if other._isinteger() and other._sign == 0:
                ideal_exponent = self._exp*int(other)
                zeros = min(exponent-ideal_exponent, p-1)
            else:
                zeros = 0
            return _dec_from_triple(0, '1' + '0'*zeros, exponent-zeros)

        # case where y is negative: xc must be either a power
        # of 2 or a power of 5.
        if y.sign == 1:
            last_digit = xc % 10
            if last_digit in (2,4,6,8):
                # quick test for power of 2
                # (a power of two has exactly one set bit, so x & -x == x)
                if xc & -xc != xc:
                    return None
                # now xc is a power of 2; e is its exponent
                e = _nbits(xc)-1

                # We now have:
                #
                #   x = 2**e * 10**xe, e > 0, and y < 0.
                #
                # The exact result is:
                #
                #   x**y = 5**(-e*y) * 10**(e*y + xe*y)
                #
                # provided that both e*y and xe*y are integers.  Note that if
                # 5**(-e*y) >= 10**p, then the result can't be expressed
                # exactly with p digits of precision.
                #
                # Using the above, we can guard against large values of ye.
                # 93/65 is an upper bound for log(10)/log(5), so if
                #
                #   ye >= len(str(93*p//65))
                #
                # then
                #
                #   -e*y >= -y >= 10**ye > 93*p/65 > p*log(10)/log(5),
                #
                # so 5**(-e*y) >= 10**p, and the coefficient of the result
                # can't be expressed in p digits.

                # emax >= largest e such that 5**e < 10**p.
                emax = p*93//65
                if ye >= len(str(emax)):
                    return None

                # Find -e*y and -xe*y; both must be integers
                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 5**e

            elif last_digit == 5:
                # e >= log_5(xc) if xc is a power of 5; we have
                # equality all the way up to xc=5**2658
                e = _nbits(xc)*28//65
                xc, remainder = divmod(5**e, xc)
                if remainder:
                    return None
                while xc % 5 == 0:
                    xc //= 5
                    e -= 1

                # Guard against large values of ye, using the same logic as in
                # the 'xc is a power of 2' branch.  10/3 is an upper bound for
                # log(10)/log(2).
                emax = p*10//3
                if ye >= len(str(emax)):
                    return None

                e = _decimal_lshift_exact(e * yc, ye)
                xe = _decimal_lshift_exact(xe * yc, ye)
                if e is None or xe is None:
                    return None

                if e > emax:
                    return None
                xc = 2**e
            else:
                return None

            if xc >= 10**p:
                return None
            xe = -e-xe
            return _dec_from_triple(0, str(xc), xe)

        # now y is positive; find m and n such that y = m/n
        if ye >= 0:
            m, n = yc*10**ye, 1
        else:
            if xe != 0 and len(str(abs(yc*xe))) <= -ye:
                return None
            xc_bits = _nbits(xc)
            if xc != 1 and len(str(abs(yc)*xc_bits)) <= -ye:
                return None
            m, n = yc, 10**(-ye)
            # reduce the fraction m/n: yc has no factor of 10, so only
            # factors of 2 and 5 can be shared with 10**(-ye).
            while m % 2 == n % 2 == 0:
                m //= 2
                n //= 2
            while m % 5 == n % 5 == 0:
                m //= 5
                n //= 5

        # compute nth root of xc*10**xe
        if n > 1:
            # if 1 < xc < 2**n then xc isn't an nth power
            if xc != 1 and xc_bits <= n:
                return None

            xe, rem = divmod(xe, n)
            if rem != 0:
                return None

            # compute nth root of xc using Newton's method
            # -(-a//n) is ceiling division, so a starts at
            # 2**ceil(nbits(xc)/n) >= xc**(1/n).
            a = 1 << -(-_nbits(xc)//n) # initial estimate
            while True:
                q, r = divmod(xc, a**(n-1))
                if a <= q:
                    break
                else:
                    a = (a*(n-1) + q)//n
            # the root is exact only if a**n == xc exactly
            if not (a == q and r == 0):
                return None
            xc = a

        # now xc*10**xe is the nth root of the original xc*10**xe
        # compute mth power of xc*10**xe

        # if m > p*100//_log10_lb(xc) then m > p/log10(xc), hence xc**m >
        # 10**p and the result is not representable.
        if xc > 1 and m > p*100//_log10_lb(xc):
            return None
        xc = xc**m
        xe *= m
        if xc > 10**p:
            return None

        # by this point the result *is* exactly representable
        # adjust the exponent to get as close as possible to the ideal
        # exponent, if necessary
        str_xc = str(xc)
        if other._isinteger() and other._sign == 0:
            ideal_exponent = self._exp*int(other)
            zeros = min(xe-ideal_exponent, p-len(str_xc))
        else:
            zeros = 0
        return _dec_from_triple(0, str_xc+'0'*zeros, xe-zeros)
    def __pow__(self, other, modulo=None, context=None):
        """Return self ** other [ % modulo].

        With two arguments, compute self**other.

        With three arguments, compute (self**other) % modulo.  For the
        three argument form, the following restrictions on the
        arguments hold:

         - all three arguments must be integral
         - other must be nonnegative
         - either self or other (or both) must be nonzero
         - modulo must be nonzero and must have at most p digits,
           where p is the context precision.

        If any of these restrictions is violated the InvalidOperation
        flag is raised.

        The result of pow(self, other, modulo) is identical to the
        result that would be obtained by computing (self**other) %
        modulo with unbounded precision, but is computed more
        efficiently.  It is always exact.
        """

        if modulo is not None:
            return self._power_modulo(other, modulo, context)

        other = _convert_other(other)
        if other is NotImplemented:
            return other

        if context is None:
            context = getcontext()

        # either argument is a NaN => result is NaN
        ans = self._check_nans(other, context)
        if ans:
            return ans

        # 0**0 = NaN (!), x**0 = 1 for nonzero x (including +/-Infinity)
        if not other:
            if not self:
                return context._raise_error(InvalidOperation, '0 ** 0')
            else:
                return _One

        # result has sign 1 iff self._sign is 1 and other is an odd integer
        result_sign = 0
        if self._sign == 1:
            if other._isinteger():
                if not other._iseven():
                    result_sign = 1
            else:
                # -ve**noninteger = NaN
                # (-0)**noninteger = 0**noninteger
                if self:
                    return context._raise_error(InvalidOperation,
                        'x ** y with x negative and y not an integer')
            # negate self, without doing any unwanted rounding
            self = self.copy_negate()

        # 0**(+ve or Inf)= 0; 0**(-ve or -Inf) = Infinity
        if not self:
            if other._sign == 0:
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # Inf**(+ve or Inf) = Inf; Inf**(-ve or -Inf) = 0
        if self._isinfinity():
            if other._sign == 0:
                return _SignedInfinity[result_sign]
            else:
                return _dec_from_triple(result_sign, '0', 0)

        # 1**other = 1, but the choice of exponent and the flags
        # depend on the exponent of self, and on whether other is a
        # positive integer, a negative integer, or neither
        if self == _One:
            if other._isinteger():
                # exp = max(self._exp*max(int(other), 0),
                # 1-context.prec) but evaluating int(other) directly
                # is dangerous until we know other is small (other
                # could be 1e999999999)
                if other._sign == 1:
                    multiplier = 0
                elif other > context.prec:
                    multiplier = context.prec
                else:
                    multiplier = int(other)

                exp = self._exp * multiplier
                if exp < 1-context.prec:
                    exp = 1-context.prec
                    context._raise_error(Rounded)
            else:
                context._raise_error(Inexact)
                context._raise_error(Rounded)
                exp = 1-context.prec

            return _dec_from_triple(result_sign, '1'+'0'*-exp, exp)

        # compute adjusted exponent of self
        self_adj = self.adjusted()

        # self ** infinity is infinity if self > 1, 0 if self < 1
        # self ** -infinity is infinity if self < 1, 0 if self > 1
        if other._isinfinity():
            if (other._sign == 0) == (self_adj < 0):
                return _dec_from_triple(result_sign, '0', 0)
            else:
                return _SignedInfinity[result_sign]

        # from here on, the result always goes through the call
        # to _fix at the end of this function.
        ans = None
        exact = False

        # crude test to catch cases of extreme overflow/underflow.  If
        # log10(self)*other >= 10**bound and bound >= len(str(Emax))
        # then 10**bound >= 10**len(str(Emax)) >= Emax+1 and hence
        # self**other >= 10**(Emax+1), so overflow occurs.  The test
        # for underflow is similar.
        bound = self._log10_exp_bound() + other.adjusted()
        if (self_adj >= 0) == (other._sign == 0):
            # self > 1 and other +ve, or self < 1 and other -ve
            # possibility of overflow
            if bound >= len(str(context.Emax)):
                ans = _dec_from_triple(result_sign, '1', context.Emax+1)
        else:
            # self > 1 and other -ve, or self < 1 and other +ve
            # possibility of underflow to 0
            Etiny = context.Etiny()
            if bound >= len(str(-Etiny)):
                ans = _dec_from_triple(result_sign, '1', Etiny-1)

        # try for an exact result with precision +1
        if ans is None:
            ans = self._power_exact(other, context.prec + 1)
            if ans is not None:
                if result_sign == 1:
                    ans = _dec_from_triple(1, ans._int, ans._exp)
                exact = True

        # usual case: inexact result, x**y computed directly as exp(y*log(x))
        if ans is None:
            p = context.prec
            x = _WorkRep(self)
            xc, xe = x.int, x.exp
            y = _WorkRep(other)
            yc, ye = y.int, y.exp
            if y.sign == 1:
                yc = -yc

            # compute correctly rounded result:  start with precision +3,
            # then increase precision until result is unambiguously roundable
            # (i.e. until the digits beyond position p are not a run that
            # could straddle a rounding boundary).
            extra = 3
            while True:
                coeff, exp = _dpower(xc, xe, yc, ye, p+extra)
                if coeff % (5*10**(len(str(coeff))-p-1)):
                    break
                extra += 3

            ans = _dec_from_triple(result_sign, str(coeff), exp)

        # unlike exp, ln and log10, the power function respects the
        # rounding mode; no need to switch to ROUND_HALF_EVEN here

        # There's a difficulty here when 'other' is not an integer and
        # the result is exact.  In this case, the specification
        # requires that the Inexact flag be raised (in spite of
        # exactness), but since the result is exact _fix won't do this
        # for us.  (Correspondingly, the Underflow signal should also
        # be raised for subnormal results.)  We can't directly raise
        # these signals either before or after calling _fix, since
        # that would violate the precedence for signals.  So we wrap
        # the ._fix call in a temporary context, and reraise
        # afterwards.
        if exact and not other._isinteger():
            # pad with zeros up to length context.prec+1 if necessary; this
            # ensures that the Rounded signal will be raised.
            if len(ans._int) <= context.prec:
                expdiff = context.prec + 1 - len(ans._int)
                ans = _dec_from_triple(ans._sign, ans._int+'0'*expdiff,
                                       ans._exp-expdiff)

            # create a copy of the current context, with cleared flags/traps
            newcontext = context.copy()
            newcontext.clear_flags()
            for exception in _signals:
                newcontext.traps[exception] = 0

            # round in the new context
            ans = ans._fix(newcontext)

            # raise Inexact, and if necessary, Underflow
            newcontext._raise_error(Inexact)
            if newcontext.flags[Subnormal]:
                newcontext._raise_error(Underflow)

            # propagate signals to the original context; _fix could
            # have raised any of Overflow, Underflow, Subnormal,
            # Inexact, Rounded, Clamped.  Overflow needs the correct
            # arguments.  Note that the order of the exceptions is
            # important here.
            if newcontext.flags[Overflow]:
                context._raise_error(Overflow, 'above Emax', ans._sign)
            for exception in Underflow, Subnormal, Inexact, Rounded, Clamped:
                if newcontext.flags[exception]:
                    context._raise_error(exception)

        else:
            ans = ans._fix(context)

        return ans
def __rpow__(self, other, context=None):
    """Return other ** self (reflected power)."""
    converted = _convert_other(other)
    if converted is NotImplemented:
        return NotImplemented
    return converted.__pow__(self, context=context)
def normalize(self, context=None):
    """Normalize- strip trailing 0s, change anything equal to 0 to 0e0"""
    if context is None:
        context = getcontext()
    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans
    # round to the current context first; normalization applies to the
    # rounded value
    dup = self._fix(context)
    if dup._isinfinity():
        return dup
    # a zero normalizes to 0E+0, preserving its sign
    if not dup:
        return _dec_from_triple(dup._sign, '0', 0)
    # largest exponent the result may use; which bound applies depends on
    # the context's clamp mode
    exp_max = [context.Emax, context.Etop()][context.clamp]
    end = len(dup._int)
    exp = dup._exp
    # strip trailing zeros, bumping the exponent, but never past exp_max
    while dup._int[end-1] == '0' and exp < exp_max:
        exp += 1
        end -= 1
    return _dec_from_triple(dup._sign, dup._int[:end], exp)
def quantize(self, exp, rounding=None, context=None, watchexp=True):
    """Quantize self so its exponent is the same as that of exp.
    Similar to self._rescale(exp._exp) but with error checking.
    """
    exp = _convert_other(exp, raiseit=True)
    if context is None:
        context = getcontext()
    if rounding is None:
        rounding = context.rounding
    if self._is_special or exp._is_special:
        ans = self._check_nans(exp, context)
        if ans:
            return ans
        if exp._isinfinity() or self._isinfinity():
            if exp._isinfinity() and self._isinfinity():
                return Decimal(self)  # if both are inf, it is OK
            return context._raise_error(InvalidOperation,
                                    'quantize with one INF')
    # if we're not watching exponents, do a simple rescale
    # (watchexp=False skips all the exponent-bound checks below)
    if not watchexp:
        ans = self._rescale(exp._exp, rounding)
        # raise Inexact and Rounded where appropriate
        if ans._exp > self._exp:
            context._raise_error(Rounded)
            if ans != self:
                context._raise_error(Inexact)
        return ans
    # exp._exp should be between Etiny and Emax
    if not (context.Etiny() <= exp._exp <= context.Emax):
        return context._raise_error(InvalidOperation,
               'target exponent out of bounds in quantize')
    if not self:
        ans = _dec_from_triple(self._sign, '0', exp._exp)
        return ans._fix(context)
    self_adjusted = self.adjusted()
    if self_adjusted > context.Emax:
        return context._raise_error(InvalidOperation,
                                    'exponent of quantize result too large for current context')
    if self_adjusted - exp._exp + 1 > context.prec:
        return context._raise_error(InvalidOperation,
                                    'quantize result has too many digits for current context')
    ans = self._rescale(exp._exp, rounding)
    # the rescaled result must itself fit the context's bounds
    if ans.adjusted() > context.Emax:
        return context._raise_error(InvalidOperation,
                                    'exponent of quantize result too large for current context')
    if len(ans._int) > context.prec:
        return context._raise_error(InvalidOperation,
                                    'quantize result has too many digits for current context')
    # raise appropriate flags
    if ans and ans.adjusted() < context.Emin:
        context._raise_error(Subnormal)
    if ans._exp > self._exp:
        if ans != self:
            context._raise_error(Inexact)
        context._raise_error(Rounded)
    # call to fix takes care of any necessary folddown, and
    # signals Clamped if necessary
    ans = ans._fix(context)
    return ans
def same_quantum(self, other, context=None):
    """Test whether self and other share the same exponent.

    Special values follow these rules:
      * two infinities match
      * two NaNs match
      * a special value never matches a finite number
    """
    other = _convert_other(other, raiseit=True)
    if self._is_special or other._is_special:
        both_nan = self.is_nan() and other.is_nan()
        both_inf = self.is_infinite() and other.is_infinite()
        return both_nan or both_inf
    return self._exp == other._exp
def _rescale(self, exp, rounding):
    """Rescale self so that the exponent is exp, either by padding with zeros
    or by truncating digits, using the given rounding mode.
    Specials are returned without change.  This operation is
    quiet: it raises no flags, and uses no information from the
    context.
    exp = exp to scale to (an integer)
    rounding = rounding mode
    """
    if self._is_special:
        return Decimal(self)
    if not self:
        return _dec_from_triple(self._sign, '0', exp)
    if self._exp >= exp:
        # pad answer with zeros if necessary
        return _dec_from_triple(self._sign,
                                    self._int + '0'*(self._exp - exp), exp)
    # too many digits; round and lose data.  If self.adjusted() <
    # exp-1, replace self by 10**(exp-1) before rounding
    digits = len(self._int) + self._exp - exp
    if digits < 0:
        self = _dec_from_triple(self._sign, '1', exp-1)
        digits = 0
    # dispatch to the rounding helper registered for this mode;
    # it reports (via a nonzero return) whether the kept digits
    # must be incremented
    this_function = self._pick_rounding_function[rounding]
    changed = this_function(self, digits)
    coeff = self._int[:digits] or '0'
    if changed == 1:
        coeff = str(int(coeff)+1)
    return _dec_from_triple(self._sign, coeff, exp)
def _round(self, places, rounding):
    """Round a nonzero, nonspecial Decimal to a fixed number of
    significant figures, using the given rounding mode.
    Infinities, NaNs and zeros are returned unaltered.
    This operation is quiet: it raises no flags, and uses no
    information from the context.
    """
    if places <= 0:
        raise ValueError("argument should be at least 1 in _round")
    if self._is_special or not self:
        return Decimal(self)
    # keep exactly `places` significant digits
    ans = self._rescale(self.adjusted()+1-places, rounding)
    # it can happen that the rescale alters the adjusted exponent;
    # for example when rounding 99.97 to 3 significant figures.
    # When this happens we end up with an extra 0 at the end of
    # the number; a second rescale fixes this.
    if ans.adjusted() != self.adjusted():
        ans = ans._rescale(ans.adjusted()+1-places, rounding)
    return ans
def to_integral_exact(self, rounding=None, context=None):
    """Rounds to a nearby integer.
    If no rounding mode is specified, take the rounding mode from
    the context.  This method raises the Rounded and Inexact flags
    when appropriate.
    See also: to_integral_value, which does exactly the same as
    this method except that it doesn't raise Inexact or Rounded.
    """
    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans
        return Decimal(self)
    # nonnegative exponent: already an integer
    if self._exp >= 0:
        return Decimal(self)
    if not self:
        return _dec_from_triple(self._sign, '0', 0)
    if context is None:
        context = getcontext()
    if rounding is None:
        rounding = context.rounding
    ans = self._rescale(0, rounding)
    # a change in value means the result is inexact (and always rounded,
    # since digits were dropped)
    if ans != self:
        context._raise_error(Inexact)
    context._raise_error(Rounded)
    return ans
def to_integral_value(self, rounding=None, context=None):
    """Rounds to the nearest integer, without raising inexact, rounded."""
    if context is None:
        context = getcontext()
    if rounding is None:
        rounding = context.rounding
    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans
        return Decimal(self)
    # values with a nonnegative exponent are already integral
    if self._exp >= 0:
        return Decimal(self)
    else:
        # _rescale is quiet: no Inexact/Rounded flags are raised
        return self._rescale(0, rounding)
# the method name changed, but we provide also the old one, for compatibility
to_integral = to_integral_value
def sqrt(self, context=None):
    """Return the square root of self."""
    if context is None:
        context = getcontext()
    if self._is_special:
        ans = self._check_nans(context=context)
        if ans:
            return ans
        if self._isinfinity() and self._sign == 0:
            return Decimal(self)
    if not self:
        # exponent = self._exp // 2. sqrt(-0) = -0
        ans = _dec_from_triple(self._sign, '0', self._exp // 2)
        return ans._fix(context)
    if self._sign == 1:
        return context._raise_error(InvalidOperation, 'sqrt(-x), x > 0')
    # At this point self represents a positive number.  Let p be
    # the desired precision and express self in the form c*100**e
    # with c a positive real number and e an integer, c and e
    # being chosen so that 100**(p-1) <= c < 100**p.  Then the
    # (exact) square root of self is sqrt(c)*10**e, and 10**(p-1)
    # <= sqrt(c) < 10**p, so the closest representable Decimal at
    # precision p is n*10**e where n = round_half_even(sqrt(c)),
    # the closest integer to sqrt(c) with the even integer chosen
    # in the case of a tie.
    #
    # To ensure correct rounding in all cases, we use the
    # following trick: we compute the square root to an extra
    # place (precision p+1 instead of precision p), rounding down.
    # Then, if the result is inexact and its last digit is 0 or 5,
    # we increase the last digit to 1 or 6 respectively; if it's
    # exact we leave the last digit alone.  Now the final round to
    # p places (or fewer in the case of underflow) will round
    # correctly and raise the appropriate flags.
    # use an extra digit of precision
    prec = context.prec+1
    # write argument in the form c*100**e where e = self._exp//2
    # is the 'ideal' exponent, to be used if the square root is
    # exactly representable.  l is the number of 'digits' of c in
    # base 100, so that 100**(l-1) <= c < 100**l.
    op = _WorkRep(self)
    e = op.exp >> 1
    if op.exp & 1:
        # odd exponent: absorb one factor of 10 into the coefficient
        c = op.int * 10
        l = (len(self._int) >> 1) + 1
    else:
        c = op.int
        l = len(self._int)+1 >> 1
    # rescale so that c has exactly prec base 100 'digits'
    shift = prec-l
    if shift >= 0:
        c *= 100**shift
        exact = True
    else:
        c, remainder = divmod(c, 100**-shift)
        exact = not remainder
    e -= shift
    # find n = floor(sqrt(c)) using Newton's method
    # (n starts at 10**prec, which exceeds sqrt(c) since c < 100**prec)
    n = 10**prec
    while True:
        q = c//n
        if n <= q:
            break
        else:
            n = n + q >> 1
    exact = exact and n*n == c
    if exact:
        # result is exact; rescale to use ideal exponent e
        if shift >= 0:
            # assert n % 10**shift == 0
            n //= 10**shift
        else:
            n *= 10**-shift
        e += shift
    else:
        # result is not exact; fix last digit as described above
        if n % 5 == 0:
            n += 1
    ans = _dec_from_triple(0, str(n), e)
    # round, and fit to current context
    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding
    return ans
def max(self, other, context=None):
    """Returns the larger value.
    Like max(self, other) except if one is not a number, returns
    NaN (and signals if one is sNaN).  Also rounds.
    """
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()
    if self._is_special or other._is_special:
        # If one operand is a quiet NaN and the other is number, then the
        # number is always returned
        sn = self._isnan()
        on = other._isnan()
        if sn or on:
            if on == 1 and sn == 0:
                return self._fix(context)
            if sn == 1 and on == 0:
                return other._fix(context)
            # at least one signaling NaN (or both quiet): delegate
            return self._check_nans(other, context)
    c = self._cmp(other)
    if c == 0:
        # If both operands are finite and equal in numerical value
        # then an ordering is applied:
        #
        # If the signs differ then max returns the operand with the
        # positive sign and min returns the operand with the negative sign
        #
        # If the signs are the same then the exponent is used to select
        # the result.  This is exactly the ordering used in compare_total.
        c = self.compare_total(other)
    if c == -1:
        ans = other
    else:
        ans = self
    # the winner is rounded to the current context before being returned
    return ans._fix(context)
def min(self, other, context=None):
    """Returns the smaller value.
    Like min(self, other) except if one is not a number, returns
    NaN (and signals if one is sNaN).  Also rounds.
    """
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()
    if self._is_special or other._is_special:
        # If one operand is a quiet NaN and the other is number, then the
        # number is always returned
        sn = self._isnan()
        on = other._isnan()
        if sn or on:
            if on == 1 and sn == 0:
                return self._fix(context)
            if sn == 1 and on == 0:
                return other._fix(context)
            return self._check_nans(other, context)
    c = self._cmp(other)
    if c == 0:
        # numerically equal: break the tie with the total ordering
        c = self.compare_total(other)
    if c == -1:
        ans = self
    else:
        ans = other
    return ans._fix(context)
def _isinteger(self):
"""Returns whether self is an integer"""
if self._is_special:
return False
if self._exp >= 0:
return True
rest = self._int[self._exp:]
return rest == '0'*len(rest)
def _iseven(self):
"""Returns True if self is even. Assumes self is an integer."""
if not self or self._exp > 0:
return True
return self._int[-1+self._exp] in '02468'
def adjusted(self):
"""Return the adjusted exponent of self"""
try:
return self._exp + len(self._int) - 1
# If NaN or Infinity, self._exp is string
except TypeError:
return 0
def canonical(self):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
"""
return self
def compare_signal(self, other, context=None):
    """Compare self to other numerically; every NaN signals.

    Like compare(), except that all NaNs signal, with signaling NaNs
    taking precedence over quiet NaNs.
    """
    other = _convert_other(other, raiseit=True)
    nan_result = self._compare_check_nans(other, context)
    if nan_result:
        return nan_result
    return self.compare(other, context=context)
def compare_total(self, other, context=None):
    """Compares self to other using the abstract representations.
    This is not like the standard compare, which use their numerical
    value.  Note that a total ordering is defined for all possible abstract
    representations.
    """
    other = _convert_other(other, raiseit=True)
    # if one is negative and the other is positive, it's easy
    if self._sign and not other._sign:
        return _NegativeOne
    if not self._sign and other._sign:
        return _One
    # both operands now share the same sign
    sign = self._sign
    # let's handle both NaN types
    self_nan = self._isnan()
    other_nan = other._isnan()
    if self_nan or other_nan:
        if self_nan == other_nan:
            # compare payloads as though they're integers
            self_key = len(self._int), self._int
            other_key = len(other._int), other._int
            if self_key < other_key:
                if sign:
                    return _One
                else:
                    return _NegativeOne
            if self_key > other_key:
                if sign:
                    return _NegativeOne
                else:
                    return _One
            return _Zero
        # NOTE(review): the branches below rank the two NaN kinds
        # against each other and against finite values; the exact
        # ordering follows _isnan()'s encoding of quiet vs signaling.
        if sign:
            if self_nan == 1:
                return _NegativeOne
            if other_nan == 1:
                return _One
            if self_nan == 2:
                return _NegativeOne
            if other_nan == 2:
                return _One
        else:
            if self_nan == 1:
                return _One
            if other_nan == 1:
                return _NegativeOne
            if self_nan == 2:
                return _One
            if other_nan == 2:
                return _NegativeOne
    if self < other:
        return _NegativeOne
    if self > other:
        return _One
    # numerically equal: fall back to comparing exponents, with the
    # direction depending on the shared sign
    if self._exp < other._exp:
        if sign:
            return _One
        else:
            return _NegativeOne
    if self._exp > other._exp:
        if sign:
            return _NegativeOne
        else:
            return _One
    return _Zero
def compare_total_mag(self, other, context=None):
    """Compare self to other using the total ordering, ignoring signs.

    Both operands are treated as if their sign were 0.
    """
    other = _convert_other(other, raiseit=True)
    return self.copy_abs().compare_total(other.copy_abs())
def copy_abs(self):
    """Returns a copy with the sign set to 0. """
    # quiet operation: applies to special values too and consults no context
    return _dec_from_triple(0, self._int, self._exp, self._is_special)
def copy_negate(self):
    """Return a copy of self with the sign flipped."""
    # quiet operation: no context flags are raised
    flipped_sign = 0 if self._sign else 1
    return _dec_from_triple(flipped_sign, self._int, self._exp,
                            self._is_special)
def copy_sign(self, other, context=None):
    """Returns self with the sign of other."""
    other = _convert_other(other, raiseit=True)
    # quiet operation: only the sign changes; coefficient and exponent
    # are taken from self unaltered
    return _dec_from_triple(other._sign, self._int,
                            self._exp, self._is_special)
def exp(self, context=None):
    """Returns e ** self."""
    if context is None:
        context = getcontext()
    # exp(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans
    # exp(-Infinity) = 0
    if self._isinfinity() == -1:
        return _Zero
    # exp(0) = 1
    if not self:
        return _One
    # exp(Infinity) = Infinity
    if self._isinfinity() == 1:
        return Decimal(self)
    # the result is now guaranteed to be inexact (the true
    # mathematical result is transcendental). There's no need to
    # raise Rounded and Inexact here---they'll always be raised as
    # a result of the call to _fix.
    p = context.prec
    adj = self.adjusted()
    # we only need to do any computation for quite a small range
    # of adjusted exponents---for example, -29 <= adj <= 10 for
    # the default context.  For smaller exponent the result is
    # indistinguishable from 1 at the given precision, while for
    # larger exponent the result either overflows or underflows.
    if self._sign == 0 and adj > len(str((context.Emax+1)*3)):
        # overflow
        ans = _dec_from_triple(0, '1', context.Emax+1)
    elif self._sign == 1 and adj > len(str((-context.Etiny()+1)*3)):
        # underflow to 0
        ans = _dec_from_triple(0, '1', context.Etiny()-1)
    elif self._sign == 0 and adj < -p:
        # p+1 digits; final round will raise correct flags
        ans = _dec_from_triple(0, '1' + '0'*(p-1) + '1', -p)
    elif self._sign == 1 and adj < -p-1:
        # p+1 digits; final round will raise correct flags
        ans = _dec_from_triple(0, '9'*(p+1), -p-1)
    # general case
    else:
        op = _WorkRep(self)
        c, e = op.int, op.exp
        if op.sign == 1:
            c = -c
        # compute correctly rounded result: increase precision by
        # 3 digits at a time until we get an unambiguously
        # roundable result
        extra = 3
        while True:
            coeff, exp = _dexp(c, e, p+extra)
            # a coefficient ending in ...x000 or ...x500 (to p digits)
            # could round either way; retry with more precision
            if coeff % (5*10**(len(str(coeff))-p-1)):
                break
            extra += 3
        ans = _dec_from_triple(0, str(coeff), exp)
    # at this stage, ans should round correctly with *any*
    # rounding mode, not just with ROUND_HALF_EVEN
    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding
    return ans
def is_canonical(self):
"""Return True if self is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
"""
return True
def is_finite(self):
"""Return True if self is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
"""
return not self._is_special
def is_infinite(self):
"""Return True if self is infinite; otherwise return False."""
return self._exp == 'F'
def is_nan(self):
"""Return True if self is a qNaN or sNaN; otherwise return False."""
return self._exp in ('n', 'N')
def is_normal(self, context=None):
"""Return True if self is a normal number; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return context.Emin <= self.adjusted()
def is_qnan(self):
"""Return True if self is a quiet NaN; otherwise return False."""
return self._exp == 'n'
def is_signed(self):
"""Return True if self is negative; otherwise return False."""
return self._sign == 1
def is_snan(self):
"""Return True if self is a signaling NaN; otherwise return False."""
return self._exp == 'N'
def is_subnormal(self, context=None):
"""Return True if self is subnormal; otherwise return False."""
if self._is_special or not self:
return False
if context is None:
context = getcontext()
return self.adjusted() < context.Emin
def is_zero(self):
"""Return True if self is a zero; otherwise return False."""
return not self._is_special and self._int == '0'
def _ln_exp_bound(self):
    """Compute a lower bound for the adjusted exponent of self.ln().
    In other words, compute r such that self.ln() >= 10**r.  Assumes
    that self is finite and positive and that self != 1.
    """
    # for 0.1 <= x <= 10 we use the inequalities 1-1/x <= ln(x) <= x-1
    adj = self._exp + len(self._int) - 1
    if adj >= 1:
        # argument >= 10; we use 23/10 = 2.3 as a lower bound for ln(10)
        return len(str(adj*23//10)) - 1
    if adj <= -2:
        # argument <= 0.1
        return len(str((-1-adj)*23//10)) - 1
    op = _WorkRep(self)
    c, e = op.int, op.exp
    if adj == 0:
        # 1 < self < 10
        num = str(c-10**-e)
        den = str(c)
        # len(num)-len(den) approximates log10(num/den); the boolean
        # (num < den) corrects the off-by-one when num's leading digits
        # are smaller
        return len(num) - len(den) - (num < den)
    # adj == -1, 0.1 <= self < 1
    return e + len(str(10**-e - c)) - 1
def ln(self, context=None):
    """Returns the natural (base e) logarithm of self."""
    if context is None:
        context = getcontext()
    # ln(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans
    # ln(0.0) == -Infinity
    if not self:
        return _NegativeInfinity
    # ln(Infinity) = Infinity
    if self._isinfinity() == 1:
        return _Infinity
    # ln(1.0) == 0.0
    if self == _One:
        return _Zero
    # ln(negative) raises InvalidOperation
    if self._sign == 1:
        return context._raise_error(InvalidOperation,
                                    'ln of a negative value')
    # result is irrational, so necessarily inexact
    op = _WorkRep(self)
    c, e = op.int, op.exp
    p = context.prec
    # correctly rounded result: repeatedly increase precision by 3
    # until we get an unambiguously roundable result
    places = p - self._ln_exp_bound() + 2 # at least p+3 places
    while True:
        coeff = _dlog(c, e, places)
        # assert len(str(abs(coeff)))-p >= 1
        # a trailing ...000/...500 pattern (to p digits) is ambiguous
        # for rounding; retry with more places
        if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
            break
        places += 3
    ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
    # round under ROUND_HALF_EVEN, then restore the caller's mode
    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding
    return ans
def _log10_exp_bound(self):
    """Compute a lower bound for the adjusted exponent of self.log10().
    In other words, find r such that self.log10() >= 10**r.
    Assumes that self is finite and positive and that self != 1.
    """
    # For x >= 10 or x < 0.1 we only need a bound on the integer
    # part of log10(self), and this comes directly from the
    # exponent of x.  For 0.1 <= x <= 10 we use the inequalities
    # 1-1/x <= log(x) <= x-1. If x > 1 we have |log10(x)| >
    # (1-1/x)/2.31 > 0.  If x < 1 then |log10(x)| > (1-x)/2.31 > 0
    adj = self._exp + len(self._int) - 1
    if adj >= 1:
        # self >= 10
        return len(str(adj))-1
    if adj <= -2:
        # self < 0.1
        return len(str(-1-adj))-1
    op = _WorkRep(self)
    c, e = op.int, op.exp
    if adj == 0:
        # 1 < self < 10
        num = str(c-10**-e)
        den = str(231*c)
        # length difference estimates log10 of the quotient; the boolean
        # comparison corrects the off-by-one case
        return len(num) - len(den) - (num < den) + 2
    # adj == -1, 0.1 <= self < 1
    num = str(10**-e-c)
    return len(num) + e - (num < "231") - 1
def log10(self, context=None):
    """Returns the base 10 logarithm of self."""
    if context is None:
        context = getcontext()
    # log10(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans
    # log10(0.0) == -Infinity
    if not self:
        return _NegativeInfinity
    # log10(Infinity) = Infinity
    if self._isinfinity() == 1:
        return _Infinity
    # log10(negative or -Infinity) raises InvalidOperation
    if self._sign == 1:
        return context._raise_error(InvalidOperation,
                                    'log10 of a negative value')
    # log10(10**n) = n
    if self._int[0] == '1' and self._int[1:] == '0'*(len(self._int) - 1):
        # answer may need rounding
        ans = Decimal(self._exp + len(self._int) - 1)
    else:
        # result is irrational, so necessarily inexact
        op = _WorkRep(self)
        c, e = op.int, op.exp
        p = context.prec
        # correctly rounded result: repeatedly increase precision
        # until result is unambiguously roundable
        places = p-self._log10_exp_bound()+2
        while True:
            coeff = _dlog10(c, e, places)
            # assert len(str(abs(coeff)))-p >= 1
            if coeff % (5*10**(len(str(abs(coeff)))-p-1)):
                break
            places += 3
        ans = _dec_from_triple(int(coeff<0), str(abs(coeff)), -places)
    # round under ROUND_HALF_EVEN, then restore the caller's mode
    context = context._shallow_copy()
    rounding = context._set_rounding(ROUND_HALF_EVEN)
    ans = ans._fix(context)
    context.rounding = rounding
    return ans
def logb(self, context=None):
    """ Returns the exponent of the magnitude of self's MSD.
    The result is the integer which is the exponent of the magnitude
    of the most significant digit of self (as though it were truncated
    to a single digit while maintaining the value of that digit and
    without limiting the resulting exponent).
    """
    # logb(NaN) = NaN
    ans = self._check_nans(context=context)
    if ans:
        return ans
    if context is None:
        context = getcontext()
    # logb(+/-Inf) = +Inf
    if self._isinfinity():
        return _Infinity
    # logb(0) = -Inf, DivisionByZero
    if not self:
        return context._raise_error(DivisionByZero, 'logb(0)', 1)
    # otherwise, simply return the adjusted exponent of self, as a
    # Decimal.  Note that no attempt is made to fit the result
    # into the current context.
    ans = Decimal(self.adjusted())
    return ans._fix(context)
def _islogical(self):
"""Return True if self is a logical operand.
For being logical, it must be a finite number with a sign of 0,
an exponent of 0, and a coefficient whose digits must all be
either 0 or 1.
"""
if self._sign != 0 or self._exp != 0:
return False
for dig in self._int:
if dig not in '01':
return False
return True
def _fill_logical(self, context, opa, opb):
dif = context.prec - len(opa)
if dif > 0:
opa = '0'*dif + opa
elif dif < 0:
opa = opa[-context.prec:]
dif = context.prec - len(opb)
if dif > 0:
opb = '0'*dif + opb
elif dif < 0:
opb = opb[-context.prec:]
return opa, opb
def logical_and(self, other, context=None):
    """Return the digit-wise 'and' of self and other."""
    if context is None:
        context = getcontext()
    other = _convert_other(other, raiseit=True)
    # both operands must be logical: 0/1 digits, sign 0, exponent 0
    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)
    # bring both coefficients to exactly context.prec digits
    opa, opb = self._fill_logical(context, self._int, other._int)
    # a result digit is 1 exactly where both input digits are 1
    digits = ['1' if a == '1' and b == '1' else '0'
              for a, b in zip(opa, opb)]
    coeff = ''.join(digits).lstrip('0') or '0'
    return _dec_from_triple(0, coeff, 0)
def logical_invert(self, context=None):
    """Return the digit-wise inversion of self (0 <-> 1)."""
    if context is None:
        context = getcontext()
    # xor against a mask of all ones flips every digit
    all_ones = _dec_from_triple(0, '1'*context.prec, 0)
    return self.logical_xor(all_ones, context)
def logical_or(self, other, context=None):
    """Return the digit-wise 'or' of self and other."""
    if context is None:
        context = getcontext()
    other = _convert_other(other, raiseit=True)
    # both operands must be logical: 0/1 digits, sign 0, exponent 0
    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)
    # bring both coefficients to exactly context.prec digits
    opa, opb = self._fill_logical(context, self._int, other._int)
    # a result digit is 1 where either input digit is 1
    digits = ['1' if a == '1' or b == '1' else '0'
              for a, b in zip(opa, opb)]
    coeff = ''.join(digits).lstrip('0') or '0'
    return _dec_from_triple(0, coeff, 0)
def logical_xor(self, other, context=None):
    """Return the digit-wise 'xor' of self and other."""
    if context is None:
        context = getcontext()
    other = _convert_other(other, raiseit=True)
    # both operands must be logical: 0/1 digits, sign 0, exponent 0
    if not self._islogical() or not other._islogical():
        return context._raise_error(InvalidOperation)
    # bring both coefficients to exactly context.prec digits
    opa, opb = self._fill_logical(context, self._int, other._int)
    # a result digit is 1 where the two input digits differ
    digits = ['1' if a != b else '0' for a, b in zip(opa, opb)]
    coeff = ''.join(digits).lstrip('0') or '0'
    return _dec_from_triple(0, coeff, 0)
def max_mag(self, other, context=None):
    """Compares the values numerically with their sign ignored."""
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()
    if self._is_special or other._is_special:
        # If one operand is a quiet NaN and the other is number, then the
        # number is always returned
        sn = self._isnan()
        on = other._isnan()
        if sn or on:
            if on == 1 and sn == 0:
                return self._fix(context)
            if sn == 1 and on == 0:
                return other._fix(context)
            return self._check_nans(other, context)
    # compare by magnitude; ties are broken by the total ordering
    c = self.copy_abs()._cmp(other.copy_abs())
    if c == 0:
        c = self.compare_total(other)
    if c == -1:
        ans = other
    else:
        ans = self
    return ans._fix(context)
def min_mag(self, other, context=None):
    """Compares the values numerically with their sign ignored."""
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()
    if self._is_special or other._is_special:
        # If one operand is a quiet NaN and the other is number, then the
        # number is always returned
        sn = self._isnan()
        on = other._isnan()
        if sn or on:
            if on == 1 and sn == 0:
                return self._fix(context)
            if sn == 1 and on == 0:
                return other._fix(context)
            return self._check_nans(other, context)
    # compare by magnitude; ties are broken by the total ordering
    c = self.copy_abs()._cmp(other.copy_abs())
    if c == 0:
        c = self.compare_total(other)
    if c == -1:
        ans = self
    else:
        ans = other
    return ans._fix(context)
def next_minus(self, context=None):
    """Returns the largest representable number smaller than itself."""
    if context is None:
        context = getcontext()
    ans = self._check_nans(context=context)
    if ans:
        return ans
    if self._isinfinity() == -1:
        return _NegativeInfinity
    if self._isinfinity() == 1:
        # the largest finite representable number
        return _dec_from_triple(0, '9'*context.prec, context.Etop())
    # work in a flag-silent copy rounded towards -Infinity
    context = context.copy()
    context._set_rounding(ROUND_FLOOR)
    context._ignore_all_flags()
    new_self = self._fix(context)
    if new_self != self:
        return new_self
    # self was already representable: step down by a value below one ulp
    # at Etiny, so ROUND_FLOOR lands on the next smaller representable
    return self.__sub__(_dec_from_triple(0, '1', context.Etiny()-1),
                        context)
def next_plus(self, context=None):
    """Returns the smallest representable number larger than itself."""
    if context is None:
        context = getcontext()
    ans = self._check_nans(context=context)
    if ans:
        return ans
    if self._isinfinity() == 1:
        return _Infinity
    if self._isinfinity() == -1:
        # the most negative finite representable number
        return _dec_from_triple(1, '9'*context.prec, context.Etop())
    # work in a flag-silent copy rounded towards +Infinity
    context = context.copy()
    context._set_rounding(ROUND_CEILING)
    context._ignore_all_flags()
    new_self = self._fix(context)
    if new_self != self:
        return new_self
    # self was already representable: step up by a value below one ulp
    # at Etiny, so ROUND_CEILING lands on the next larger representable
    return self.__add__(_dec_from_triple(0, '1', context.Etiny()-1),
                        context)
def next_toward(self, other, context=None):
    """Returns the number closest to self, in the direction towards other.
    The result is the closest representable number to self
    (excluding self) that is in the direction towards other,
    unless both have the same value.  If the two operands are
    numerically equal, then the result is a copy of self with the
    sign set to be the same as the sign of other.
    """
    other = _convert_other(other, raiseit=True)
    if context is None:
        context = getcontext()
    ans = self._check_nans(other, context)
    if ans:
        return ans
    comparison = self._cmp(other)
    if comparison == 0:
        return self.copy_sign(other)
    if comparison == -1:
        ans = self.next_plus(context)
    else: # comparison == 1
        ans = self.next_minus(context)
    # decide which flags to raise using value of ans
    if ans._isinfinity():
        context._raise_error(Overflow,
                             'Infinite result from next_toward',
                             ans._sign)
        context._raise_error(Inexact)
        context._raise_error(Rounded)
    elif ans.adjusted() < context.Emin:
        # subnormal result: raise the full underflow set of flags
        context._raise_error(Underflow)
        context._raise_error(Subnormal)
        context._raise_error(Inexact)
        context._raise_error(Rounded)
        # if precision == 1 then we don't raise Clamped for a
        # result 0E-Etiny.
        if not ans:
            context._raise_error(Clamped)
    return ans
def number_class(self, context=None):
    """Return a string indicating the class of self.

    The result is one of:
      sNaN, NaN, -Infinity, -Normal, -Subnormal, -Zero,
      +Zero, +Subnormal, +Normal, +Infinity
    """
    if self.is_snan():
        return "sNaN"
    if self.is_qnan():
        return "NaN"
    inf = self._isinfinity()
    if inf == 1:
        return "+Infinity"
    if inf == -1:
        return "-Infinity"
    # every remaining case carries a sign prefix derived from _sign
    prefix = "-" if self._sign else "+"
    if self.is_zero():
        return prefix + "Zero"
    if context is None:
        context = getcontext()
    if self.is_subnormal(context=context):
        return prefix + "Subnormal"
    # any other finite nonzero number is normal
    return prefix + "Normal"
def radix(self):
    """Just returns 10, as this is Decimal, :)"""
    # the radix (numeric base) of the Decimal arithmetic is always ten
    return Decimal(10)
def rotate(self, other, context=None):
    """Returns a rotated copy of self, value-of-other times."""
    if context is None:
        context = getcontext()
    other = _convert_other(other, raiseit=True)
    ans = self._check_nans(other, context)
    if ans:
        return ans
    # the rotation count must be an integer within +/- context.prec
    if other._exp != 0:
        return context._raise_error(InvalidOperation)
    if not (-context.prec <= int(other) <= context.prec):
        return context._raise_error(InvalidOperation)
    if self._isinfinity():
        return Decimal(self)
    # get values, pad if necessary
    torot = int(other)
    rotdig = self._int
    topad = context.prec - len(rotdig)
    if topad > 0:
        rotdig = '0'*topad + rotdig
    elif topad < 0:
        rotdig = rotdig[-topad:]
    # let's rotate!
    rotated = rotdig[torot:] + rotdig[:torot]
    return _dec_from_triple(self._sign,
                            rotated.lstrip('0') or '0', self._exp)
def scaleb(self, other, context=None):
    """Returns self operand after adding the second value to its exp."""
    if context is None:
        context = getcontext()
    other = _convert_other(other, raiseit=True)
    ans = self._check_nans(other, context)
    if ans:
        return ans
    # the scale amount must be an integer within the spec-defined bounds
    if other._exp != 0:
        return context._raise_error(InvalidOperation)
    liminf = -2 * (context.Emax + context.prec)
    limsup =  2 * (context.Emax + context.prec)
    if not (liminf <= int(other) <= limsup):
        return context._raise_error(InvalidOperation)
    if self._isinfinity():
        return Decimal(self)
    d = _dec_from_triple(self._sign, self._int, self._exp + int(other))
    # _fix rounds the shifted value into the current context
    d = d._fix(context)
    return d
def shift(self, other, context=None):
    """Returns a shifted copy of self, value-of-other times."""
    if context is None:
        context = getcontext()
    other = _convert_other(other, raiseit=True)
    ans = self._check_nans(other, context)
    if ans:
        return ans
    # the shift count must be an integer within +/- context.prec
    if other._exp != 0:
        return context._raise_error(InvalidOperation)
    if not (-context.prec <= int(other) <= context.prec):
        return context._raise_error(InvalidOperation)
    if self._isinfinity():
        return Decimal(self)
    # get values, pad if necessary
    torot = int(other)
    rotdig = self._int
    topad = context.prec - len(rotdig)
    if topad > 0:
        rotdig = '0'*topad + rotdig
    elif topad < 0:
        rotdig = rotdig[-topad:]
    # let's shift!
    # negative count drops digits on the right; positive count appends
    # zeros and keeps only the rightmost context.prec digits
    if torot < 0:
        shifted = rotdig[:torot]
    else:
        shifted = rotdig + '0'*torot
        shifted = shifted[-context.prec:]
    return _dec_from_triple(self._sign,
                            shifted.lstrip('0') or '0', self._exp)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
    """Pickle support: reconstruct via the constructor from str(self)."""
    return (self.__class__, (str(self),))
def __copy__(self):
if type(self) is Decimal:
return self # I'm immutable; therefore I am my own clone
return self.__class__(str(self))
def __deepcopy__(self, memo):
if type(self) is Decimal:
return self # My components are also immutable
return self.__class__(str(self))
    # PEP 3101 support.  The _localeconv keyword argument should be
    # considered private: it's provided for ease of testing only.
    def __format__(self, specifier, context=None, _localeconv=None):
        """Format a Decimal instance according to the given specifier.

        The specifier should be a standard format specifier, with the
        form described in PEP 3101.  Formatting types 'e', 'E', 'f',
        'F', 'g', 'G', 'n' and '%' are supported.  If the formatting
        type is omitted it defaults to 'g' or 'G', depending on the
        value of context.capitals.
        """
        # Note: PEP 3101 says that if the type is not present then
        # there should be at least one digit after the decimal point.
        # We take the liberty of ignoring this requirement for
        # Decimal---it's presumably there to make sure that
        # format(float, '') behaves similarly to str(float).
        if context is None:
            context = getcontext()
        spec = _parse_format_specifier(specifier, _localeconv=_localeconv)
        # special values don't care about the type or precision
        if self._is_special:
            sign = _format_sign(self._sign, spec)
            body = str(self.copy_abs())
            return _format_align(sign, body, spec)
        # a type of None defaults to 'g' or 'G', depending on context
        if spec['type'] is None:
            spec['type'] = ['g', 'G'][context.capitals]
        # if type is '%', adjust exponent of self accordingly
        # (equivalent to multiplying by 100; the '%' sign is added later
        # by _format_number).  Note that 'self' is rebound to a new
        # Decimal here and possibly again below.
        if spec['type'] == '%':
            self = _dec_from_triple(self._sign, self._int, self._exp+2)
        # round if necessary, taking rounding mode from the context
        rounding = context.rounding
        precision = spec['precision']
        if precision is not None:
            if spec['type'] in 'eE':
                self = self._round(precision+1, rounding)
            elif spec['type'] in 'fF%':
                self = self._rescale(-precision, rounding)
            elif spec['type'] in 'gG' and len(self._int) > precision:
                self = self._round(precision, rounding)
        # special case: zeros with a positive exponent can't be
        # represented in fixed point; rescale them to 0e0.
        if not self and self._exp > 0 and spec['type'] in 'fF%':
            self = self._rescale(0, rounding)
        # figure out placement of the decimal point
        # (dotplace counts digits of self._int to the left of the point)
        leftdigits = self._exp + len(self._int)
        if spec['type'] in 'eE':
            if not self and precision is not None:
                dotplace = 1 - precision
            else:
                dotplace = 1
        elif spec['type'] in 'fF%':
            dotplace = leftdigits
        elif spec['type'] in 'gG':
            if self._exp <= 0 and leftdigits > -6:
                dotplace = leftdigits
            else:
                dotplace = 1
        # find digits before and after decimal point, and get exponent
        if dotplace < 0:
            intpart = '0'
            fracpart = '0'*(-dotplace) + self._int
        elif dotplace > len(self._int):
            intpart = self._int + '0'*(dotplace-len(self._int))
            fracpart = ''
        else:
            intpart = self._int[:dotplace] or '0'
            fracpart = self._int[dotplace:]
        exp = leftdigits-dotplace
        # done with the decimal-specific stuff; hand over the rest
        # of the formatting to the _format_number function
        return _format_number(self._sign, intpart, fracpart, exp, spec)
def _dec_from_triple(sign, coefficient, exponent, special=False):
    """Build a Decimal directly from its raw (sign, coefficient, exponent).

    No validation, normalization (e.g. removal of leading zeros) or
    argument conversion is performed, which is why this constructor is
    for *internal use only*.
    """
    result = object.__new__(Decimal)
    result._sign = sign
    result._int = coefficient
    result._exp = exponent
    result._is_special = special
    return result
# Register Decimal as a kind of Number (an abstract base class).
# However, do not register it as Real (because Decimals are not
# interoperable with floats, so they do not honour the implied
# contract of the Real ABC).
_numbers.Number.register(Decimal)
##### Context class #######################################################

class _ContextManager(object):
    """Context manager supporting localcontext().

    On entry a copy of the supplied context becomes the active
    thread-local context; on exit the previously active context is
    restored.
    """
    def __init__(self, new_context):
        self.new_context = new_context.copy()

    def __enter__(self):
        self.saved_context = getcontext()
        setcontext(self.new_context)
        return self.new_context

    def __exit__(self, exc_type, exc_value, traceback):
        setcontext(self.saved_context)
class Context(object):
    """Contains the context for a Decimal instance.

    Attributes:
    prec     - precision (for use in rounding, division, square roots..)
    rounding - rounding type (how you round)
    traps    - If traps[exception] = 1, then the exception is
               raised when it is caused.  Otherwise, a value is
               substituted in.
    flags    - When an exception is caused, flags[exception] is set.
               (Whether or not the trap_enabler is set.)
               Should be reset by the user of the Decimal instance.
    Emin     - Minimum exponent
    Emax     - Maximum exponent
    capitals - If 1, 1*10^1 is printed as 1E+1.
               If 0, printed as 1e1
    clamp    - If 1, change exponents if too high (Default 0)
    """
    def __init__(self, prec=None, rounding=None, Emin=None, Emax=None,
                 capitals=None, clamp=None, flags=None, traps=None,
                 _ignored_flags=None):
        # Set defaults; for everything except flags and _ignored_flags,
        # inherit from DefaultContext.
        try:
            dc = DefaultContext
        except NameError:
            # DefaultContext does not exist yet while it is itself being
            # constructed; it supplies every argument explicitly, so dc
            # is never read in that case.
            pass

        self.prec = prec if prec is not None else dc.prec
        self.rounding = rounding if rounding is not None else dc.rounding
        self.Emin = Emin if Emin is not None else dc.Emin
        self.Emax = Emax if Emax is not None else dc.Emax
        self.capitals = capitals if capitals is not None else dc.capitals
        self.clamp = clamp if clamp is not None else dc.clamp
        if _ignored_flags is None:
            self._ignored_flags = []
        else:
            self._ignored_flags = _ignored_flags
        # traps/flags may be a dict (used as-is) or a list/sequence of
        # signals to set, which is expanded into a full signal dict.
        if traps is None:
            self.traps = dc.traps.copy()
        elif not isinstance(traps, dict):
            self.traps = dict((s, int(s in traps)) for s in _signals + traps)
        else:
            self.traps = traps
        if flags is None:
            self.flags = dict.fromkeys(_signals, 0)
        elif not isinstance(flags, dict):
            self.flags = dict((s, int(s in flags)) for s in _signals + flags)
        else:
            self.flags = flags
def _set_integer_check(self, name, value, vmin, vmax):
if not isinstance(value, int):
raise TypeError("%s must be an integer" % name)
if vmin == '-inf':
if value > vmax:
raise ValueError("%s must be in [%s, %d]. got: %s" % (name, vmin, vmax, value))
elif vmax == 'inf':
if value < vmin:
raise ValueError("%s must be in [%d, %s]. got: %s" % (name, vmin, vmax, value))
else:
if value < vmin or value > vmax:
raise ValueError("%s must be in [%d, %d]. got %s" % (name, vmin, vmax, value))
return object.__setattr__(self, name, value)
def _set_signal_dict(self, name, d):
if not isinstance(d, dict):
raise TypeError("%s must be a signal dict" % d)
for key in d:
if not key in _signals:
raise KeyError("%s is not a valid signal dict" % d)
for key in _signals:
if not key in d:
raise KeyError("%s is not a valid signal dict" % d)
return object.__setattr__(self, name, d)
def __setattr__(self, name, value):
if name == 'prec':
return self._set_integer_check(name, value, 1, 'inf')
elif name == 'Emin':
return self._set_integer_check(name, value, '-inf', 0)
elif name == 'Emax':
return self._set_integer_check(name, value, 0, 'inf')
elif name == 'capitals':
return self._set_integer_check(name, value, 0, 1)
elif name == 'clamp':
return self._set_integer_check(name, value, 0, 1)
elif name == 'rounding':
if not value in _rounding_modes:
# raise TypeError even for strings to have consistency
# among various implementations.
raise TypeError("%s: invalid rounding mode" % value)
return object.__setattr__(self, name, value)
elif name == 'flags' or name == 'traps':
return self._set_signal_dict(name, value)
elif name == '_ignored_flags':
return object.__setattr__(self, name, value)
else:
raise AttributeError(
"'decimal.Context' object has no attribute '%s'" % name)
def __delattr__(self, name):
raise AttributeError("%s cannot be deleted" % name)
# Support for pickling, copy, and deepcopy
def __reduce__(self):
flags = [sig for sig, v in self.flags.items() if v]
traps = [sig for sig, v in self.traps.items() if v]
return (self.__class__,
(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, flags, traps))
def __repr__(self):
"""Show the current context."""
s = []
s.append('Context(prec=%(prec)d, rounding=%(rounding)s, '
'Emin=%(Emin)d, Emax=%(Emax)d, capitals=%(capitals)d, '
'clamp=%(clamp)d'
% vars(self))
names = [f.__name__ for f, v in self.flags.items() if v]
s.append('flags=[' + ', '.join(names) + ']')
names = [t.__name__ for t, v in self.traps.items() if v]
s.append('traps=[' + ', '.join(names) + ']')
return ', '.join(s) + ')'
def clear_flags(self):
"""Reset all flags to zero"""
for flag in self.flags:
self.flags[flag] = 0
def clear_traps(self):
"""Reset all traps to zero"""
for flag in self.traps:
self.traps[flag] = 0
def _shallow_copy(self):
"""Returns a shallow copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp, self.flags, self.traps,
self._ignored_flags)
return nc
def copy(self):
"""Returns a deep copy from self."""
nc = Context(self.prec, self.rounding, self.Emin, self.Emax,
self.capitals, self.clamp,
self.flags.copy(), self.traps.copy(),
self._ignored_flags)
return nc
__copy__ = copy
    def _raise_error(self, condition, explanation = None, *args):
        """Handles an error

        If the flag is in _ignored_flags, returns the default response.
        Otherwise, it sets the flag, then, if the corresponding
        trap_enabler is set, it reraises the exception.  Otherwise, it returns
        the default value after setting the flag.
        """
        # Map a derived condition (e.g. ConversionSyntax) onto the signal
        # that is actually flagged/trapped (e.g. InvalidOperation).
        error = _condition_map.get(condition, condition)
        if error in self._ignored_flags:
            # Don't touch the flag
            return error().handle(self, *args)

        self.flags[error] = 1
        if not self.traps[error]:
            # The errors define how to handle themselves.
            # Note: the *original* condition supplies the result, so any
            # condition-specific handle() behaviour is preserved.
            return condition().handle(self, *args)

        # Errors should only be risked on copies of the context
        # self._ignored_flags = []
        raise error(explanation)
def _ignore_all_flags(self):
"""Ignore all flags, if they are raised"""
return self._ignore_flags(*_signals)
def _ignore_flags(self, *flags):
"""Ignore the flags, if they are raised"""
# Do not mutate-- This way, copies of a context leave the original
# alone.
self._ignored_flags = (self._ignored_flags + list(flags))
return list(flags)
def _regard_flags(self, *flags):
"""Stop ignoring the flags, if they are raised"""
if flags and isinstance(flags[0], (tuple,list)):
flags = flags[0]
for flag in flags:
self._ignored_flags.remove(flag)
# We inherit object.__hash__, so we must deny this explicitly
__hash__ = None
def Etiny(self):
"""Returns Etiny (= Emin - prec + 1)"""
return int(self.Emin - self.prec + 1)
def Etop(self):
"""Returns maximum exponent (= Emax - prec + 1)"""
return int(self.Emax - self.prec + 1)
def _set_rounding(self, type):
"""Sets the rounding type.
Sets the rounding type, and returns the current (previous)
rounding type. Often used like:
context = context.copy()
# so you don't change the calling context
# if an error occurs in the middle.
rounding = context._set_rounding(ROUND_UP)
val = self.__sub__(other, context=context)
context._set_rounding(rounding)
This will make it round up for that operation.
"""
rounding = self.rounding
self.rounding= type
return rounding
def create_decimal(self, num='0'):
"""Creates a new Decimal instance but using self as context.
This method implements the to-number operation of the
IBM Decimal specification."""
if isinstance(num, str) and num != num.strip():
return self._raise_error(ConversionSyntax,
"no trailing or leading whitespace is "
"permitted.")
d = Decimal(num, context=self)
if d._isnan() and len(d._int) > self.prec - self.clamp:
return self._raise_error(ConversionSyntax,
"diagnostic info too long in NaN")
return d._fix(self)
def create_decimal_from_float(self, f):
"""Creates a new Decimal instance from a float but rounding using self
as the context.
>>> context = Context(prec=5, rounding=ROUND_DOWN)
>>> context.create_decimal_from_float(3.1415926535897932)
Decimal('3.1415')
>>> context = Context(prec=5, traps=[Inexact])
>>> context.create_decimal_from_float(3.1415926535897932)
Traceback (most recent call last):
...
decimal.Inexact: None
"""
d = Decimal.from_float(f) # An exact conversion
return d._fix(self) # Apply the context rounding
# Methods
def abs(self, a):
"""Returns the absolute value of the operand.
If the operand is negative, the result is the same as using the minus
operation on the operand. Otherwise, the result is the same as using
the plus operation on the operand.
>>> ExtendedContext.abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.abs(Decimal('101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.__abs__(context=self)
def add(self, a, b):
"""Return the sum of the two operands.
>>> ExtendedContext.add(Decimal('12'), Decimal('7.00'))
Decimal('19.00')
>>> ExtendedContext.add(Decimal('1E+2'), Decimal('1.01E+4'))
Decimal('1.02E+4')
>>> ExtendedContext.add(1, Decimal(2))
Decimal('3')
>>> ExtendedContext.add(Decimal(8), 5)
Decimal('13')
>>> ExtendedContext.add(5, 5)
Decimal('10')
"""
a = _convert_other(a, raiseit=True)
r = a.__add__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def _apply(self, a):
return str(a._fix(self))
def canonical(self, a):
"""Returns the same Decimal object.
As we do not have different encodings for the same number, the
received object already is in its canonical form.
>>> ExtendedContext.canonical(Decimal('2.50'))
Decimal('2.50')
"""
if not isinstance(a, Decimal):
raise TypeError("canonical requires a Decimal as an argument.")
return a.canonical()
def compare(self, a, b):
"""Compares values numerically.
If the signs of the operands differ, a value representing each operand
('-1' if the operand is less than zero, '0' if the operand is zero or
negative zero, or '1' if the operand is greater than zero) is used in
place of that operand for the comparison instead of the actual
operand.
The comparison is then effected by subtracting the second operand from
the first and then returning a value according to the result of the
subtraction: '-1' if the result is less than zero, '0' if the result is
zero or negative zero, or '1' if the result is greater than zero.
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('2.10'))
Decimal('0')
>>> ExtendedContext.compare(Decimal('3'), Decimal('2.1'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('2.1'), Decimal('-3'))
Decimal('1')
>>> ExtendedContext.compare(Decimal('-3'), Decimal('2.1'))
Decimal('-1')
>>> ExtendedContext.compare(1, 2)
Decimal('-1')
>>> ExtendedContext.compare(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare(b, context=self)
def compare_signal(self, a, b):
"""Compares the values of the two operands numerically.
It's pretty much like compare(), but all NaNs signal, with signaling
NaNs taking precedence over quiet NaNs.
>>> c = ExtendedContext
>>> c.compare_signal(Decimal('2.1'), Decimal('3'))
Decimal('-1')
>>> c.compare_signal(Decimal('2.1'), Decimal('2.1'))
Decimal('0')
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('NaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.flags[InvalidOperation] = 0
>>> print(c.flags[InvalidOperation])
0
>>> c.compare_signal(Decimal('sNaN'), Decimal('2.1'))
Decimal('NaN')
>>> print(c.flags[InvalidOperation])
1
>>> c.compare_signal(-1, 2)
Decimal('-1')
>>> c.compare_signal(Decimal(-1), 2)
Decimal('-1')
>>> c.compare_signal(-1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_signal(b, context=self)
def compare_total(self, a, b):
"""Compares two operands using their abstract representation.
This is not like the standard compare, which use their numerical
value. Note that a total ordering is defined for all possible abstract
representations.
>>> ExtendedContext.compare_total(Decimal('12.73'), Decimal('127.9'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('-127'), Decimal('12'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.3'))
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal('12.30'), Decimal('12.30'))
Decimal('0')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('12.300'))
Decimal('1')
>>> ExtendedContext.compare_total(Decimal('12.3'), Decimal('NaN'))
Decimal('-1')
>>> ExtendedContext.compare_total(1, 2)
Decimal('-1')
>>> ExtendedContext.compare_total(Decimal(1), 2)
Decimal('-1')
>>> ExtendedContext.compare_total(1, Decimal(2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.compare_total(b)
def compare_total_mag(self, a, b):
"""Compares two operands using their abstract representation ignoring sign.
Like compare_total, but with operand's sign ignored and assumed to be 0.
"""
a = _convert_other(a, raiseit=True)
return a.compare_total_mag(b)
def copy_abs(self, a):
"""Returns a copy of the operand with the sign set to 0.
>>> ExtendedContext.copy_abs(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_abs(Decimal('-100'))
Decimal('100')
>>> ExtendedContext.copy_abs(-1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_abs()
def copy_decimal(self, a):
"""Returns a copy of the decimal object.
>>> ExtendedContext.copy_decimal(Decimal('2.1'))
Decimal('2.1')
>>> ExtendedContext.copy_decimal(Decimal('-1.00'))
Decimal('-1.00')
>>> ExtendedContext.copy_decimal(1)
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
return Decimal(a)
def copy_negate(self, a):
"""Returns a copy of the operand with the sign inverted.
>>> ExtendedContext.copy_negate(Decimal('101.5'))
Decimal('-101.5')
>>> ExtendedContext.copy_negate(Decimal('-101.5'))
Decimal('101.5')
>>> ExtendedContext.copy_negate(1)
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_negate()
def copy_sign(self, a, b):
"""Copies the second operand's sign to the first one.
In detail, it returns a copy of the first operand with the sign
equal to the sign of the second operand.
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('7.33'))
Decimal('1.50')
>>> ExtendedContext.copy_sign(Decimal( '1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(Decimal('-1.50'), Decimal('-7.33'))
Decimal('-1.50')
>>> ExtendedContext.copy_sign(1, -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(Decimal(1), -2)
Decimal('-1')
>>> ExtendedContext.copy_sign(1, Decimal(-2))
Decimal('-1')
"""
a = _convert_other(a, raiseit=True)
return a.copy_sign(b)
def divide(self, a, b):
"""Decimal division in a specified context.
>>> ExtendedContext.divide(Decimal('1'), Decimal('3'))
Decimal('0.333333333')
>>> ExtendedContext.divide(Decimal('2'), Decimal('3'))
Decimal('0.666666667')
>>> ExtendedContext.divide(Decimal('5'), Decimal('2'))
Decimal('2.5')
>>> ExtendedContext.divide(Decimal('1'), Decimal('10'))
Decimal('0.1')
>>> ExtendedContext.divide(Decimal('12'), Decimal('12'))
Decimal('1')
>>> ExtendedContext.divide(Decimal('8.00'), Decimal('2'))
Decimal('4.00')
>>> ExtendedContext.divide(Decimal('2.400'), Decimal('2.0'))
Decimal('1.20')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('100'))
Decimal('10')
>>> ExtendedContext.divide(Decimal('1000'), Decimal('1'))
Decimal('1000')
>>> ExtendedContext.divide(Decimal('2.40E+6'), Decimal('2'))
Decimal('1.20E+6')
>>> ExtendedContext.divide(5, 5)
Decimal('1')
>>> ExtendedContext.divide(Decimal(5), 5)
Decimal('1')
>>> ExtendedContext.divide(5, Decimal(5))
Decimal('1')
"""
a = _convert_other(a, raiseit=True)
r = a.__truediv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divide_int(self, a, b):
"""Divides two numbers and returns the integer part of the result.
>>> ExtendedContext.divide_int(Decimal('2'), Decimal('3'))
Decimal('0')
>>> ExtendedContext.divide_int(Decimal('10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.divide_int(Decimal('1'), Decimal('0.3'))
Decimal('3')
>>> ExtendedContext.divide_int(10, 3)
Decimal('3')
>>> ExtendedContext.divide_int(Decimal(10), 3)
Decimal('3')
>>> ExtendedContext.divide_int(10, Decimal(3))
Decimal('3')
"""
a = _convert_other(a, raiseit=True)
r = a.__floordiv__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def divmod(self, a, b):
"""Return (a // b, a % b).
>>> ExtendedContext.divmod(Decimal(8), Decimal(3))
(Decimal('2'), Decimal('2'))
>>> ExtendedContext.divmod(Decimal(8), Decimal(4))
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(Decimal(8), 4)
(Decimal('2'), Decimal('0'))
>>> ExtendedContext.divmod(8, Decimal(4))
(Decimal('2'), Decimal('0'))
"""
a = _convert_other(a, raiseit=True)
r = a.__divmod__(b, context=self)
if r is NotImplemented:
raise TypeError("Unable to convert %s to Decimal" % b)
else:
return r
def exp(self, a):
"""Returns e ** a.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.exp(Decimal('-Infinity'))
Decimal('0')
>>> c.exp(Decimal('-1'))
Decimal('0.367879441')
>>> c.exp(Decimal('0'))
Decimal('1')
>>> c.exp(Decimal('1'))
Decimal('2.71828183')
>>> c.exp(Decimal('0.693147181'))
Decimal('2.00000000')
>>> c.exp(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.exp(10)
Decimal('22026.4658')
"""
a =_convert_other(a, raiseit=True)
return a.exp(context=self)
def fma(self, a, b, c):
"""Returns a multiplied by b, plus c.
The first two operands are multiplied together, using multiply,
the third operand is then added to the result of that
multiplication, using add, all with only one final rounding.
>>> ExtendedContext.fma(Decimal('3'), Decimal('5'), Decimal('7'))
Decimal('22')
>>> ExtendedContext.fma(Decimal('3'), Decimal('-5'), Decimal('7'))
Decimal('-8')
>>> ExtendedContext.fma(Decimal('888565290'), Decimal('1557.96930'), Decimal('-86087.7578'))
Decimal('1.38435736E+12')
>>> ExtendedContext.fma(1, 3, 4)
Decimal('7')
>>> ExtendedContext.fma(1, Decimal(3), 4)
Decimal('7')
>>> ExtendedContext.fma(1, 3, Decimal(4))
Decimal('7')
"""
a = _convert_other(a, raiseit=True)
return a.fma(b, c, context=self)
def is_canonical(self, a):
"""Return True if the operand is canonical; otherwise return False.
Currently, the encoding of a Decimal instance is always
canonical, so this method returns True for any Decimal.
>>> ExtendedContext.is_canonical(Decimal('2.50'))
True
"""
if not isinstance(a, Decimal):
raise TypeError("is_canonical requires a Decimal as an argument.")
return a.is_canonical()
def is_finite(self, a):
"""Return True if the operand is finite; otherwise return False.
A Decimal instance is considered finite if it is neither
infinite nor a NaN.
>>> ExtendedContext.is_finite(Decimal('2.50'))
True
>>> ExtendedContext.is_finite(Decimal('-0.3'))
True
>>> ExtendedContext.is_finite(Decimal('0'))
True
>>> ExtendedContext.is_finite(Decimal('Inf'))
False
>>> ExtendedContext.is_finite(Decimal('NaN'))
False
>>> ExtendedContext.is_finite(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_finite()
def is_infinite(self, a):
"""Return True if the operand is infinite; otherwise return False.
>>> ExtendedContext.is_infinite(Decimal('2.50'))
False
>>> ExtendedContext.is_infinite(Decimal('-Inf'))
True
>>> ExtendedContext.is_infinite(Decimal('NaN'))
False
>>> ExtendedContext.is_infinite(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_infinite()
def is_nan(self, a):
"""Return True if the operand is a qNaN or sNaN;
otherwise return False.
>>> ExtendedContext.is_nan(Decimal('2.50'))
False
>>> ExtendedContext.is_nan(Decimal('NaN'))
True
>>> ExtendedContext.is_nan(Decimal('-sNaN'))
True
>>> ExtendedContext.is_nan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_nan()
def is_normal(self, a):
"""Return True if the operand is a normal number;
otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_normal(Decimal('2.50'))
True
>>> c.is_normal(Decimal('0.1E-999'))
False
>>> c.is_normal(Decimal('0.00'))
False
>>> c.is_normal(Decimal('-Inf'))
False
>>> c.is_normal(Decimal('NaN'))
False
>>> c.is_normal(1)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_normal(context=self)
def is_qnan(self, a):
"""Return True if the operand is a quiet NaN; otherwise return False.
>>> ExtendedContext.is_qnan(Decimal('2.50'))
False
>>> ExtendedContext.is_qnan(Decimal('NaN'))
True
>>> ExtendedContext.is_qnan(Decimal('sNaN'))
False
>>> ExtendedContext.is_qnan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_qnan()
def is_signed(self, a):
"""Return True if the operand is negative; otherwise return False.
>>> ExtendedContext.is_signed(Decimal('2.50'))
False
>>> ExtendedContext.is_signed(Decimal('-12'))
True
>>> ExtendedContext.is_signed(Decimal('-0'))
True
>>> ExtendedContext.is_signed(8)
False
>>> ExtendedContext.is_signed(-8)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_signed()
def is_snan(self, a):
"""Return True if the operand is a signaling NaN;
otherwise return False.
>>> ExtendedContext.is_snan(Decimal('2.50'))
False
>>> ExtendedContext.is_snan(Decimal('NaN'))
False
>>> ExtendedContext.is_snan(Decimal('sNaN'))
True
>>> ExtendedContext.is_snan(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_snan()
def is_subnormal(self, a):
"""Return True if the operand is subnormal; otherwise return False.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.is_subnormal(Decimal('2.50'))
False
>>> c.is_subnormal(Decimal('0.1E-999'))
True
>>> c.is_subnormal(Decimal('0.00'))
False
>>> c.is_subnormal(Decimal('-Inf'))
False
>>> c.is_subnormal(Decimal('NaN'))
False
>>> c.is_subnormal(1)
False
"""
a = _convert_other(a, raiseit=True)
return a.is_subnormal(context=self)
def is_zero(self, a):
"""Return True if the operand is a zero; otherwise return False.
>>> ExtendedContext.is_zero(Decimal('0'))
True
>>> ExtendedContext.is_zero(Decimal('2.50'))
False
>>> ExtendedContext.is_zero(Decimal('-0E+2'))
True
>>> ExtendedContext.is_zero(1)
False
>>> ExtendedContext.is_zero(0)
True
"""
a = _convert_other(a, raiseit=True)
return a.is_zero()
def ln(self, a):
"""Returns the natural (base e) logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.ln(Decimal('0'))
Decimal('-Infinity')
>>> c.ln(Decimal('1.000'))
Decimal('0')
>>> c.ln(Decimal('2.71828183'))
Decimal('1.00000000')
>>> c.ln(Decimal('10'))
Decimal('2.30258509')
>>> c.ln(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.ln(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.ln(context=self)
def log10(self, a):
"""Returns the base 10 logarithm of the operand.
>>> c = ExtendedContext.copy()
>>> c.Emin = -999
>>> c.Emax = 999
>>> c.log10(Decimal('0'))
Decimal('-Infinity')
>>> c.log10(Decimal('0.001'))
Decimal('-3')
>>> c.log10(Decimal('1.000'))
Decimal('0')
>>> c.log10(Decimal('2'))
Decimal('0.301029996')
>>> c.log10(Decimal('10'))
Decimal('1')
>>> c.log10(Decimal('70'))
Decimal('1.84509804')
>>> c.log10(Decimal('+Infinity'))
Decimal('Infinity')
>>> c.log10(0)
Decimal('-Infinity')
>>> c.log10(1)
Decimal('0')
"""
a = _convert_other(a, raiseit=True)
return a.log10(context=self)
def logb(self, a):
""" Returns the exponent of the magnitude of the operand's MSD.
The result is the integer which is the exponent of the magnitude
of the most significant digit of the operand (as though the
operand were truncated to a single digit while maintaining the
value of that digit and without limiting the resulting exponent).
>>> ExtendedContext.logb(Decimal('250'))
Decimal('2')
>>> ExtendedContext.logb(Decimal('2.50'))
Decimal('0')
>>> ExtendedContext.logb(Decimal('0.03'))
Decimal('-2')
>>> ExtendedContext.logb(Decimal('0'))
Decimal('-Infinity')
>>> ExtendedContext.logb(1)
Decimal('0')
>>> ExtendedContext.logb(10)
Decimal('1')
>>> ExtendedContext.logb(100)
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.logb(context=self)
def logical_and(self, a, b):
"""Applies the logical operation 'and' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('0'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_and(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_and(Decimal('1100'), Decimal('1010'))
Decimal('1000')
>>> ExtendedContext.logical_and(Decimal('1111'), Decimal('10'))
Decimal('10')
>>> ExtendedContext.logical_and(110, 1101)
Decimal('100')
>>> ExtendedContext.logical_and(Decimal(110), 1101)
Decimal('100')
>>> ExtendedContext.logical_and(110, Decimal(1101))
Decimal('100')
"""
a = _convert_other(a, raiseit=True)
return a.logical_and(b, context=self)
def logical_invert(self, a):
"""Invert all the digits in the operand.
The operand must be a logical number.
>>> ExtendedContext.logical_invert(Decimal('0'))
Decimal('111111111')
>>> ExtendedContext.logical_invert(Decimal('1'))
Decimal('111111110')
>>> ExtendedContext.logical_invert(Decimal('111111111'))
Decimal('0')
>>> ExtendedContext.logical_invert(Decimal('101010101'))
Decimal('10101010')
>>> ExtendedContext.logical_invert(1101)
Decimal('111110010')
"""
a = _convert_other(a, raiseit=True)
return a.logical_invert(context=self)
def logical_or(self, a, b):
"""Applies the logical operation 'or' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_or(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_or(Decimal('1100'), Decimal('1010'))
Decimal('1110')
>>> ExtendedContext.logical_or(Decimal('1110'), Decimal('10'))
Decimal('1110')
>>> ExtendedContext.logical_or(110, 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(Decimal(110), 1101)
Decimal('1111')
>>> ExtendedContext.logical_or(110, Decimal(1101))
Decimal('1111')
"""
a = _convert_other(a, raiseit=True)
return a.logical_or(b, context=self)
def logical_xor(self, a, b):
"""Applies the logical operation 'xor' between each operand's digits.
The operands must be both logical numbers.
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('0'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('0'))
Decimal('1')
>>> ExtendedContext.logical_xor(Decimal('1'), Decimal('1'))
Decimal('0')
>>> ExtendedContext.logical_xor(Decimal('1100'), Decimal('1010'))
Decimal('110')
>>> ExtendedContext.logical_xor(Decimal('1111'), Decimal('10'))
Decimal('1101')
>>> ExtendedContext.logical_xor(110, 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(Decimal(110), 1101)
Decimal('1011')
>>> ExtendedContext.logical_xor(110, Decimal(1101))
Decimal('1011')
"""
a = _convert_other(a, raiseit=True)
return a.logical_xor(b, context=self)
def max(self, a, b):
"""max compares two values numerically and returns the maximum.
If either operand is a NaN then the general rules apply.
Otherwise, the operands are compared as though by the compare
operation. If they are numerically equal then the left-hand operand
is chosen as the result. Otherwise the maximum (closer to positive
infinity) of the two operands is chosen as the result.
>>> ExtendedContext.max(Decimal('3'), Decimal('2'))
Decimal('3')
>>> ExtendedContext.max(Decimal('-10'), Decimal('3'))
Decimal('3')
>>> ExtendedContext.max(Decimal('1.0'), Decimal('1'))
Decimal('1')
>>> ExtendedContext.max(Decimal('7'), Decimal('NaN'))
Decimal('7')
>>> ExtendedContext.max(1, 2)
Decimal('2')
>>> ExtendedContext.max(Decimal(1), 2)
Decimal('2')
>>> ExtendedContext.max(1, Decimal(2))
Decimal('2')
"""
a = _convert_other(a, raiseit=True)
return a.max(b, context=self)
def max_mag(self, a, b):
    """Compare the operands numerically with their signs ignored, and
    return the one with the larger absolute value.

    >>> ExtendedContext.max_mag(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    >>> ExtendedContext.max_mag(Decimal('7'), Decimal('-10'))
    Decimal('-10')
    >>> ExtendedContext.max_mag(1, -2)
    Decimal('-2')
    >>> ExtendedContext.max_mag(Decimal(1), -2)
    Decimal('-2')
    >>> ExtendedContext.max_mag(1, Decimal(-2))
    Decimal('-2')
    """
    return _convert_other(a, raiseit=True).max_mag(b, context=self)
def min(self, a, b):
    """Compare two values numerically and return the minimum.

    If either operand is a NaN then the general rules apply.
    Otherwise, the operands are compared as though by the compare
    operation.  If they are numerically equal then the left-hand operand
    is chosen as the result.  Otherwise the minimum (closer to negative
    infinity) of the two operands is chosen as the result.

    >>> ExtendedContext.min(Decimal('3'), Decimal('2'))
    Decimal('2')
    >>> ExtendedContext.min(Decimal('-10'), Decimal('3'))
    Decimal('-10')
    >>> ExtendedContext.min(Decimal('1.0'), Decimal('1'))
    Decimal('1.0')
    >>> ExtendedContext.min(Decimal('7'), Decimal('NaN'))
    Decimal('7')
    >>> ExtendedContext.min(1, 2)
    Decimal('1')
    >>> ExtendedContext.min(Decimal(1), 2)
    Decimal('1')
    >>> ExtendedContext.min(1, Decimal(29))
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).min(b, context=self)
def min_mag(self, a, b):
    """Compare the operands numerically with their signs ignored, and
    return the one with the smaller absolute value.

    >>> ExtendedContext.min_mag(Decimal('3'), Decimal('-2'))
    Decimal('-2')
    >>> ExtendedContext.min_mag(Decimal('-3'), Decimal('NaN'))
    Decimal('-3')
    >>> ExtendedContext.min_mag(1, -2)
    Decimal('1')
    >>> ExtendedContext.min_mag(Decimal(1), -2)
    Decimal('1')
    >>> ExtendedContext.min_mag(1, Decimal(-2))
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).min_mag(b, context=self)
def minus(self, a):
    """Return the negation of the operand (unary prefix minus in Python).

    Evaluated using the same rules as subtract: minus(a) is computed as
    subtract('0', a), where the '0' has the same exponent as the operand.

    >>> ExtendedContext.minus(Decimal('1.3'))
    Decimal('-1.3')
    >>> ExtendedContext.minus(Decimal('-1.3'))
    Decimal('1.3')
    >>> ExtendedContext.minus(1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).__neg__(context=self)
def multiply(self, a, b):
    """Return the product of the two operands.

    If either operand is a special value then the general rules apply.
    Otherwise, the operands are multiplied together
    ('long multiplication'), resulting in a number which may be as long as
    the sum of the lengths of the two operands.

    >>> ExtendedContext.multiply(Decimal('1.20'), Decimal('3'))
    Decimal('3.60')
    >>> ExtendedContext.multiply(Decimal('7'), Decimal('3'))
    Decimal('21')
    >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('0.8'))
    Decimal('0.72')
    >>> ExtendedContext.multiply(Decimal('0.9'), Decimal('-0'))
    Decimal('-0.0')
    >>> ExtendedContext.multiply(Decimal('654321'), Decimal('654321'))
    Decimal('4.28135971E+11')
    >>> ExtendedContext.multiply(7, 7)
    Decimal('49')
    >>> ExtendedContext.multiply(Decimal(7), 7)
    Decimal('49')
    >>> ExtendedContext.multiply(7, Decimal(7))
    Decimal('49')
    """
    product = _convert_other(a, raiseit=True).__mul__(b, context=self)
    # Decimal.__mul__ returns NotImplemented (rather than raising) when b
    # cannot be converted; turn that into an explicit TypeError here.
    if product is NotImplemented:
        raise TypeError("Unable to convert %s to Decimal" % b)
    return product
def next_minus(self, a):
    """Return the largest representable number smaller than a.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> ExtendedContext.next_minus(Decimal('1'))
    Decimal('0.999999999')
    >>> c.next_minus(Decimal('1E-1007'))
    Decimal('0E-1007')
    >>> ExtendedContext.next_minus(Decimal('-1.00000003'))
    Decimal('-1.00000004')
    >>> c.next_minus(Decimal('Infinity'))
    Decimal('9.99999999E+999')
    >>> c.next_minus(1)
    Decimal('0.999999999')
    """
    return _convert_other(a, raiseit=True).next_minus(context=self)
def next_plus(self, a):
    """Return the smallest representable number larger than a.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> ExtendedContext.next_plus(Decimal('1'))
    Decimal('1.00000001')
    >>> c.next_plus(Decimal('-1E-1007'))
    Decimal('-0E-1007')
    >>> ExtendedContext.next_plus(Decimal('-1.00000003'))
    Decimal('-1.00000002')
    >>> c.next_plus(Decimal('-Infinity'))
    Decimal('-9.99999999E+999')
    >>> c.next_plus(1)
    Decimal('1.00000001')
    """
    return _convert_other(a, raiseit=True).next_plus(context=self)
def next_toward(self, a, b):
    """Return the number closest to a, in the direction towards b.

    The result is the closest representable number to the first
    operand (but not the first operand itself) in the direction of
    the second operand, unless both operands have the same value.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.next_toward(Decimal('1'), Decimal('2'))
    Decimal('1.00000001')
    >>> c.next_toward(Decimal('-1E-1007'), Decimal('1'))
    Decimal('-0E-1007')
    >>> c.next_toward(Decimal('-1.00000003'), Decimal('0'))
    Decimal('-1.00000002')
    >>> c.next_toward(Decimal('1'), Decimal('0'))
    Decimal('0.999999999')
    >>> c.next_toward(Decimal('1E-1007'), Decimal('-100'))
    Decimal('0E-1007')
    >>> c.next_toward(Decimal('-1.00000003'), Decimal('-10'))
    Decimal('-1.00000004')
    >>> c.next_toward(Decimal('0.00'), Decimal('-0.0000'))
    Decimal('-0.00')
    >>> c.next_toward(0, 1)
    Decimal('1E-1007')
    >>> c.next_toward(Decimal(0), 1)
    Decimal('1E-1007')
    >>> c.next_toward(0, Decimal(1))
    Decimal('1E-1007')
    """
    return _convert_other(a, raiseit=True).next_toward(b, context=self)
def normalize(self, a):
    """Reduce the operand to its simplest form.

    Essentially a plus operation with all trailing zeros removed from
    the result.

    >>> ExtendedContext.normalize(Decimal('2.1'))
    Decimal('2.1')
    >>> ExtendedContext.normalize(Decimal('-2.0'))
    Decimal('-2')
    >>> ExtendedContext.normalize(Decimal('1.200'))
    Decimal('1.2')
    >>> ExtendedContext.normalize(Decimal('-120'))
    Decimal('-1.2E+2')
    >>> ExtendedContext.normalize(Decimal('120.00'))
    Decimal('1.2E+2')
    >>> ExtendedContext.normalize(Decimal('0.00'))
    Decimal('0')
    >>> ExtendedContext.normalize(6)
    Decimal('6')
    """
    return _convert_other(a, raiseit=True).normalize(context=self)
def number_class(self, a):
    """Return a string indicating the class of the operand.

    The result is one of the following ten strings:

      * '-Infinity', indicating that the operand is negative infinity.
      * '-Normal', indicating a negative normal number.
      * '-Subnormal', indicating a negative subnormal number.
      * '-Zero', indicating a negative zero.
      * '+Zero', indicating a positive zero.
      * '+Subnormal', indicating a positive subnormal number.
      * '+Normal', indicating a positive normal number.
      * '+Infinity', indicating that the operand is positive infinity.
      * 'NaN', indicating a quiet NaN.
      * 'sNaN', indicating a signaling NaN.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.number_class(Decimal('Infinity'))
    '+Infinity'
    >>> c.number_class(Decimal('1E-10'))
    '+Normal'
    >>> c.number_class(Decimal('2.50'))
    '+Normal'
    >>> c.number_class(Decimal('0.1E-999'))
    '+Subnormal'
    >>> c.number_class(Decimal('0'))
    '+Zero'
    >>> c.number_class(Decimal('-0'))
    '-Zero'
    >>> c.number_class(Decimal('-0.1E-999'))
    '-Subnormal'
    >>> c.number_class(Decimal('-1E-10'))
    '-Normal'
    >>> c.number_class(Decimal('-2.50'))
    '-Normal'
    >>> c.number_class(Decimal('-Infinity'))
    '-Infinity'
    >>> c.number_class(Decimal('NaN'))
    'NaN'
    >>> c.number_class(Decimal('-NaN'))
    'NaN'
    >>> c.number_class(Decimal('sNaN'))
    'sNaN'
    >>> c.number_class(123)
    '+Normal'
    """
    return _convert_other(a, raiseit=True).number_class(context=self)
def plus(self, a):
    """Return the operand rounded to context precision (unary prefix plus).

    Evaluated using the same rules as add: plus(a) is computed as
    add('0', a), where the '0' has the same exponent as the operand.

    >>> ExtendedContext.plus(Decimal('1.3'))
    Decimal('1.3')
    >>> ExtendedContext.plus(Decimal('-1.3'))
    Decimal('-1.3')
    >>> ExtendedContext.plus(-1)
    Decimal('-1')
    """
    return _convert_other(a, raiseit=True).__pos__(context=self)
def power(self, a, b, modulo=None):
    """Raise a to the power of b, reduced modulo `modulo` if given.

    With two arguments, compute a**b.  If a is negative then b
    must be integral.  The result will be inexact unless b is
    integral and the result is finite and can be expressed exactly
    in 'precision' digits.

    With three arguments, compute (a**b) % modulo.  For the
    three argument form, the following restrictions on the
    arguments hold:

     - all three arguments must be integral
     - b must be nonnegative
     - at least one of a or b must be nonzero
     - modulo must be nonzero and have at most 'precision' digits

    The result of pow(a, b, modulo) is identical to the result
    that would be obtained by computing (a**b) % modulo with
    unbounded precision, but is computed more efficiently.  It is
    always exact.

    >>> c = ExtendedContext.copy()
    >>> c.Emin = -999
    >>> c.Emax = 999
    >>> c.power(Decimal('2'), Decimal('3'))
    Decimal('8')
    >>> c.power(Decimal('-2'), Decimal('3'))
    Decimal('-8')
    >>> c.power(Decimal('2'), Decimal('-3'))
    Decimal('0.125')
    >>> c.power(Decimal('1.7'), Decimal('8'))
    Decimal('69.7575744')
    >>> c.power(Decimal('10'), Decimal('0.301029996'))
    Decimal('2.00000000')
    >>> c.power(Decimal('Infinity'), Decimal('-1'))
    Decimal('0')
    >>> c.power(Decimal('Infinity'), Decimal('0'))
    Decimal('1')
    >>> c.power(Decimal('Infinity'), Decimal('1'))
    Decimal('Infinity')
    >>> c.power(Decimal('-Infinity'), Decimal('-1'))
    Decimal('-0')
    >>> c.power(Decimal('-Infinity'), Decimal('0'))
    Decimal('1')
    >>> c.power(Decimal('-Infinity'), Decimal('1'))
    Decimal('-Infinity')
    >>> c.power(Decimal('-Infinity'), Decimal('2'))
    Decimal('Infinity')
    >>> c.power(Decimal('0'), Decimal('0'))
    Decimal('NaN')

    >>> c.power(Decimal('3'), Decimal('7'), Decimal('16'))
    Decimal('11')
    >>> c.power(Decimal('-3'), Decimal('7'), Decimal('16'))
    Decimal('-11')
    >>> c.power(Decimal('-3'), Decimal('8'), Decimal('16'))
    Decimal('1')
    >>> c.power(Decimal('3'), Decimal('7'), Decimal('-16'))
    Decimal('11')
    >>> c.power(Decimal('23E12345'), Decimal('67E189'), Decimal('123456789'))
    Decimal('11729830')
    >>> c.power(Decimal('-0'), Decimal('17'), Decimal('1729'))
    Decimal('-0')
    >>> c.power(Decimal('-23'), Decimal('0'), Decimal('65537'))
    Decimal('1')
    >>> ExtendedContext.power(7, 7)
    Decimal('823543')
    >>> ExtendedContext.power(Decimal(7), 7)
    Decimal('823543')
    >>> ExtendedContext.power(7, Decimal(7), 2)
    Decimal('1')
    """
    result = _convert_other(a, raiseit=True).__pow__(b, modulo, context=self)
    # Decimal.__pow__ returns NotImplemented (rather than raising) when b
    # cannot be converted; turn that into an explicit TypeError here.
    if result is NotImplemented:
        raise TypeError("Unable to convert %s to Decimal" % b)
    return result
def quantize(self, a, b):
    """Return a value equal to 'a' (rounded), having the exponent of 'b'.

    The coefficient of the result is derived from that of the left-hand
    operand.  It may be rounded using the current rounding setting (if the
    exponent is being increased), multiplied by a positive power of ten (if
    the exponent is being decreased), or is unchanged (if the exponent is
    already equal to that of the right-hand operand).

    Unlike other operations, if the length of the coefficient after the
    quantize operation would be greater than precision then an Invalid
    operation condition is raised.  This guarantees that, unless there is
    an error condition, the exponent of the result of a quantize is always
    equal to that of the right-hand operand.

    Also unlike other operations, quantize will never raise Underflow, even
    if the result is subnormal and inexact.

    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.001'))
    Decimal('2.170')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.01'))
    Decimal('2.17')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('0.1'))
    Decimal('2.2')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+0'))
    Decimal('2')
    >>> ExtendedContext.quantize(Decimal('2.17'), Decimal('1e+1'))
    Decimal('0E+1')
    >>> ExtendedContext.quantize(Decimal('-Inf'), Decimal('Infinity'))
    Decimal('-Infinity')
    >>> ExtendedContext.quantize(Decimal('2'), Decimal('Infinity'))
    Decimal('NaN')
    >>> ExtendedContext.quantize(Decimal('-0.1'), Decimal('1'))
    Decimal('-0')
    >>> ExtendedContext.quantize(Decimal('-0'), Decimal('1e+5'))
    Decimal('-0E+5')
    >>> ExtendedContext.quantize(Decimal('+35236450.6'), Decimal('1e-2'))
    Decimal('NaN')
    >>> ExtendedContext.quantize(Decimal('-35236450.6'), Decimal('1e-2'))
    Decimal('NaN')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-1'))
    Decimal('217.0')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e-0'))
    Decimal('217')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+1'))
    Decimal('2.2E+2')
    >>> ExtendedContext.quantize(Decimal('217'), Decimal('1e+2'))
    Decimal('2E+2')
    >>> ExtendedContext.quantize(1, 2)
    Decimal('1')
    >>> ExtendedContext.quantize(Decimal(1), 2)
    Decimal('1')
    >>> ExtendedContext.quantize(1, Decimal(2))
    Decimal('1')
    """
    return _convert_other(a, raiseit=True).quantize(b, context=self)
def radix(self):
    """Return Decimal(10): the radix (base) of the Decimal arithmetic.

    >>> ExtendedContext.radix()
    Decimal('10')
    """
    return Decimal(10)
def remainder(self, a, b):
    """Return the remainder from integer division.

    The result is the residue of the dividend after the operation of
    calculating integer division as described for divide-integer, rounded
    to precision digits if necessary.  The sign of the result, if
    non-zero, is the same as that of the original dividend.

    This operation will fail under the same conditions as integer division
    (that is, if integer division on the same two operands would fail, the
    remainder cannot be calculated).

    >>> ExtendedContext.remainder(Decimal('2.1'), Decimal('3'))
    Decimal('2.1')
    >>> ExtendedContext.remainder(Decimal('10'), Decimal('3'))
    Decimal('1')
    >>> ExtendedContext.remainder(Decimal('-10'), Decimal('3'))
    Decimal('-1')
    >>> ExtendedContext.remainder(Decimal('10.2'), Decimal('1'))
    Decimal('0.2')
    >>> ExtendedContext.remainder(Decimal('10'), Decimal('0.3'))
    Decimal('0.1')
    >>> ExtendedContext.remainder(Decimal('3.6'), Decimal('1.3'))
    Decimal('1.0')
    >>> ExtendedContext.remainder(22, 6)
    Decimal('4')
    >>> ExtendedContext.remainder(Decimal(22), 6)
    Decimal('4')
    >>> ExtendedContext.remainder(22, Decimal(6))
    Decimal('4')
    """
    residue = _convert_other(a, raiseit=True).__mod__(b, context=self)
    # Decimal.__mod__ returns NotImplemented (rather than raising) when b
    # cannot be converted; turn that into an explicit TypeError here.
    if residue is NotImplemented:
        raise TypeError("Unable to convert %s to Decimal" % b)
    return residue
def remainder_near(self, a, b):
    """Return "a - b * n", where n is the integer nearest to the exact
    value of "a / b" (if two integers are equally near then the even one
    is chosen).  If the result is equal to 0 then its sign will be the
    sign of a.

    This operation will fail under the same conditions as integer division
    (that is, if integer division on the same two operands would fail, the
    remainder cannot be calculated).

    >>> ExtendedContext.remainder_near(Decimal('2.1'), Decimal('3'))
    Decimal('-0.9')
    >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('6'))
    Decimal('-2')
    >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('3'))
    Decimal('1')
    >>> ExtendedContext.remainder_near(Decimal('-10'), Decimal('3'))
    Decimal('-1')
    >>> ExtendedContext.remainder_near(Decimal('10.2'), Decimal('1'))
    Decimal('0.2')
    >>> ExtendedContext.remainder_near(Decimal('10'), Decimal('0.3'))
    Decimal('0.1')
    >>> ExtendedContext.remainder_near(Decimal('3.6'), Decimal('1.3'))
    Decimal('-0.3')
    >>> ExtendedContext.remainder_near(3, 11)
    Decimal('3')
    >>> ExtendedContext.remainder_near(Decimal(3), 11)
    Decimal('3')
    >>> ExtendedContext.remainder_near(3, Decimal(11))
    Decimal('3')
    """
    return _convert_other(a, raiseit=True).remainder_near(b, context=self)
def rotate(self, a, b):
    """Return a rotated copy of a, rotated b places.

    The coefficient of the result is a rotated copy of the digits in
    the coefficient of the first operand.  The number of places of
    rotation is taken from the absolute value of the second operand,
    with the rotation being to the left if the second operand is
    positive or to the right otherwise.

    >>> ExtendedContext.rotate(Decimal('34'), Decimal('8'))
    Decimal('400000003')
    >>> ExtendedContext.rotate(Decimal('12'), Decimal('9'))
    Decimal('12')
    >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('-2'))
    Decimal('891234567')
    >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('0'))
    Decimal('123456789')
    >>> ExtendedContext.rotate(Decimal('123456789'), Decimal('+2'))
    Decimal('345678912')
    >>> ExtendedContext.rotate(1333333, 1)
    Decimal('13333330')
    >>> ExtendedContext.rotate(Decimal(1333333), 1)
    Decimal('13333330')
    >>> ExtendedContext.rotate(1333333, Decimal(1))
    Decimal('13333330')
    """
    return _convert_other(a, raiseit=True).rotate(b, context=self)
def same_quantum(self, a, b):
    """Return True if the two operands have the same exponent.

    The result is never affected by either the sign or the coefficient of
    either operand.

    >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.001'))
    False
    >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('0.01'))
    True
    >>> ExtendedContext.same_quantum(Decimal('2.17'), Decimal('1'))
    False
    >>> ExtendedContext.same_quantum(Decimal('Inf'), Decimal('-Inf'))
    True
    >>> ExtendedContext.same_quantum(10000, -1)
    True
    >>> ExtendedContext.same_quantum(Decimal(10000), -1)
    True
    >>> ExtendedContext.same_quantum(10000, Decimal(-1))
    True
    """
    # Note: same_quantum is context-independent, so no context is passed.
    return _convert_other(a, raiseit=True).same_quantum(b)
def scaleb(self, a, b):
    """Return the first operand with its exponent adjusted by the second.

    >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('-2'))
    Decimal('0.0750')
    >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('0'))
    Decimal('7.50')
    >>> ExtendedContext.scaleb(Decimal('7.50'), Decimal('3'))
    Decimal('7.50E+3')
    >>> ExtendedContext.scaleb(1, 4)
    Decimal('1E+4')
    >>> ExtendedContext.scaleb(Decimal(1), 4)
    Decimal('1E+4')
    >>> ExtendedContext.scaleb(1, Decimal(4))
    Decimal('1E+4')
    """
    return _convert_other(a, raiseit=True).scaleb(b, context=self)
def shift(self, a, b):
    """Return a shifted copy of a, shifted b places.

    The coefficient of the result is a shifted copy of the digits
    in the coefficient of the first operand.  The number of places
    to shift is taken from the absolute value of the second operand,
    with the shift being to the left if the second operand is
    positive or to the right otherwise.  Digits shifted into the
    coefficient are zeros.

    >>> ExtendedContext.shift(Decimal('34'), Decimal('8'))
    Decimal('400000000')
    >>> ExtendedContext.shift(Decimal('12'), Decimal('9'))
    Decimal('0')
    >>> ExtendedContext.shift(Decimal('123456789'), Decimal('-2'))
    Decimal('1234567')
    >>> ExtendedContext.shift(Decimal('123456789'), Decimal('0'))
    Decimal('123456789')
    >>> ExtendedContext.shift(Decimal('123456789'), Decimal('+2'))
    Decimal('345678900')
    >>> ExtendedContext.shift(88888888, 2)
    Decimal('888888800')
    >>> ExtendedContext.shift(Decimal(88888888), 2)
    Decimal('888888800')
    >>> ExtendedContext.shift(88888888, Decimal(2))
    Decimal('888888800')
    """
    return _convert_other(a, raiseit=True).shift(b, context=self)
def sqrt(self, a):
    """Return the square root of a non-negative number to context precision.

    If the result must be inexact, it is rounded using the round-half-even
    algorithm.

    >>> ExtendedContext.sqrt(Decimal('0'))
    Decimal('0')
    >>> ExtendedContext.sqrt(Decimal('-0'))
    Decimal('-0')
    >>> ExtendedContext.sqrt(Decimal('0.39'))
    Decimal('0.624499800')
    >>> ExtendedContext.sqrt(Decimal('100'))
    Decimal('10')
    >>> ExtendedContext.sqrt(Decimal('1'))
    Decimal('1')
    >>> ExtendedContext.sqrt(Decimal('1.0'))
    Decimal('1.0')
    >>> ExtendedContext.sqrt(Decimal('1.00'))
    Decimal('1.0')
    >>> ExtendedContext.sqrt(Decimal('7'))
    Decimal('2.64575131')
    >>> ExtendedContext.sqrt(Decimal('10'))
    Decimal('3.16227766')
    >>> ExtendedContext.sqrt(2)
    Decimal('1.41421356')
    >>> ExtendedContext.prec
    9
    """
    return _convert_other(a, raiseit=True).sqrt(context=self)
def subtract(self, a, b):
    """Return the difference between the two operands.

    >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.07'))
    Decimal('0.23')
    >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('1.30'))
    Decimal('0.00')
    >>> ExtendedContext.subtract(Decimal('1.3'), Decimal('2.07'))
    Decimal('-0.77')
    >>> ExtendedContext.subtract(8, 5)
    Decimal('3')
    >>> ExtendedContext.subtract(Decimal(8), 5)
    Decimal('3')
    >>> ExtendedContext.subtract(8, Decimal(5))
    Decimal('3')
    """
    difference = _convert_other(a, raiseit=True).__sub__(b, context=self)
    # Decimal.__sub__ returns NotImplemented (rather than raising) when b
    # cannot be converted; turn that into an explicit TypeError here.
    if difference is NotImplemented:
        raise TypeError("Unable to convert %s to Decimal" % b)
    return difference
def to_eng_string(self, a):
    """Convert a number to a string, using engineering notation if an
    exponent is needed.

    Engineering notation has an exponent which is a multiple of 3.  The
    conversion itself is not affected by the context.
    """
    return _convert_other(a, raiseit=True).to_eng_string(context=self)
def to_sci_string(self, a):
    """Convert a number to a string, using scientific notation.

    The operation is not affected by the context.
    """
    return _convert_other(a, raiseit=True).__str__(context=self)
def to_integral_exact(self, a):
    """Round to an integer, signaling Inexact and Rounded as appropriate.

    When the operand has a negative exponent, the result is the same
    as using the quantize() operation using the given operand as the
    left-hand-operand, 1E+0 as the right-hand-operand, and the precision
    of the operand as the precision setting; Inexact and Rounded flags
    are allowed in this operation.  The rounding mode is taken from the
    context.

    >>> ExtendedContext.to_integral_exact(Decimal('2.1'))
    Decimal('2')
    >>> ExtendedContext.to_integral_exact(Decimal('100'))
    Decimal('100')
    >>> ExtendedContext.to_integral_exact(Decimal('100.0'))
    Decimal('100')
    >>> ExtendedContext.to_integral_exact(Decimal('101.5'))
    Decimal('102')
    >>> ExtendedContext.to_integral_exact(Decimal('-101.5'))
    Decimal('-102')
    >>> ExtendedContext.to_integral_exact(Decimal('10E+5'))
    Decimal('1.0E+6')
    >>> ExtendedContext.to_integral_exact(Decimal('7.89E+77'))
    Decimal('7.89E+77')
    >>> ExtendedContext.to_integral_exact(Decimal('-Inf'))
    Decimal('-Infinity')
    """
    return _convert_other(a, raiseit=True).to_integral_exact(context=self)
def to_integral_value(self, a):
    """Round to an integer, without signaling Inexact or Rounded.

    When the operand has a negative exponent, the result is the same
    as using the quantize() operation using the given operand as the
    left-hand-operand, 1E+0 as the right-hand-operand, and the precision
    of the operand as the precision setting, except that no flags will
    be set.  The rounding mode is taken from the context.

    >>> ExtendedContext.to_integral_value(Decimal('2.1'))
    Decimal('2')
    >>> ExtendedContext.to_integral_value(Decimal('100'))
    Decimal('100')
    >>> ExtendedContext.to_integral_value(Decimal('100.0'))
    Decimal('100')
    >>> ExtendedContext.to_integral_value(Decimal('101.5'))
    Decimal('102')
    >>> ExtendedContext.to_integral_value(Decimal('-101.5'))
    Decimal('-102')
    >>> ExtendedContext.to_integral_value(Decimal('10E+5'))
    Decimal('1.0E+6')
    >>> ExtendedContext.to_integral_value(Decimal('7.89E+77'))
    Decimal('7.89E+77')
    >>> ExtendedContext.to_integral_value(Decimal('-Inf'))
    Decimal('-Infinity')
    """
    return _convert_other(a, raiseit=True).to_integral_value(context=self)

# The method was renamed from to_integral to to_integral_value; keep the
# old name as an alias for backwards compatibility.
to_integral = to_integral_value
class _WorkRep(object):
__slots__ = ('sign','int','exp')
# sign: 0 or 1
# int: int
# exp: None, int, or string
def __init__(self, value=None):
if value is None:
self.sign = None
self.int = 0
self.exp = None
elif isinstance(value, Decimal):
self.sign = value._sign
self.int = int(value._int)
self.exp = value._exp
else:
# assert isinstance(value, tuple)
self.sign = value[0]
self.int = value[1]
self.exp = value[2]
def __repr__(self):
return "(%r, %r, %r)" % (self.sign, self.int, self.exp)
__str__ = __repr__
def _normalize(op1, op2, prec = 0):
"""Normalizes op1, op2 to have the same exp and length of coefficient.
Done during addition.
"""
if op1.exp < op2.exp:
tmp = op2
other = op1
else:
tmp = op1
other = op2
# Let exp = min(tmp.exp - 1, tmp.adjusted() - precision - 1).
# Then adding 10**exp to tmp has the same effect (after rounding)
# as adding any positive quantity smaller than 10**exp; similarly
# for subtraction. So if other is smaller than 10**exp we replace
# it with 10**exp. This avoids tmp.exp - other.exp getting too large.
tmp_len = len(str(tmp.int))
other_len = len(str(other.int))
exp = tmp.exp + min(-1, tmp_len - prec - 2)
if other_len + other.exp - 1 < exp:
other.int = 1
other.exp = exp
tmp.int *= 10 ** (tmp.exp - other.exp)
tmp.exp = other.exp
return op1, op2
##### Integer arithmetic functions used by ln, log10, exp and __pow__ #####

# _nbits(n) gives the number of bits in the binary representation of
# abs(n); it is 0 for n == 0 (int.bit_length semantics).
_nbits = int.bit_length
def _decimal_lshift_exact(n, e):
""" Given integers n and e, return n * 10**e if it's an integer, else None.
The computation is designed to avoid computing large powers of 10
unnecessarily.
>>> _decimal_lshift_exact(3, 4)
30000
>>> _decimal_lshift_exact(300, -999999999) # returns None
"""
if n == 0:
return 0
elif e >= 0:
return n * 10**e
else:
# val_n = largest power of 10 dividing n.
str_n = str(abs(n))
val_n = len(str_n) - len(str_n.rstrip('0'))
return None if val_n < -e else n // 10**-e
def _sqrt_nearest(n, a):
"""Closest integer to the square root of the positive integer n. a is
an initial approximation to the square root. Any positive integer
will do for a, but the closer a is to the square root of n the
faster convergence will be.
"""
if n <= 0 or a <= 0:
raise ValueError("Both arguments to _sqrt_nearest should be positive.")
b=0
while a != b:
b, a = a, a--n//a>>1
return a
def _rshift_nearest(x, shift):
"""Given an integer x and a nonnegative integer shift, return closest
integer to x / 2**shift; use round-to-even in case of a tie.
"""
b, q = 1 << shift, x >> shift
return q + (2*(x & (b-1)) + (q&1) > b)
def _div_nearest(a, b):
"""Closest integer to a/b, a and b positive integers; rounds to even
in the case of a tie.
"""
q, r = divmod(a, b)
return q + (2*r + (q&1) > b)
def _ilog(x, M, L = 8):
    """Integer approximation to M*log(x/M), with absolute error boundable
    in terms only of x/M.

    Given positive integers x and M, return an integer approximation to
    M * log(x/M).  For L = 8 and 0.1 <= x/M <= 10 the difference
    between the approximation and the exact result is at most 22.  For
    L = 8 and 1.0 <= x/M <= 10.0 the difference is at most 15.  In
    both cases these are upper bounds on the error; it will usually be
    much smaller."""
    # The basic algorithm is the following: let log1p be the function
    # log1p(x) = log(1+x).  Then log(x/M) = log1p((x-M)/M).  We use
    # the reduction
    #
    #    log1p(y) = 2*log1p(y/(1+sqrt(1+y)))
    #
    # repeatedly until the argument to log1p is small (< 2**-L in
    # absolute value).  For small y we can use the Taylor series
    # expansion
    #
    #    log1p(y) ~ y - y**2/2 + y**3/3 - ... - (-y)**T/T
    #
    # truncating at T such that y**T is small enough.  The whole
    # computation is carried out in a form of fixed-point arithmetic,
    # with a real number z being represented by an integer
    # approximation to z*M.  To avoid loss of precision, the y below
    # is actually an integer approximation to 2**R*y*M, where R is the
    # number of reductions performed so far.
    y = x-M
    # argument reduction; R = number of reductions performed
    # The loop condition tests |y| (at its current scaling) against
    # M * 2**-L, using shifts in the appropriate direction for R <= L
    # and R > L.
    while (R <= L and abs(y) << L-R >= M or
           R > L and abs(y) >> R-L >= M):
        y = _div_nearest((M*y) << 1,
                         M + _sqrt_nearest(M*(M+_rshift_nearest(y, R)), M))
        R += 1
    # Taylor series with T terms
    # T ~ log2(M)/L, chosen so that the truncated tail |y|**T < 2**(-L*T)
    # is negligible at this precision (see the comment block above).
    T = -int(-10*len(str(M))//(3*L))
    yshift = _rshift_nearest(y, R)
    w = _div_nearest(M, T)
    # Evaluate the series from the T-th term down (Horner-style).
    for k in range(T-1, 0, -1):
        w = _div_nearest(M, k) - _div_nearest(yshift*w, M)
    return _div_nearest(w*y, M)
def _dlog10(c, e, p):
    """Given integers c, e and p with c > 0, p >= 0, compute an integer
    approximation to 10**p * log10(c*10**e), with an absolute error of
    at most 1.  Assumes that c*10**e is not exactly 1."""
    # increase precision by 2; compensate for this by dividing
    # final result by 100
    p += 2

    # write c*10**e as d*10**f with either:
    #   f >= 0 and 1 <= d <= 10, or
    #   f <= 0 and 0.1 <= d <= 1.
    # Thus for c*10**e close to 1, f = 0
    l = len(str(c))
    f = e+l - (e+l >= 1)

    if p > 0:
        M = 10**p
        k = e+p-f
        # rescale c so that it represents d to p digits
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k)

        # log10(d) = log(d) / log(10), everything scaled by M = 10**p
        log_d = _ilog(c, M) # error < 5 + 22 = 27
        log_10 = _log10_digits(p) # error < 1
        log_d = _div_nearest(log_d*M, log_10)
        log_tenpower = f*M # exact
    else:
        # p <= 0: |log10(d)| < 1, so approximating it by 0 is good enough
        log_d = 0 # error < 2.31
        log_tenpower = _div_nearest(f, 10**-p) # error < 0.5

    # undo the precision increase: divide by 10**2
    return _div_nearest(log_tenpower+log_d, 100)
def _dlog(c, e, p):
    """Given integers c, e and p with c > 0, compute an integer
    approximation to 10**p * log(c*10**e), with an absolute error of
    at most 1.  Assumes that c*10**e is not exactly 1."""
    # Increase precision by 2.  The precision increase is compensated
    # for at the end with a division by 100.
    p += 2

    # rewrite c*10**e as d*10**f with either f >= 0 and 1 <= d <= 10,
    # or f <= 0 and 0.1 <= d <= 1.  Then we can compute 10**p * log(c*10**e)
    # as 10**p * log(d) + 10**p*f * log(10).
    l = len(str(c))
    f = e+l - (e+l >= 1)

    # compute approximation to 10**p*log(d), with error < 27
    if p > 0:
        k = e+p-f
        # rescale c so that it represents d to p digits
        if k >= 0:
            c *= 10**k
        else:
            c = _div_nearest(c, 10**-k) # error of <= 0.5 in c

        # _ilog magnifies existing error in c by a factor of at most 10
        log_d = _ilog(c, 10**p) # error < 5 + 22 = 27
    else:
        # p <= 0: just approximate the whole thing by 0; error < 2.31
        log_d = 0

    # compute approximation to f*10**p*log(10), with error < 11.
    if f:
        extra = len(str(abs(f)))-1
        if p + extra >= 0:
            # error in f * _log10_digits(p+extra) < |f| * 1 = |f|
            # after division, error < |f|/10**extra + 0.5 < 10 + 0.5 < 11
            f_log_ten = _div_nearest(f*_log10_digits(p+extra), 10**extra)
        else:
            # |f * log(10)| < 10**-p/2 at this precision; 0 is close enough
            f_log_ten = 0
    else:
        f_log_ten = 0

    # error in sum < 11+27 = 38; error after division < 0.38 + 0.5 < 1
    return _div_nearest(f_log_ten + log_d, 100)
class _Log10Memoize(object):
    """Class to compute, store, and allow retrieval of, digits of the
    constant log(10) = 2.302585....  This constant is needed by
    Decimal.ln, Decimal.log10, Decimal.exp and Decimal.__pow__."""
    def __init__(self):
        # Seed cache: the first 47 digits of log(10), stored as a string.
        self.digits = "23025850929940456840179914546843642076011014886"

    def getdigits(self, p):
        """Given an integer p >= 0, return floor(10**p)*log(10).

        For example, self.getdigits(3) returns 2302.
        """
        # digits are stored as a string, for quick conversion to
        # integer in the case that we've already computed enough
        # digits; the stored digits should always be correct
        # (truncated, not rounded to nearest).
        if p < 0:
            raise ValueError("p should be nonnegative")

        if p >= len(self.digits):
            # compute p+3, p+6, p+9, ... digits; continue until at
            # least one of the extra digits is nonzero
            extra = 3
            while True:
                # compute p+extra digits, correct to within 1ulp
                M = 10**(p+extra+2)
                digits = str(_div_nearest(_ilog(10*M, M), 100))
                if digits[-extra:] != '0'*extra:
                    break
                extra += 3
            # keep all reliable digits so far; remove trailing zeros
            # and next nonzero digit
            # (dropping the last nonzero digit guarantees the kept
            # prefix is a correct truncation despite the 1ulp error)
            self.digits = digits.rstrip('0')[:-1]
        return int(self.digits[:p+1])

# Module-level convenience: bound method of a single shared cache instance.
_log10_digits = _Log10Memoize().getdigits
def _iexp(x, M, L=8):
    """Given integers x and M, M > 0, such that x/M is small in absolute
    value, compute an integer approximation to M*exp(x/M).  For 0 <=
    x/M <= 2.4, the absolute error in the result is bounded by 60 (and
    is usually much smaller)."""

    # Algorithm: to compute exp(z) for a real number z, first divide z
    # by a suitable power R of 2 so that |z/2**R| < 2**-L.  Then
    # compute expm1(z/2**R) = exp(z/2**R) - 1 using the usual Taylor
    # series
    #
    #     expm1(x) = x + x**2/2! + x**3/3! + ...
    #
    # Now use the identity
    #
    #     expm1(2x) = expm1(x)*(expm1(x)+2)
    #
    # R times to compute the sequence expm1(z/2**R),
    # expm1(z/2**(R-1)), ... , exp(z/2), exp(z).

    # Find R such that x/2**R/M <= 2**-L
    R = _nbits((x<<L)//M)

    # Taylor series.  (2**L)**T > M
    T = -int(-10*len(str(M))//(3*L))
    y = _div_nearest(x, T)
    Mshift = M<<R
    # evaluate the series from the T-th term down (Horner-style),
    # in fixed point scaled by Mshift = M * 2**R
    for i in range(T-1, 0, -1):
        y = _div_nearest(x*(Mshift + y), Mshift * i)

    # Expansion
    # apply expm1(2x) = expm1(x)*(expm1(x)+2) R times to undo the halvings
    for k in range(R-1, -1, -1):
        Mshift = M<<(k+2)
        y = _div_nearest(y*(y+Mshift), Mshift)

    return M+y
def _dexp(c, e, p):
    """Compute an approximation to exp(c*10**e), with p decimal places of
    precision.

    Returns integers d, f such that:

      10**(p-1) <= d <= 10**p, and
      (d-1)*10**f < exp(c*10**e) < (d+1)*10**f

    In other words, d*10**f is an approximation to exp(c*10**e) with p
    digits of precision, and with an error in d of at most 1.  This is
    almost, but not quite, the same as the error being < 1ulp: when d
    = 10**(p-1) the error could be up to 10 ulp."""

    # we'll call iexp with M = 10**(p+2), giving p+3 digits of precision
    p += 2

    # compute log(10) with extra precision = adjusted exponent of c*10**e
    extra = max(0, e + len(str(c)) - 1)
    q = p + extra

    # compute quotient c*10**e/(log(10)) = c*10**(e+q)/(log(10)*10**q),
    # rounding down
    shift = e+q
    if shift >= 0:
        cshift = c*10**shift
    else:
        cshift = c//10**-shift
    quot, rem = divmod(cshift, _log10_digits(q))

    # reduce remainder back to original precision
    # (exp(c*10**e) = 10**quot * exp(rem/10**p * log(10)); the second
    # factor lies in [1, 10) and is computed by _iexp below)
    rem = _div_nearest(rem, 10**extra)

    # error in result of _iexp < 120;  error after division < 0.62
    return _div_nearest(_iexp(rem, 10**p), 1000), quot - p + 3
def _dpower(xc, xe, yc, ye, p):
    """Given integers xc, xe, yc and ye representing Decimals x = xc*10**xe and
    y = yc*10**ye, compute x**y.  Returns a pair of integers (c, e) such that:

      10**(p-1) <= c <= 10**p, and
      (c-1)*10**e < x**y < (c+1)*10**e

    in other words, c*10**e is an approximation to x**y with p digits
    of precision, and with an error in c of at most 1.  (This is
    almost, but not quite, the same as the error being < 1ulp: when c
    == 10**(p-1) we can only guarantee error < 10ulp.)

    We assume that: x is positive and not equal to 1, and y is nonzero.
    """

    # Strategy: compute x**y as exp(y*log(x)), carrying extra working
    # precision so the final result has an error of at most 1 in c.

    # Find b such that 10**(b-1) <= |y| <= 10**b
    b = len(str(abs(yc))) + ye

    # log(x) = lxc*10**(-p-b-1), to p+b+1 places after the decimal point
    lxc = _dlog(xc, xe, p+b+1)

    # compute product y*log(x) = yc*lxc*10**(-p-b-1+ye) = pc*10**(-p-1)
    shift = ye-b
    if shift >= 0:
        pc = lxc*yc*10**shift
    else:
        pc = _div_nearest(lxc*yc, 10**-shift)

    if pc == 0:
        # y*log(x) rounded to zero at this precision:
        # we prefer a result that isn't exactly 1; this makes it
        # easier to compute a correctly rounded result in __pow__
        if ((len(str(xc)) + xe >= 1) == (yc > 0)):  # if x**y > 1:
            coeff, exp = 10**(p-1)+1, 1-p
        else:
            coeff, exp = 10**p-1, -p
    else:
        coeff, exp = _dexp(pc, -(p+1), p+1)
        # Drop the guard digit that _dexp carried for us.
        coeff = _div_nearest(coeff, 10)
        exp += 1

    return coeff, exp
def _log10_lb(c, correction = {
'1': 100, '2': 70, '3': 53, '4': 40, '5': 31,
'6': 23, '7': 16, '8': 10, '9': 5}):
"""Compute a lower bound for 100*log10(c) for a positive integer c."""
if c <= 0:
raise ValueError("The argument to _log10_lb should be nonnegative.")
str_c = str(c)
return 100*len(str_c) - correction[str_c[0]]
##### Helper Functions ####################################################
def _convert_other(other, raiseit=False, allow_float=False):
    """Coerce *other* to a Decimal for use in an implicit construction.

    Decimal instances pass through untouched and ints are converted;
    floats are converted only when allow_float is true (the comparison
    methods __eq__ and friends use this).  Anything else either raises
    TypeError (when raiseit is true) or returns NotImplemented so the
    caller can defer to the other operand.
    """
    if isinstance(other, Decimal):
        return other
    if isinstance(other, int):
        return Decimal(other)
    if allow_float and isinstance(other, float):
        return Decimal.from_float(other)
    if not raiseit:
        return NotImplemented
    raise TypeError("Unable to convert %s to Decimal" % other)
def _convert_for_comparison(self, other, equality_op=False):
    """Given a Decimal instance self and a Python object other, return
    a pair (s, o) of Decimal instances such that "s op o" is
    equivalent to "self op other" for any of the 6 comparison
    operators "op".

    Returns (NotImplemented, NotImplemented) when no equivalent pair
    can be built, so the caller can defer to the other operand.
    """
    if isinstance(other, Decimal):
        return self, other

    # Comparison with a Rational instance (also includes integers):
    # self op n/d <=> self*d op n (for n and d integers, d positive).
    # A NaN or infinity can be left unchanged without affecting the
    # comparison result.
    if isinstance(other, _numbers.Rational):
        if not self._is_special:
            # Scale self's coefficient by the rational's denominator so
            # both sides become integer-valued at self's exponent.
            self = _dec_from_triple(self._sign,
                                    str(int(self._int) * other.denominator),
                                    self._exp)
        return self, Decimal(other.numerator)

    # Comparisons with float and complex types.  == and != comparisons
    # with complex numbers should succeed, returning either True or False
    # as appropriate.  Other comparisons return NotImplemented.
    if equality_op and isinstance(other, _numbers.Complex) and other.imag == 0:
        other = other.real

    if isinstance(other, float):
        context = getcontext()
        if equality_op:
            # Equality mixing is allowed, but the FloatOperation flag is
            # raised so strict users can detect it.
            context.flags[FloatOperation] = 1
        else:
            # Ordered comparisons against floats signal FloatOperation.
            context._raise_error(FloatOperation,
                "strict semantics for mixing floats and Decimals are enabled")
        return self, Decimal.from_float(other)
    return NotImplemented, NotImplemented
##### Setup Specific Contexts ############################################

# The default context prototype used by Context()
# Is mutable, so that new contexts can have different default values
#
# NOTE(review): prec=17 / Emax=308 / Emin=-324 mirror the range of an
# IEEE-754 binary double rather than CPython's stock defaults (prec=28,
# Emin/Emax = +/-999999) -- presumably deliberate for this JS-backed
# port (see the _jsre import below); confirm before "fixing".
DefaultContext = Context(
    prec=17, rounding=ROUND_HALF_EVEN,
    traps=[DivisionByZero, Overflow, InvalidOperation],
    flags=[],
    Emax=308,
    Emin=-324,
    capitals=1,
    clamp=0
)

# Pre-made alternate contexts offered by the specification
# Don't change these; the user should be able to select these
# contexts and be able to reproduce results from other implementations
# of the spec.
BasicContext = Context(
    prec=9, rounding=ROUND_HALF_UP,
    traps=[DivisionByZero, Overflow, InvalidOperation, Clamped, Underflow],
    flags=[],
)

ExtendedContext = Context(
    prec=9, rounding=ROUND_HALF_EVEN,
    traps=[],
    flags=[],
)
##### crud for parsing strings #############################################
#
# Regular expression used for parsing numeric strings. Additional
# comments:
#
# 1. Uncomment the two '\s*' lines to allow leading and/or trailing
# whitespace. But note that the specification disallows whitespace in
# a numeric string.
#
# 2. For finite numbers (not infinities and NaNs) the body of the
# number between the optional sign and the optional exponent must have
# at least one decimal digit, possibly after the decimal point. The
# lookahead expression '(?=\d|\.\d)' checks this.
#import re
#_parser = re.compile(r""" # A numeric string consists of:
# \s*
# (?P<sign>[-+])? # an optional sign, followed by either...
# (
# (?=\d|\.\d) # ...a number (with at least one digit)
# (?P<int>\d*) # having a (possibly empty) integer part
# (\.(?P<frac>\d*))? # followed by an optional fractional part
# (E(?P<exp>[-+]?\d+))? # followed by an optional exponent, or...
# |
# Inf(inity)? # ...an infinity, or...
# |
# (?P<signal>s)? # ...an (optionally signaling)
# NaN # NaN
# (?P<diag>\d*) # with (possibly empty) diagnostic info.
# )
# \s*
# \Z
#""", re.VERBOSE | re.IGNORECASE).match
# NOTE(review): _jsre appears to be a JavaScript-backed substitute for the
# stdlib re module (non-stdlib) -- confirm; it is used only for the two
# digit-string matchers below and deleted again further down.
import _jsre as re
# True when the digit string consists entirely of zeros (or is empty).
_all_zeros = re.compile('0*$').match
# True when the digit string is a '5' followed only by zeros, i.e. the
# discarded part of a quotient is exactly one half ulp.
_exact_half = re.compile('50*$').match
##### PEP3101 support functions ##############################################
# The functions in this section have little to do with the Decimal
# class, and could potentially be reused or adapted for other pure
# Python numeric classes that want to implement __format__
#
# A format specifier for Decimal looks like:
#
# [[fill]align][sign][#][0][minimumwidth][,][.precision][type]
#_parse_format_specifier_regex = re.compile(r"""\A
#(?:
# (?P<fill>.)?
# (?P<align>[<>=^])
#)?
#(?P<sign>[-+ ])?
#(?P<alt>\#)?
#(?P<zeropad>0)?
#(?P<minimumwidth>(?!0)\d+)?
#(?P<thousands_sep>,)?
#(?:\.(?P<precision>0|(?!0)\d+))?
#(?P<type>[eEfFgGn%])?
#\Z
#""", re.VERBOSE|re.DOTALL)
# Drop the module alias now that the matchers above are bound.
# NOTE(review): _parse_format_specifier below still references
# _parse_format_specifier_regex, whose definition is commented out above,
# so formatting would raise NameError -- confirm whether __format__ is
# expected to work in this port.
del re

# The locale module is only needed for the 'n' format specifier.  The
# rest of the PEP 3101 code functions quite happily without it, so we
# don't care too much if locale isn't present.
try:
    import locale as _locale
except ImportError:
    pass
def _parse_format_specifier(format_spec, _localeconv=None):
    """Parse and validate a format specifier.

    Turns a standard numeric format specifier into a dict, with the
    following entries:

      fill: fill character to pad field to minimum width
      align: alignment type, either '<', '>', '=' or '^'
      sign: either '+', '-' or ' '
      minimumwidth: nonnegative integer giving minimum width
      zeropad: boolean, indicating whether to pad with zeros
      thousands_sep: string to use as thousands separator, or ''
      grouping: grouping for thousands separators, in format
        used by localeconv
      decimal_point: string to use for decimal point
      precision: nonnegative integer giving precision, or None
      type: one of the characters 'eEfFgG%', or None

    Raises ValueError for a malformed or self-contradictory specifier.
    """
    # NOTE(review): _parse_format_specifier_regex is commented out earlier
    # in this module, so this line would raise NameError if reached --
    # confirm intended behavior of __format__ in this port.
    m = _parse_format_specifier_regex.match(format_spec)
    if m is None:
        raise ValueError("Invalid format specifier: " + format_spec)

    # get the dictionary
    format_dict = m.groupdict()

    # zeropad; defaults for fill and alignment.  If zero padding
    # is requested, the fill and align fields should be absent.
    fill = format_dict['fill']
    align = format_dict['align']
    format_dict['zeropad'] = (format_dict['zeropad'] is not None)
    if format_dict['zeropad']:
        if fill is not None:
            raise ValueError("Fill character conflicts with '0'"
                             " in format specifier: " + format_spec)
        if align is not None:
            raise ValueError("Alignment conflicts with '0' in "
                             "format specifier: " + format_spec)
    format_dict['fill'] = fill or ' '
    # PEP 3101 originally specified that the default alignment should
    # be left; it was later agreed that right-aligned makes more sense
    # for numeric types.  See http://bugs.python.org/issue6857.
    format_dict['align'] = align or '>'

    # default sign handling: '-' for negative, '' for positive
    if format_dict['sign'] is None:
        format_dict['sign'] = '-'

    # minimumwidth defaults to 0; precision remains None if not given
    format_dict['minimumwidth'] = int(format_dict['minimumwidth'] or '0')
    if format_dict['precision'] is not None:
        format_dict['precision'] = int(format_dict['precision'])

    # if format type is 'g' or 'G' then a precision of 0 makes little
    # sense; convert it to 1.  Same if format type is unspecified.
    if format_dict['precision'] == 0:
        if format_dict['type'] is None or format_dict['type'] in 'gGn':
            format_dict['precision'] = 1

    # determine thousands separator, grouping, and decimal separator, and
    # add appropriate entries to format_dict
    if format_dict['type'] == 'n':
        # apart from separators, 'n' behaves just like 'g'
        format_dict['type'] = 'g'
        if _localeconv is None:
            _localeconv = _locale.localeconv()
        if format_dict['thousands_sep'] is not None:
            raise ValueError("Explicit thousands separator conflicts with "
                             "'n' type in format specifier: " + format_spec)
        format_dict['thousands_sep'] = _localeconv['thousands_sep']
        format_dict['grouping'] = _localeconv['grouping']
        format_dict['decimal_point'] = _localeconv['decimal_point']
    else:
        if format_dict['thousands_sep'] is None:
            format_dict['thousands_sep'] = ''
        # Non-locale formatting always groups in threes with '.' point.
        format_dict['grouping'] = [3, 0]
        format_dict['decimal_point'] = '.'

    return format_dict
def _format_align(sign, body, spec):
"""Given an unpadded, non-aligned numeric string 'body' and sign
string 'sign', add padding and alignment conforming to the given
format specifier dictionary 'spec' (as produced by
parse_format_specifier).
"""
# how much extra space do we have to play with?
minimumwidth = spec['minimumwidth']
fill = spec['fill']
padding = fill*(minimumwidth - len(sign) - len(body))
align = spec['align']
if align == '<':
result = sign + body + padding
elif align == '>':
result = padding + sign + body
elif align == '=':
result = sign + padding + body
elif align == '^':
half = len(padding)//2
result = padding[:half] + sign + body + padding[half:]
else:
raise ValueError('Unrecognised alignment field')
return result
def _group_lengths(grouping):
"""Convert a localeconv-style grouping into a (possibly infinite)
iterable of integers representing group lengths.
"""
# The result from localeconv()['grouping'], and the input to this
# function, should be a list of integers in one of the
# following three forms:
#
# (1) an empty list, or
# (2) nonempty list of positive integers + [0]
# (3) list of positive integers + [locale.CHAR_MAX], or
from itertools import chain, repeat
if not grouping:
return []
elif grouping[-1] == 0 and len(grouping) >= 2:
return chain(grouping[:-1], repeat(grouping[-2]))
elif grouping[-1] == _locale.CHAR_MAX:
return grouping[:-1]
else:
raise ValueError('unrecognised format for grouping')
def _insert_thousands_sep(digits, spec, min_width=1):
    """Insert thousands separators into a digit string.

    spec is a dictionary whose keys should include 'thousands_sep' and
    'grouping'; typically it's the result of parsing the format
    specifier using _parse_format_specifier.

    The min_width keyword argument gives the minimum length of the
    result, which will be padded on the left with zeros if necessary.

    If necessary, the zero padding adds an extra '0' on the left to
    avoid a leading thousands separator.  For example, inserting
    commas every three digits in '123456', with min_width=8, gives
    '0,123,456', even though that has length 9.
    """

    sep = spec['thousands_sep']
    grouping = spec['grouping']

    groups = []
    # Peel groups off the right-hand end of 'digits', zero-padding each
    # group as needed to satisfy the remaining min_width budget.
    for l in _group_lengths(grouping):
        if l <= 0:
            raise ValueError("group length should be positive")
        # max(..., 1) forces at least 1 digit to the left of a separator
        l = min(max(len(digits), min_width, 1), l)
        groups.append('0'*(l - len(digits)) + digits[-l:])
        digits = digits[:-l]
        min_width -= l
        if not digits and min_width <= 0:
            # All digits consumed and the width budget satisfied: done.
            break
        # The separator we're about to emit also counts toward the width.
        min_width -= len(sep)
    else:
        # Group lengths exhausted (finite grouping, case (3)): whatever
        # digits remain form one final, possibly zero-padded, group.
        l = max(len(digits), min_width, 1)
        groups.append('0'*(l - len(digits)) + digits[-l:])
    return sep.join(reversed(groups))
def _format_sign(is_negative, spec):
"""Determine sign character."""
if is_negative:
return '-'
elif spec['sign'] in ' +':
return spec['sign']
else:
return ''
def _format_number(is_negative, intpart, fracpart, exp, spec):
    """Format a number, given the following data:

    is_negative: true if the number is negative, else false
    intpart: string of digits that must appear before the decimal point
    fracpart: string of digits that must come after the point
    exp: exponent, as an integer
    spec: dictionary resulting from parsing the format specifier

    This function uses the information in spec to:
      insert separators (decimal separator and thousands separators)
      format the sign
      format the exponent
      add trailing '%' for the '%' type
      zero-pad if necessary
      fill and align if necessary
    """

    sign = _format_sign(is_negative, spec)

    # Attach the decimal point when there are fractional digits, or when
    # the '#' (alternate form) flag forces one.
    if fracpart or spec['alt']:
        fracpart = spec['decimal_point'] + fracpart

    if exp != 0 or spec['type'] in 'eE':
        # Exponent marker case follows the requested type character.
        echar = {'E': 'E', 'e': 'e', 'G': 'E', 'g': 'e'}[spec['type']]
        fracpart += "{0}{1:+}".format(echar, exp)
    if spec['type'] == '%':
        fracpart += '%'

    if spec['zeropad']:
        # Zero padding applies to the integer part only, so budget the
        # width left after the sign and fractional part are accounted for.
        min_width = spec['minimumwidth'] - len(fracpart) - len(sign)
    else:
        min_width = 0
    intpart = _insert_thousands_sep(intpart, spec, min_width)

    return _format_align(sign, intpart+fracpart, spec)
##### Useful Constants (internal use only) ################################

# Reusable defaults
_Infinity = Decimal('Inf')
_NegativeInfinity = Decimal('-Inf')
_NaN = Decimal('NaN')
_Zero = Decimal(0)
_One = Decimal(1)
_NegativeOne = Decimal(-1)

# _SignedInfinity[sign] is infinity w/ that sign
_SignedInfinity = (_Infinity, _NegativeInfinity)

# Constants related to the hash implementation;  hash(x) is based
# on the reduction of x modulo _PyHASH_MODULUS
_PyHASH_MODULUS = sys.hash_info.modulus
# hash values to use for positive and negative infinities, and nans
_PyHASH_INF = sys.hash_info.inf
_PyHASH_NAN = sys.hash_info.nan

# _PyHASH_10INV is the inverse of 10 modulo the prime _PyHASH_MODULUS
# (three-argument pow computes the modular inverse via Fermat's little
# theorem, since _PyHASH_MODULUS is prime).
_PyHASH_10INV = pow(10, _PyHASH_MODULUS - 2, _PyHASH_MODULUS)
# sys was only needed for the hash constants above; keep the module
# namespace clean.
del sys

# If a C-accelerated _decimal module is available, prefer it: delete every
# name defined here that _decimal does not also provide, then re-export
# _decimal's public names over the rest.
try:
    import _decimal
except ImportError:
    pass
else:
    s1 = set(dir())
    s2 = set(dir(_decimal))
    # s1 - s2: names private to this pure-Python implementation.
    for name in s1 - s2:
        del globals()[name]
    del s1, s2, name
    from _decimal import *
# Defect fixed: the original final line had extraction/dataset residue
# ("| unknown | codeparrot/codeparrot-clean") fused onto it, which is not
# valid Python; the statement itself is restored unchanged.
if __name__ == '__main__':
    # Run the decimal module's doctests when executed directly.
    import doctest, decimal
    doctest.testmod(decimal)
# Copyright (c) 2014 Rackspace, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import uuid
try:
import ordereddict as collections
except ImportError: # pragma: no cover
import collections # pragma: no cover
import ddt
import mock
from oslo_config import cfg
import testtools
from poppy.model.helpers import provider_details
from poppy.storage.cassandra import driver
from poppy.storage.cassandra import services
from poppy.transport.pecan.models.request import service as req_service
from tests.unit import base
@ddt.ddt
class CassandraStorageServiceTests(base.TestCase):
    def setUp(self):
        """Build a ServicesController backed by a fully mocked Cassandra."""
        super(CassandraStorageServiceTests, self).setUp()

        # mock arguments to use
        self.project_id = '123456'
        self.service_id = uuid.uuid4()
        self.service_name = 'mocksite'

        # create mocked config and driver
        conf = cfg.ConfigOpts()
        conf.register_opt(
            cfg.StrOpt(
                'datacenter',
                default='',
                help='datacenter where the C* cluster hosted'))
        conf.register_opts(driver.CASSANDRA_OPTIONS,
                           group=driver.CASSANDRA_GROUP)
        cassandra_driver = driver.CassandraStorageDriver(conf)

        # Keep schema migrations from touching a real cluster.
        migrations_patcher = mock.patch(
            'cdeploy.migrator.Migrator'
        )
        migrations_patcher.start()
        self.addCleanup(migrations_patcher.stop)

        # Replace the Cassandra cluster; individual tests program
        # self.mock_session.execute with canned rows / side effects.
        cluster_patcher = mock.patch('cassandra.cluster.Cluster')
        self.mock_cluster = cluster_patcher.start()
        self.mock_session = self.mock_cluster().connect()
        self.addCleanup(cluster_patcher.stop)

        # stubbed cassandra driver
        self.sc = services.ServicesController(cassandra_driver)
@ddt.file_data('data_get_service.json')
def test_get_service(self, value):
# mock the response from cassandra
value[0]['service_id'] = self.service_id
self.mock_session.execute.return_value = value
actual_response = self.sc.get_service(self.project_id, self.service_id)
# TODO(amitgandhinz): assert the response
# matches the expectation (using jsonschema)
self.assertEqual(str(actual_response.service_id), str(self.service_id))
    @ddt.file_data('data_get_service.json')
    def test_update_state(self, value):
        """update_state yields the same service that get_service returns."""
        # Rewrite the fixture so every provider looks fully deployed and
        # carries a provider access URL.
        details = value[0]['provider_details']
        new_details = {}
        for provider, detail in list(details.items()):
            detail = json.loads(detail)
            detail['status'] = 'deployed'
            detail['access_urls'] = [
                {
                    'provider_url': "{0}.format".replace("format", "com") and "{0}.com".format(provider.lower()),
                    'domain': detail['access_urls'][0]
                }
            ]
            new_details[provider] = json.dumps(detail)
        value[0]['provider_details'] = new_details

        # mock the response from cassandra
        value[0]['service_id'] = self.service_id
        self.mock_session.execute.return_value = [value[0]]

        expected_obj = self.sc.get_service(self.project_id, self.service_id)
        actual_obj = self.sc.update_state(self.project_id, self.service_id,
                                          'deployed')
        self.assertEqual(expected_obj.service_id, actual_obj.service_id)
def test_get_service_with_exception(self):
# mock the response from cassandra
self.mock_session.execute.return_value = []
self.assertRaises(
ValueError,
self.sc.get_service,
self.project_id,
self.service_id
)
    @ddt.file_data('../data/data_create_service.json')
    @mock.patch.object(services.ServicesController,
                       'domain_exists_elsewhere',
                       return_value=False)
    def test_create_service(self, value, mock_check):
        """create_service succeeds when no domain is claimed elsewhere."""
        service_obj = req_service.load_from_json(value)
        responses = self.sc.create_service(self.project_id, service_obj)

        # Expect the response to be None as there are no providers passed
        # into the driver to respond to this call
        self.assertEqual(responses, None)

        # TODO(amitgandhinz): need to validate the create to cassandra worked.
    @ddt.file_data('../data/data_create_service.json')
    @mock.patch.object(services.ServicesController,
                       'domain_exists_elsewhere',
                       return_value=True)
    def test_create_service_exist(self, value, mock_check):
        """create_service rejects a service whose domain is already taken."""
        service_obj = req_service.load_from_json(value)
        # Pretend the service already exists in storage.
        self.sc.get = mock.Mock(return_value=service_obj)

        self.assertRaises(
            ValueError,
            self.sc.create_service,
            self.project_id, service_obj
        )
    @ddt.file_data('data_list_services.json')
    def test_list_services(self, value):
        """get_services maps stored rows onto service model objects."""
        # mock the response from cassandra
        value[0]['project_id'] = self.project_id
        self.mock_session.prepare.return_value = mock.Mock()
        self.mock_session.execute.return_value = value

        actual_response = self.sc.get_services(self.project_id, None, None)

        # TODO(amitgandhinz): assert the response
        # matches the expectation (using jsonschema)
        self.assertEqual(actual_response[0].name, "mocksite")
        self.assertEqual(actual_response[0].project_id, self.project_id)
    @ddt.file_data('data_get_service.json')
    def test_delete_service(self, value):
        """delete_service runs cleanly when the service row exists."""
        # Rewrite the fixture so each provider reports a deployed status
        # and a provider access URL.
        details = value[0]['provider_details']
        new_details = {}
        for provider, detail in list(details.items()):
            detail = json.loads(detail)
            detail['status'] = 'deployed'
            detail['access_urls'] = [
                {
                    'provider_url': "{0}.com".format(provider.lower()),
                    'domain': detail['access_urls'][0]
                }
            ]
            new_details[provider] = json.dumps(detail)
        value[0]['provider_details'] = new_details

        # mock the response from cassandra
        value[0]['service_id'] = self.service_id
        # self.mock_session.execute.return_value = value

        def mock_execute_side_effect(*args):
            # Only the service lookup returns a row; every other CQL
            # statement issued during deletion returns nothing.
            if args[0].query_string == services.CQL_GET_SERVICE:
                return [value[0]]
            else:
                return None
        self.mock_session.execute.side_effect = mock_execute_side_effect

        self.sc.delete_service(
            self.project_id,
            self.service_id
        )
        # TODO(isaacm): Add assertions on queries called
def test_delete_service_no_result(self):
# mock the response from cassandra
self.mock_session.execute.return_value = iter([{}])
actual_response = self.sc.delete_service(
self.project_id,
self.service_id
)
# Expect the response to be None as there are no providers passed
# into the driver to respond to this call
self.assertEqual(actual_response, None)
    @ddt.file_data('../data/data_update_service.json')
    @mock.patch.object(services.ServicesController,
                       'domain_exists_elsewhere',
                       return_value=False)
    @mock.patch.object(services.ServicesController,
                       'set_service_provider_details')
    def test_update_service(self, service_json,
                            mock_set_service_provider_details,
                            mock_check):
        """update_service succeeds when the requested domains are free."""
        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Existing details for one provider, stored as the raw JSON
            # string shape Cassandra would return.
            mock_provider_det.return_value = {
                "MaxCDN": "{\"id\": 11942, \"access_urls\": "
                          "[{\"provider_url\": \"maxcdn.provider.com\", "
                          "\"domain\": \"xk.cd\"}], "
                          "\"domains_certificate_status\":"
                          "{\"mypullzone.com\": "
                          "\"failed\"} }",
            }
            self.mock_session.execute.return_value = iter([{}])
            service_obj = req_service.load_from_json(service_json)

            actual_response = self.sc.update_service(
                self.project_id,
                self.service_id,
                service_obj
            )

            # Expect the response to be None as there are no
            # providers passed into the driver to respond to this call
            self.assertEqual(actual_response, None)
@ddt.file_data('data_provider_details.json')
def test_get_provider_details(self, provider_details_json):
# mock the response from cassandra
self.mock_session.execute.return_value = [
{'provider_details': provider_details_json}
]
actual_response = self.sc.get_provider_details(
self.project_id,
self.service_id
)
self.assertTrue("MaxCDN" in actual_response)
self.assertTrue("Mock" in actual_response)
self.assertTrue("CloudFront" in actual_response)
self.assertTrue("Fastly" in actual_response)
@ddt.file_data('data_provider_details.json')
def test_get_provider_details_value_error(self, provider_details_json):
# mock the response from cassandra
self.mock_session.execute.return_value = []
with testtools.ExpectedException(ValueError):
self.sc.get_provider_details(
self.project_id,
self.service_id
)
    @ddt.file_data('data_provider_details.json')
    def test_update_provider_details(self, provider_details_json):
        """update_provider_details persists details plus service status."""
        # Build ProviderDetail model objects out of the JSON fixture.
        provider_details_dict = {}
        for k, v in provider_details_json.items():
            provider_detail_dict = json.loads(v)
            provider_details_dict[k] = provider_details.ProviderDetail(
                provider_service_id=(
                    provider_detail_dict["id"]),
                access_urls=provider_detail_dict["access_urls"],
                domains_certificate_status=provider_detail_dict.get(
                    "domains_certificate_status", {}))
        # mock the response from cassandra
        self.mock_session.execute.return_value = None
        # this is for update_provider_details unittest code coverage
        # Predict the CQL bind arguments by serializing the new details the
        # same way the controller does.
        arg_provider_details_dict = {}
        status = None
        for provider_name in provider_details_dict:
            the_provider_detail_dict = collections.OrderedDict()
            the_provider_detail_dict["id"] = (
                provider_details_dict[provider_name].provider_service_id)
            the_provider_detail_dict["access_urls"] = (
                provider_details_dict[provider_name].access_urls)
            the_provider_detail_dict["status"] = (
                provider_details_dict[provider_name].status)
            status = the_provider_detail_dict["status"]
            the_provider_detail_dict["name"] = (
                provider_details_dict[provider_name].name)
            the_provider_detail_dict["domains_certificate_status"] = (
                provider_details_dict[provider_name].
                domains_certificate_status.to_dict())
            the_provider_detail_dict["error_info"] = (
                provider_details_dict[provider_name].error_info)
            the_provider_detail_dict["error_message"] = (
                provider_details_dict[provider_name].error_message)
            arg_provider_details_dict[provider_name] = json.dumps(
                the_provider_detail_dict)
        provider_details_args = {
            'project_id': self.project_id,
            'service_id': self.service_id,
            'provider_details': arg_provider_details_dict
        }
        status_args = {
            'status': status,
            'project_id': self.project_id,
            'service_id': self.service_id
        }

        # This is to verify mock has been called with the correct arguments
        def assert_mock_execute_args(*args):
            if args[0].query_string == services.CQL_UPDATE_PROVIDER_DETAILS:
                self.assertEqual(args[1], provider_details_args)
            elif args[0].query_string == services.CQL_SET_SERVICE_STATUS:
                self.assertEqual(args[1], status_args)
        self.mock_session.execute.side_effect = assert_mock_execute_args

        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Stored details: one provider with no access URLs, so no
            # provider-URL rows need deleting during the update.
            mock_provider_det.return_value = {
                "MaxCDN":  # "{\"id\": 11942, \"access_urls\": "
                           # "[{\"provider_url\": \"maxcdn.provider.com\", "
                           # "\"domain\": \"xk.cd\"}], "
                           # "\"domains_certificate_status\":"
                           # "{\"mypullzone.com\": "
                           # "\"failed\"} }",
                provider_details.ProviderDetail(
                    provider_service_id='{}',
                    access_urls=[]
                )
            }
            self.sc.update_provider_details(
                self.project_id,
                self.service_id,
                provider_details_dict
            )
    @ddt.file_data('data_provider_details.json')
    def test_update_provider_details_domain_deleted(
            self,
            provider_details_json,
    ):
        """Provider URLs for domains dropped from a service are deleted."""
        # Build the *new* provider details (these do not reference the
        # 'xk2.cd' domain that the stored details advertise).
        provider_details_dict = {}
        for k, v in provider_details_json.items():
            provider_detail_dict = json.loads(v)
            provider_details_dict[k] = provider_details.ProviderDetail(
                provider_service_id=(
                    provider_detail_dict["id"]),
                access_urls=provider_detail_dict["access_urls"],
                domains_certificate_status=provider_detail_dict.get(
                    "domains_certificate_status", {}))
        # mock the response from cassandra
        self.mock_session.execute.return_value = None
        # this is for update_provider_details unittest code coverage
        # Predict the CQL bind arguments by serializing the new details the
        # same way the controller does.
        arg_provider_details_dict = {}
        status = None
        for provider_name in provider_details_dict:
            the_provider_detail_dict = collections.OrderedDict()
            the_provider_detail_dict["id"] = (
                provider_details_dict[provider_name].provider_service_id)
            the_provider_detail_dict["access_urls"] = (
                provider_details_dict[provider_name].access_urls)
            the_provider_detail_dict["status"] = (
                provider_details_dict[provider_name].status)
            status = the_provider_detail_dict["status"]
            the_provider_detail_dict["name"] = (
                provider_details_dict[provider_name].name)
            the_provider_detail_dict["domains_certificate_status"] = (
                provider_details_dict[provider_name].
                domains_certificate_status.to_dict())
            the_provider_detail_dict["error_info"] = (
                provider_details_dict[provider_name].error_info)
            the_provider_detail_dict["error_message"] = (
                provider_details_dict[provider_name].error_message)
            arg_provider_details_dict[provider_name] = json.dumps(
                the_provider_detail_dict)
        provider_details_args = {
            'project_id': self.project_id,
            'service_id': self.service_id,
            'provider_details': arg_provider_details_dict
        }
        status_args = {
            'status': status,
            'project_id': self.project_id,
            'service_id': self.service_id
        }

        # This is to verify mock has been called with the correct arguments
        def assert_mock_execute_args(*args):
            if args[0].query_string == services.CQL_UPDATE_PROVIDER_DETAILS:
                self.assertEqual(args[1], provider_details_args)
            elif args[0].query_string == services.CQL_SET_SERVICE_STATUS:
                self.assertEqual(args[1], status_args)
        self.mock_session.execute.side_effect = assert_mock_execute_args

        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Stored details still advertise an access URL for 'xk2.cd'.
            mock_provider_det.return_value = {
                "MaxCDN": provider_details.ProviderDetail(
                    provider_service_id=(
                        "{\"id\": 11942, \"access_urls\": "
                        "[{\"provider_url\": \"maxcdn.provider.com\", "
                        "\"domain\": \"xk2.cd\"}], "
                        "\"domains_certificate_status\":"
                        "{\"mypullzone.com\": "
                        "\"failed\"} }"
                    ),
                    access_urls=[
                        {
                            "provider_url": "fastly.provider.com",
                            "domain": "xk2.cd"
                        }
                    ]
                )
            }
            self.sc.update_provider_details(
                self.project_id,
                self.service_id,
                provider_details_dict
            )

            # Collect every CQL_DELETE_PROVIDER_URL execution and the
            # domain each one targeted.
            delete_queries = []
            deleted_domains = []
            for query_mock_call in self.sc.session.execute.mock_calls:
                name, args, kwargs = query_mock_call
                for arg in args:
                    if hasattr(arg, 'query_string'):
                        if (
                            arg.query_string ==
                            services.CQL_DELETE_PROVIDER_URL
                        ):
                            delete_queries.append(query_mock_call)
                            _, delete_query_args = args
                            deleted_domains.append(
                                delete_query_args["domain_name"])

            # Exactly the orphaned domain's provider URL was removed.
            self.assertEqual(1, len(delete_queries))
            self.assertEqual(['xk2.cd'], deleted_domains)
            self.assertTrue(self.sc.session.execute.called)
    def test_update_provider_details_new_provider_details_empty(self):
        """Clearing all provider details removes the old provider URL rows."""
        # The new provider details: nothing at all.
        provider_details_dict = {}
        # mock the response from cassandra
        self.mock_session.execute.return_value = None
        # this is for update_provider_details unittest code coverage
        arg_provider_details_dict = {}
        status = None
        provider_details_args = {
            'project_id': self.project_id,
            'service_id': self.service_id,
            'provider_details': arg_provider_details_dict
        }
        status_args = {
            'status': status,
            'project_id': self.project_id,
            'service_id': self.service_id
        }

        # This is to verify mock has been called with the correct arguments
        def assert_mock_execute_args(*args):
            if args[0].query_string == services.CQL_UPDATE_PROVIDER_DETAILS:
                self.assertEqual(args[1], provider_details_args)
            elif args[0].query_string == services.CQL_SET_SERVICE_STATUS:
                self.assertEqual(args[1], status_args)
        self.mock_session.execute.side_effect = assert_mock_execute_args

        with mock.patch.object(
                services.ServicesController,
                'get_provider_details') as mock_provider_det:
            # Stored details advertise an access URL for 'xk2.cd', which is
            # absent from the (empty) new details and must be cleaned up.
            mock_provider_det.return_value = {
                "MaxCDN": provider_details.ProviderDetail(
                    provider_service_id=(
                        "{\"id\": 11942, \"access_urls\": "
                        "[{\"provider_url\": \"maxcdn.provider.com\", "
                        "\"domain\": \"xk2.cd\"}], "
                        "\"domains_certificate_status\":"
                        "{\"mypullzone.com\": "
                        "\"failed\"} }"
                    ),
                    access_urls=[
                        {
                            "provider_url": "fastly.provider.com",
                            "domain": "xk2.cd"
                        }
                    ]
                )
            }
            self.sc.update_provider_details(
                self.project_id,
                self.service_id,
                provider_details_dict
            )

            # Collect every CQL_DELETE_PROVIDER_URL execution and the
            # domain each one targeted.
            delete_queries = []
            deleted_domains = []
            for query_mock_call in self.sc.session.execute.mock_calls:
                name, args, kwargs = query_mock_call
                for arg in args:
                    if hasattr(arg, 'query_string'):
                        if (
                            arg.query_string ==
                            services.CQL_DELETE_PROVIDER_URL
                        ):
                            delete_queries.append(query_mock_call)
                            _, delete_query_args = args
                            deleted_domains.append(
                                delete_query_args["domain_name"])

            # Only the orphaned domain's provider URL row was removed.
            self.assertEqual(1, len(delete_queries))
            self.assertEqual(['xk2.cd'], deleted_domains)
            self.assertTrue(self.sc.session.execute.called)
def test_session(self):
session = self.sc.session
self.assertNotEqual(session, None)
def test_domain_exists_elsewhere_true(self):
self.mock_session.execute.return_value = [
{
'service_id': 'service_id',
'project_id': 'project_id',
'domain_name': 'domain_name'
}
]
self.assertTrue(
self.sc.domain_exists_elsewhere('domain_name', 'new_service_id'))
def test_domain_exists_elsewhere_false(self):
self.mock_session.execute.return_value = [
{
'service_id': 'service_id',
'project_id': 'project_id',
'domain_name': 'domain_name'
}
]
self.assertFalse(
self.sc.domain_exists_elsewhere('domain_name', 'service_id'))
def test_domain_exists_elsewhere_no_results(self):
self.mock_session.execute.return_value = []
self.assertFalse(
self.sc.domain_exists_elsewhere('domain_name', 'new_service_id'))
def test_domain_exists_elsewhere_value_error(self):
self.mock_session.execute.side_effect = ValueError(
'Mock -- Something went wrong!'
)
self.assertFalse(
self.sc.domain_exists_elsewhere('domain_name', 'new_service_id'))
def test_get_service_count_positive(self):
self.mock_session.execute.return_value = [
{
'count': 1
}
]
self.assertEqual(1, self.sc.get_service_count('project_id'))
@ddt.file_data('data_list_services.json')
def test_get_services_marker_not_none(self, data):
self.mock_session.execute.return_value = data
results = self.sc.get_services('project_id', uuid.uuid4(), 1)
self.assertEqual(data[0]["project_id"], results[0].project_id)
def test_get_services_by_status_positive(self):
self.mock_session.execute.return_value = [
{'service_id': 1},
{'service_id': 2},
{'service_id': 3}
]
self.assertEqual(
[
{'service_id': '1'},
{'service_id': '2'},
{'service_id': '3'}
],
self.sc.get_services_by_status('project_id')
)
def test_delete_services_by_status_positive(self):
try:
self.sc.delete_services_by_status(
'project_id', uuid.uuid4(), 'status'
)
except Exception as e:
self.fail(e)
def test_get_domains_by_provider_url_positive(self):
    """Rows from the provider-URL lookup are returned unchanged."""
    domain_rows = [{'domain_name': 'www.xyz.com'}]
    self.mock_session.execute.return_value = domain_rows
    result = self.sc.get_domains_by_provider_url('provider_url')
    self.assertEqual([{'domain_name': 'www.xyz.com'}], result)
def test_delete_provider_url_positive(self):
    """Deleting a provider-URL mapping should complete without raising.

    Letting an unexpected exception propagate preserves the full
    traceback, unlike the catch-and-``self.fail(e)`` pattern.
    """
    self.sc.delete_provider_url('provider_url', 'domain_name')
def test_get_service_limit_positive(self):
    """A stored per-project limit takes precedence over the default."""
    limit_row = {'project_limit': 999}
    self.mock_session.execute.return_value = [limit_row]
    self.assertEqual(999, self.sc.get_service_limit('project_id'))
def test_get_service_limit_empty_result(self):
    """With no stored limit, the configured default is returned."""
    self.mock_session.execute.return_value = []
    default_limit = self.sc._driver.max_services_conf.max_services_per_project
    self.assertEqual(default_limit, self.sc.get_service_limit('project_id'))
def test_get_service_limit_value_error(self):
    """A query-layer ValueError falls back to the configured default."""
    self.mock_session.execute.side_effect = ValueError(
        'Mock -- Something went wrong!'
    )
    default_limit = self.sc._driver.max_services_conf.max_services_per_project
    self.assertEqual(default_limit, self.sc.get_service_limit('project_id'))
def test_set_service_limit_positive(self):
    """Setting a project's service limit should complete without raising.

    Exceptions propagate with their full traceback instead of being
    flattened by the catch-and-``self.fail(e)`` pattern.
    """
    self.sc.set_service_limit('project_id', 'project_limit')
@ddt.file_data('data_list_services.json')
def test_get_service_details_by_domain_name(self, data):
    # First execute() resolves the domain to (project, service); the
    # second fetches the service row itself.
    resolved_service_id = uuid.uuid4()
    domain_lookup_rows = [{
        'project_id': 'project_id',
        'service_id': resolved_service_id,
        'domain_name': 'domain_name'
    }]
    self.mock_session.execute.side_effect = [
        domain_lookup_rows,
        [data[0]],
    ]
    result = self.sc.get_service_details_by_domain_name('domain_name')
    self.assertEqual(data[0]["project_id"], result.project_id)
@ddt.file_data('data_list_services.json')
def test_get_service_details_by_domain_name_domain_not_present(
        self, data):
    # The domain resolves to a different project than the caller's, so
    # the lookup must be rejected with a ValueError.
    mismatched_rows = [{
        'project_id': 'proj_id',  # differs from arg to func
        'service_id': uuid.uuid4(),
        'domain_name': 'domain_name'
    }]
    self.mock_session.execute.side_effect = [mismatched_rows, [data[0]]]
    with testtools.ExpectedException(ValueError):
        self.sc.get_service_details_by_domain_name(
            'domain_name',
            project_id='project_id'
        )
@ddt.file_data('data_provider_details.json')
def test_set_service_provider_details(self, data):
    # NOTE(review): this test asserts on the exact order and count of
    # session.execute() calls (nine, unpacked below), so the code is
    # deliberately left untouched; only comments are added.
    service_id = uuid.uuid4()

    def mock_execute_side_effect(*args):
        # Return stored provider details only for the GET statement;
        # every other prepared statement gets None.
        if args[0].query_string == services.CQL_GET_PROVIDER_DETAILS:
            return [{'provider_details': data}]
        else:
            return None

    self.mock_session.execute.side_effect = mock_execute_side_effect
    self.sc.set_service_provider_details(
        'project_id', service_id, 'deployed'
    )
    # Unpack all nine execute() calls in order; only three of them are
    # asserted on below, the rest are ignored.
    [
        update_service_status,
        get_provider_details,
        _,
        update_provider_details,
        _,
        _,
        _,
        _,
        _,
    ] = self.mock_session.execute.mock_calls
    # mock_calls entries are (name, args, kwargs) triples; [1][0] is the
    # first positional argument, i.e. the prepared statement executed.
    self.assertEqual(services.CQL_SET_SERVICE_STATUS,
                     update_service_status[1][0].query_string)
    self.assertEqual(services.CQL_GET_PROVIDER_DETAILS,
                     get_provider_details[1][0].query_string)
    self.assertEqual(services.CQL_UPDATE_PROVIDER_DETAILS,
                     update_provider_details[1][0].query_string)
#!/usr/bin/python
# -*- coding: utf-8 -*-
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: campfire
version_added: "1.2"
short_description: Send a message to Campfire
description:
- Send a message to Campfire.
- Messages with newlines will result in a "Paste" message being sent.
options:
subscription:
description:
- The subscription name to use.
required: true
token:
description:
- API token.
required: true
room:
description:
- Room number to which the message should be sent.
required: true
msg:
description:
- The message body.
required: true
notify:
description:
- Send a notification sound before the message.
required: false
choices: ["56k", "bell", "bezos", "bueller", "clowntown",
"cottoneyejoe", "crickets", "dadgummit", "dangerzone",
"danielsan", "deeper", "drama", "greatjob", "greyjoy",
"guarantee", "heygirl", "horn", "horror",
"inconceivable", "live", "loggins", "makeitso", "noooo",
"nyan", "ohmy", "ohyeah", "pushit", "rimshot",
"rollout", "rumble", "sax", "secret", "sexyback",
"story", "tada", "tmyk", "trololo", "trombone", "unix",
"vuvuzela", "what", "whoomp", "yeah", "yodel"]
# informational: requirements for nodes
requirements: [ ]
author: "Adam Garside (@fabulops)"
'''
EXAMPLES = '''
- campfire: subscription=foo token=12345 room=123 msg="Task completed."
- campfire: subscription=foo token=12345 room=123 notify=loggins
msg="Task completed ... with feeling."
'''
import cgi
def main():
    """Post a message (optionally preceded by a sound) to a Campfire room."""
    sound_choices = ["56k", "bell", "bezos", "bueller",
                     "clowntown", "cottoneyejoe",
                     "crickets", "dadgummit", "dangerzone",
                     "danielsan", "deeper", "drama",
                     "greatjob", "greyjoy", "guarantee",
                     "heygirl", "horn", "horror",
                     "inconceivable", "live", "loggins",
                     "makeitso", "noooo", "nyan", "ohmy",
                     "ohyeah", "pushit", "rimshot",
                     "rollout", "rumble", "sax", "secret",
                     "sexyback", "story", "tada", "tmyk",
                     "trololo", "trombone", "unix",
                     "vuvuzela", "what", "whoomp", "yeah",
                     "yodel"]

    module = AnsibleModule(
        argument_spec=dict(
            subscription=dict(required=True),
            token=dict(required=True, no_log=True),
            room=dict(required=True),
            msg=dict(required=True),
            notify=dict(required=False, choices=sound_choices),
        ),
        supports_check_mode=False
    )

    params = module.params
    room = params["room"]
    msg = params["msg"]
    notify = params["notify"]

    base_url = "https://%s.campfirenow.com" % params["subscription"]
    speak_url = '%s/room/%s/speak.xml' % (base_url, room)
    sound_body = "<message><type>SoundMessage</type><body>%s</body></message>"
    text_body = "<message><body>%s</body></message>"
    headers = {'Content-Type': 'application/xml',
               'User-agent': "Ansible/1.2"}

    # Hack to add basic auth username and password the way fetch_url
    # expects: Campfire authenticates with the API token as the username
    # and a literal 'X' as the password.
    params['url_username'] = params["token"]
    params['url_password'] = 'X'

    def post(body, label):
        # POST one XML payload; fail the module on any non-2xx reply.
        response, info = fetch_url(module, speak_url, data=body,
                                   headers=headers)
        if info['status'] not in [200, 201]:
            module.fail_json(msg="unable to send msg: '%s', campfire api"
                                 " returned error code: '%s'" %
                                 (label, info['status']))

    # Optional audible notification first, then the message itself.
    if notify:
        post(sound_body % cgi.escape(notify), notify)
    post(text_body % cgi.escape(msg), msg)

    module.exit_json(changed=True, room=room, msg=msg, notify=notify)
# import module snippets
# Conventional Ansible-module boilerplate: these star-imports provide
# AnsibleModule and fetch_url used by main() above.
from ansible.module_utils.basic import *
from ansible.module_utils.urls import *

if __name__ == '__main__':
    main()
# Copyright (C) 2012,2013
# Max Planck Institute for Polymer Research
# Copyright (C) 2008,2009,2010,2011
# Max-Planck-Institute for Polymer Research & Fraunhofer SCAI
#
# This file is part of ESPResSo++.
#
# ESPResSo++ is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# ESPResSo++ is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
r"""
***************************************
**ExtAnalyze** - Integrator Extension
***************************************
This class can be used to execute nearly all analysis objects
within the main integration loop which allows to automatically
accumulate time averages (with standard deviation error bars).
Example Usage:
-----------------
>>> pt = espressopp.analysis.PressureTensor(system)
>>> extension_pt = espressopp.integrator.ExtAnalyze(pt , interval=100)
>>> integrator.addExtension(extension_pt)
>>> integrator.run(10000)
>>>
>>> pt_ave = pt.getAverageValue()
>>> print "average Pressure Tensor = ", pt_ave[:6]
>>> print " std deviation = ", pt_ave[6:]
>>> print "number of measurements = ", pt.getNumberOfMeasurements()
.. function:: espressopp.integrator.ExtAnalyze(action_obj, interval)
:param action_obj:
:param interval: (default: 1)
:type action_obj:
:type interval: int
"""
from espressopp.esutil import cxxinit
from espressopp import pmi
from espressopp.integrator.Extension import *
from _espressopp import integrator_ExtAnalyze
class ExtAnalyzeLocal(ExtensionLocal, integrator_ExtAnalyze):
    """Worker-side counterpart of ExtAnalyze; builds the C++ extension."""

    def __init__(self, action_obj, interval=1):
        # Initialize the C++ object only on ranks that belong to the
        # active PMI worker group, or everywhere when no PMI
        # communicator is active.
        pmi_comm_active = pmi._PMIComm and pmi._PMIComm.isActive()
        if pmi_comm_active and pmi._MPIcomm.rank not in pmi._PMIComm.getMPIcpugroup():
            return
        cxxinit(self, integrator_ExtAnalyze, action_obj, interval)
if pmi.isController :
    # On the controller rank, expose ExtAnalyze as a PMI proxy class that
    # forwards calls to the ExtAnalyzeLocal instances on the workers.
    class ExtAnalyze(Extension):
        __metaclass__ = pmi.Proxy
        pmiproxydefs = dict(
            cls = 'espressopp.integrator.ExtAnalyzeLocal',
        )
/**
* Copyright (c) Meta Platforms, Inc. and affiliates.
*
* This source code is licensed under the MIT license found in the
* LICENSE file in the root directory of this source tree.
*/
import {CompilerDiagnostic, CompilerError} from '..';
import {ErrorCategory} from '../CompilerError';
import {HIRFunction} from '../HIR';
import {getFunctionCallSignature} from '../Inference/InferMutationAliasingEffects';
import {Result} from '../Utils/Result';
/**
* Checks that known-impure functions are not called during render. Examples of invalid functions to
* call during render are `Math.random()` and `Date.now()`. Users may extend this set of
* impure functions via a module type provider and specifying functions with `impure: true`.
*
* TODO: add best-effort analysis of functions which are called during render. We have variations of
* this in several of our validation passes and should unify those analyses into a reusable helper
* and use it here.
*/
export function validateNoImpureFunctionsInRender(
fn: HIRFunction,
): Result<void, CompilerError> {
const errors = new CompilerError();
for (const [, block] of fn.body.blocks) {
for (const instr of block.instructions) {
const value = instr.value;
if (value.kind === 'MethodCall' || value.kind == 'CallExpression') {
const callee =
value.kind === 'MethodCall' ? value.property : value.callee;
const signature = getFunctionCallSignature(
fn.env,
callee.identifier.type,
);
if (signature != null && signature.impure === true) {
errors.pushDiagnostic(
CompilerDiagnostic.create({
category: ErrorCategory.Purity,
reason: 'Cannot call impure function during render',
description:
(signature.canonicalName != null
? `\`${signature.canonicalName}\` is an impure function. `
: '') +
'Calling an impure function can produce unstable results that update unpredictably when the component happens to re-render. (https://react.dev/reference/rules/components-and-hooks-must-be-pure#components-and-hooks-must-be-idempotent)',
suggestions: null,
}).withDetails({
kind: 'error',
loc: callee.loc,
message: 'Cannot call impure function',
}),
);
}
}
}
}
return errors.asResult();
} | typescript | github | https://github.com/facebook/react | compiler/packages/babel-plugin-react-compiler/src/Validation/ValidateNoImpureFunctionsInRender.ts |
/**
* @jest-environment node
*/
import urlDataStrategy from "./utils/urlDataStrategy";
import type {
StaticHandler,
StaticHandlerContext,
} from "../../lib/router/router";
import {
createStaticHandler,
getStaticContextFromError,
} from "../../lib/router/router";
import {
ErrorResponseImpl,
isRouteErrorResponse,
redirect,
} from "../../lib/router/utils";
import { createDeferred } from "./utils/data-router-setup";
import {
createRequest,
createSubmitRequest,
invariant,
sleep,
} from "./utils/utils";
process.on("unhandledRejection", (e) => {
console.error("unhandledRejection", e);
});
process.on("uncaughtException", (e) => {
console.error("uncaughtException", e);
});
describe("ssr", () => {
const SSR_ROUTES = [
{
id: "index",
path: "/",
loader: () => "INDEX LOADER",
},
{
id: "parent",
path: "/parent",
loader: () => "PARENT LOADER",
action: () => "PARENT ACTION",
children: [
{
id: "parentIndex",
index: true,
loader: () => "PARENT INDEX LOADER",
action: () => "PARENT INDEX ACTION",
},
{
id: "child",
path: "child",
loader: () => "CHILD LOADER",
action: () => "CHILD ACTION",
},
{
id: "json",
path: "json",
loader: () => Response.json({ type: "loader" }),
action: () => Response.json({ type: "action" }),
},
{
id: "deferred",
path: "deferred",
loader: ({ request }) => {
if (new URL(request.url).searchParams.has("reject")) {
let promise = new Promise((_, r) =>
setTimeout(() => r("broken!"), 10),
);
promise.catch(() => {});
return {
critical: "loader",
lazy: promise,
};
}
if (new URL(request.url).searchParams.has("undefined")) {
return {
critical: "loader",
lazy: new Promise((r) => setTimeout(() => r(undefined), 10)),
};
}
if (new URL(request.url).searchParams.has("status")) {
return {
critical: "loader",
lazy: new Promise((r) => setTimeout(() => r("lazy"), 10)),
};
}
return {
critical: "loader",
lazy: new Promise((r) => setTimeout(() => r("lazy"), 10)),
};
},
},
{
id: "error",
path: "error",
loader: () => Promise.reject("ERROR LOADER ERROR"),
action: () => Promise.reject("ERROR ACTION ERROR"),
},
{
id: "errorBoundary",
path: "error-boundary",
hasErrorBoundary: true,
loader: () => Promise.reject("ERROR BOUNDARY LOADER ERROR"),
action: () => Promise.reject("ERROR BOUNDARY ACTION ERROR"),
},
],
},
{
id: "redirect",
path: "/redirect",
loader: () => redirect("/"),
},
{
id: "custom",
path: "/custom",
loader: () =>
new Response(new URLSearchParams([["foo", "bar"]]).toString(), {
headers: { "Content-Type": "application/x-www-form-urlencoded" },
}),
},
];
// Regardless of if the URL is internal or external - all absolute URL
// responses should return untouched during SSR so the browser can handle
// them
let ABSOLUTE_URLS = [
"http://localhost/",
"https://localhost/about",
"http://remix.run/blog",
"https://remix.run/blog",
"//remix.run/blog",
"app://whatever",
"mailto:hello@remix.run",
"web+remix:whatever",
];
describe("document requests", () => {
it("should support document load navigations", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createRequest("/parent/child"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
child: "CHILD LOADER",
},
errors: null,
location: { pathname: "/parent/child" },
matches: [{ route: { id: "parent" } }, { route: { id: "child" } }],
});
});
it("should support document load navigations with HEAD requests", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(
createRequest("/parent/child", { method: "HEAD" }),
);
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
child: "CHILD LOADER",
},
errors: null,
location: { pathname: "/parent/child" },
matches: [{ route: { id: "parent" } }, { route: { id: "child" } }],
});
});
it("should support document load navigations with a basename", async () => {
let { query } = createStaticHandler(SSR_ROUTES, { basename: "/base" });
let context = await query(createRequest("/base/parent/child"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
child: "CHILD LOADER",
},
errors: null,
location: { pathname: "/base/parent/child" },
matches: [{ route: { id: "parent" } }, { route: { id: "child" } }],
});
});
it("should not fill in null loaderData values for routes without loaders", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
children: [
{
id: "none",
path: "none",
},
{
id: "a",
path: "a",
loader: () => "A",
children: [
{
id: "b",
path: "b",
},
],
},
],
},
]);
// No loaders at all
let context = await query(createRequest("/none"));
expect(context).toMatchObject({
actionData: null,
loaderData: {},
errors: null,
location: { pathname: "/none" },
});
// Mix of loaders and no loaders
context = await query(createRequest("/a/b"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
a: "A",
},
errors: null,
location: { pathname: "/a/b" },
});
});
it("should support document load navigations returning responses", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createRequest("/parent/json"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
json: { type: "loader" },
},
errors: null,
matches: [{ route: { id: "parent" } }, { route: { id: "json" } }],
});
});
it("should support document load navigations returning deferred", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = (await query(
createRequest("/parent/deferred"),
)) as StaticHandlerContext;
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
deferred: {
critical: "loader",
lazy: expect.any(Promise),
},
},
errors: null,
location: { pathname: "/parent/deferred" },
matches: [{ route: { id: "parent" } }, { route: { id: "deferred" } }],
});
await new Promise((r) => setTimeout(r, 10));
await expect(context.loaderData.deferred.lazy).resolves.toBe("lazy");
});
it("should support route.lazy", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
async lazy() {
await sleep(100);
return {
async loader() {
await sleep(100);
return "ROOT LOADER";
},
};
},
},
{
id: "parent",
path: "/parent",
async lazy() {
await sleep(100);
return {
async loader() {
await sleep(100);
return "PARENT LOADER";
},
};
},
children: [
{
id: "child",
path: "child",
async lazy() {
await sleep(100);
return {
async loader() {
await sleep(100);
return "CHILD LOADER";
},
};
},
},
],
},
]);
let context = await query(createRequest("/"));
expect(context).toMatchObject({
loaderData: {
root: "ROOT LOADER",
},
errors: null,
location: { pathname: "/" },
matches: [{ route: { id: "root" } }],
});
context = await query(createRequest("/parent/child"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
child: "CHILD LOADER",
},
errors: null,
location: { pathname: "/parent/child" },
matches: [{ route: { id: "parent" } }, { route: { id: "child" } }],
});
});
it("should support document submit navigations", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createSubmitRequest("/parent/child"));
expect(context).toMatchObject({
actionData: {
child: "CHILD ACTION",
},
loaderData: {
parent: "PARENT LOADER",
child: "CHILD LOADER",
},
errors: null,
location: { pathname: "/parent/child" },
matches: [{ route: { id: "parent" } }, { route: { id: "child" } }],
});
});
it("should support alternative submission methods", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context;
let expected = {
actionData: {
child: "CHILD ACTION",
},
loaderData: {
parent: "PARENT LOADER",
child: "CHILD LOADER",
},
errors: null,
location: { pathname: "/parent/child" },
matches: [{ route: { id: "parent" } }, { route: { id: "child" } }],
};
context = await query(
createSubmitRequest("/parent/child", { method: "PUT" }),
);
expect(context).toMatchObject(expected);
context = await query(
createSubmitRequest("/parent/child", { method: "PATCH" }),
);
expect(context).toMatchObject(expected);
context = await query(
createSubmitRequest("/parent/child", { method: "DELETE" }),
);
expect(context).toMatchObject(expected);
});
it("should support document submit navigations returning responses", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createSubmitRequest("/parent/json"));
expect(context).toMatchObject({
actionData: {
json: { type: "action" },
},
loaderData: {
parent: "PARENT LOADER",
json: { type: "loader" },
},
errors: null,
matches: [{ route: { id: "parent" } }, { route: { id: "json" } }],
});
});
it("should support document submit navigations to layout routes", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createSubmitRequest("/parent"));
expect(context).toMatchObject({
actionData: {
parent: "PARENT ACTION",
},
loaderData: {
parent: "PARENT LOADER",
parentIndex: "PARENT INDEX LOADER",
},
errors: null,
matches: [
{ route: { id: "parent" } },
{ route: { id: "parentIndex" } },
],
});
});
it("should support document submit navigations to index routes", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createSubmitRequest("/parent?index"));
expect(context).toMatchObject({
actionData: {
parentIndex: "PARENT INDEX ACTION",
},
loaderData: {
parent: "PARENT LOADER",
parentIndex: "PARENT INDEX LOADER",
},
errors: null,
matches: [
{ route: { id: "parent" } },
{ route: { id: "parentIndex" } },
],
});
});
it("should handle redirect Responses", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let response = await query(createRequest("/redirect"));
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe("/");
});
it("should handle relative redirect responses (loader)", async () => {
let { query } = createStaticHandler([
{
path: "/",
children: [
{
path: "parent",
children: [
{
path: "child",
loader: () => redirect(".."),
},
],
},
],
},
]);
let response = await query(createRequest("/parent/child"));
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe("/parent");
});
it("should handle relative redirect responses (action)", async () => {
let { query } = createStaticHandler([
{
path: "/",
children: [
{
path: "parent",
children: [
{
path: "child",
action: () => redirect(".."),
},
],
},
],
},
]);
let response = await query(createSubmitRequest("/parent/child"));
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe("/parent");
});
it("should handle absolute redirect Responses", async () => {
for (let url of ABSOLUTE_URLS) {
let handler = createStaticHandler([
{
path: "/",
loader: () => redirect(url),
},
]);
let response = await handler.query(createRequest("/"));
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe(url);
}
});
it("should handle 404 navigations", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createRequest("/not/found"));
expect(context).toMatchObject({
loaderData: {},
actionData: null,
errors: {
index: new ErrorResponseImpl(
404,
"Not Found",
new Error('No route matches URL "/not/found"'),
true,
),
},
matches: [{ route: { id: "index" } }],
});
});
it("should handle load error responses", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context;
// Error handled by child
context = await query(createRequest("/parent/error-boundary"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
},
errors: {
errorBoundary: "ERROR BOUNDARY LOADER ERROR",
},
matches: [
{ route: { id: "parent" } },
{ route: { id: "errorBoundary" } },
],
});
// Error propagates to parent
context = await query(createRequest("/parent/error"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
},
errors: {
parent: "ERROR LOADER ERROR",
},
matches: [{ route: { id: "parent" } }, { route: { id: "error" } }],
});
});
it("should handle submit error responses", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context;
// Error handled by child
context = await query(createSubmitRequest("/parent/error-boundary"));
expect(context).toMatchObject({
actionData: null,
loaderData: {
parent: "PARENT LOADER",
},
errors: {
errorBoundary: "ERROR BOUNDARY ACTION ERROR",
},
matches: [
{ route: { id: "parent" } },
{ route: { id: "errorBoundary" } },
],
});
// Error propagates to parent
context = await query(createSubmitRequest("/parent/error"));
expect(context).toMatchObject({
actionData: null,
loaderData: {},
errors: {
parent: "ERROR ACTION ERROR",
},
matches: [{ route: { id: "parent" } }, { route: { id: "error" } }],
});
});
it("should handle multiple errors at separate boundaries", async () => {
let routes = [
{
id: "root",
path: "/",
loader: () => Promise.reject("ROOT"),
hasErrorBoundary: true,
children: [
{
id: "child",
path: "child",
loader: () => Promise.reject("CHILD"),
hasErrorBoundary: true,
},
],
},
];
let { query } = createStaticHandler(routes);
let context;
context = await query(createRequest("/child"));
expect(context.errors).toEqual({
root: "ROOT",
child: "CHILD",
});
});
it("should handle multiple errors at the same boundary", async () => {
let routes = [
{
id: "root",
path: "/",
loader: () => Promise.reject("ROOT"),
hasErrorBoundary: true,
children: [
{
id: "child",
path: "child",
loader: () => Promise.reject("CHILD"),
},
],
},
];
let { query } = createStaticHandler(routes);
let context;
context = await query(createRequest("/child"));
expect(context.errors).toEqual({
// higher error value wins
root: "ROOT",
});
});
it("should skip bubbling loader errors when skipLoaderErrorBubbling is passed", async () => {
let routes = [
{
id: "root",
path: "/",
hasErrorBoundary: true,
children: [
{
id: "child",
path: "child",
loader: () => Promise.reject("CHILD"),
},
],
},
];
let { query } = createStaticHandler(routes);
let context;
context = await query(createRequest("/child"), {
skipLoaderErrorBubbling: true,
});
expect(context.errors).toEqual({
child: "CHILD",
});
});
it("should handle aborted load requests", async () => {
let dfd = createDeferred();
let controller = new AbortController();
let { query } = createStaticHandler([
{
id: "root",
path: "/path",
loader: () => dfd.promise,
},
]);
let request = createRequest("/path?key=value", {
signal: controller.signal,
});
let e;
try {
let contextPromise = query(request);
controller.abort();
// This should resolve even though we never resolved the loader
await contextPromise;
} catch (_e) {
e = _e;
}
expect(e).toBeInstanceOf(DOMException);
expect(e.name).toBe("AbortError");
expect(e.message).toBe("This operation was aborted");
});
it("should handle aborted submit requests", async () => {
let dfd = createDeferred();
let controller = new AbortController();
let { query } = createStaticHandler([
{
id: "root",
path: "/path",
action: () => dfd.promise,
},
]);
let request = createSubmitRequest("/path?key=value", {
signal: controller.signal,
});
let e;
try {
let contextPromise = query(request);
controller.abort();
// This should resolve even though we never resolved the loader
await contextPromise;
} catch (_e) {
e = _e;
}
expect(e).toBeInstanceOf(DOMException);
expect(e.name).toBe("AbortError");
expect(e.message).toBe("This operation was aborted");
});
it("should handle aborted requests", async () => {
let dfd = createDeferred();
let controller = new AbortController();
let { query } = createStaticHandler([
{
id: "root",
path: "/path",
loader: () => dfd.promise,
},
]);
let request = createRequest("/path?key=value", {
signal: controller.signal,
});
let e;
try {
let contextPromise = query(request);
controller.abort(new Error("Oh no!"));
// This should resolve even though we never resolved the loader
await contextPromise;
} catch (_e) {
e = _e;
}
expect(e).toBeInstanceOf(Error);
expect(e.message).toBe("Oh no!");
});
it("should assign signals to requests by default (per the", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let request = createRequest("/", { signal: undefined });
let context = await query(request);
expect((context as StaticHandlerContext).loaderData.index).toBe(
"INDEX LOADER",
);
});
it("should handle not found action submissions with a 405 error", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
},
]);
let request = createSubmitRequest("/");
let context = await query(request);
expect(context).toMatchObject({
actionData: null,
loaderData: {},
errors: {
root: new ErrorResponseImpl(
405,
"Method Not Allowed",
new Error(
'You made a POST request to "/" but did not provide an `action` ' +
'for route "root", so there is no way to handle the request.',
),
true,
),
},
matches: [{ route: { id: "root" } }],
});
});
it("should handle unsupported methods with a 405 error", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
},
]);
let request = createRequest("/", { method: "OPTIONS" });
let context = await query(request);
expect(context).toMatchObject({
actionData: null,
loaderData: {},
errors: {
root: new ErrorResponseImpl(
405,
"Method Not Allowed",
new Error('Invalid request method "OPTIONS"'),
true,
),
},
matches: [{ route: { id: "root" } }],
});
});
it("should send proper arguments to loaders", async () => {
let rootLoaderStub = jest.fn(() => "ROOT");
let childLoaderStub = jest.fn(() => "CHILD");
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: rootLoaderStub,
children: [
{
id: "child",
path: "child",
loader: childLoaderStub,
},
],
},
]);
await query(createRequest("/child"));
// @ts-expect-error
let rootLoaderRequest = rootLoaderStub.mock.calls[0][0]?.request;
// @ts-expect-error
let childLoaderRequest = childLoaderStub.mock.calls[0][0]?.request;
expect(rootLoaderRequest.method).toBe("GET");
expect(rootLoaderRequest.url).toBe("http://localhost/child");
expect(childLoaderRequest.method).toBe("GET");
expect(childLoaderRequest.url).toBe("http://localhost/child");
});
it("should send proper arguments to actions", async () => {
let actionStub = jest.fn(() => "ACTION");
let rootLoaderStub = jest.fn(() => "ROOT");
let childLoaderStub = jest.fn(() => "CHILD");
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: rootLoaderStub,
children: [
{
id: "child",
path: "child",
action: actionStub,
loader: childLoaderStub,
},
],
},
]);
await query(
createSubmitRequest("/child", {
headers: {
test: "value",
},
}),
);
// @ts-expect-error
let actionRequest = actionStub.mock.calls[0][0]?.request;
expect(actionRequest.method).toBe("POST");
expect(actionRequest.url).toBe("http://localhost/child");
expect(actionRequest.headers.get("Content-Type")).toBe(
"application/x-www-form-urlencoded;charset=UTF-8",
);
expect((await actionRequest.formData()).get("key")).toBe("value");
// @ts-expect-error
let rootLoaderRequest = rootLoaderStub.mock.calls[0][0]?.request;
// @ts-expect-error
let childLoaderRequest = childLoaderStub.mock.calls[0][0]?.request;
expect(rootLoaderRequest.method).toBe("GET");
expect(rootLoaderRequest.url).toBe("http://localhost/child");
expect(rootLoaderRequest.headers.get("test")).toBe("value");
expect(await rootLoaderRequest.text()).toBe("");
expect(childLoaderRequest.method).toBe("GET");
expect(childLoaderRequest.url).toBe("http://localhost/child");
expect(childLoaderRequest.headers.get("test")).toBe("value");
// Can't re-read body here since it's the same request as the root
});
it("should send proper arguments to loaders after an action errors", async () => {
let actionStub = jest.fn(() => Promise.reject("ACTION ERROR"));
let rootLoaderStub = jest.fn(() => "ROOT");
let childLoaderStub = jest.fn(() => "CHILD");
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: rootLoaderStub,
children: [
{
id: "child",
path: "child",
action: actionStub,
loader: childLoaderStub,
hasErrorBoundary: true,
},
],
},
]);
await query(
createSubmitRequest("/child", {
headers: {
test: "value",
},
}),
);
// @ts-expect-error
let actionRequest = actionStub.mock.calls[0][0]?.request;
expect(actionRequest.method).toBe("POST");
expect(actionRequest.url).toBe("http://localhost/child");
expect(actionRequest.headers.get("Content-Type")).toBe(
"application/x-www-form-urlencoded;charset=UTF-8",
);
expect((await actionRequest.formData()).get("key")).toBe("value");
// @ts-expect-error
let rootLoaderRequest = rootLoaderStub.mock.calls[0][0]?.request;
expect(rootLoaderRequest.method).toBe("GET");
expect(rootLoaderRequest.url).toBe("http://localhost/child");
expect(rootLoaderRequest.headers.get("test")).toBe("value");
expect(await rootLoaderRequest.text()).toBe("");
expect(childLoaderStub).not.toHaveBeenCalled();
});
it("should support a requestContext passed to loaders and actions", async () => {
let requestContext = { sessionId: "12345" };
let rootStub = jest.fn(() => "ROOT");
let childStub = jest.fn(() => "CHILD");
let actionStub = jest.fn(() => "CHILD ACTION");
let arg = (s) => s.mock.calls[0][0];
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: rootStub,
children: [
{
id: "child",
path: "child",
action: actionStub,
loader: childStub,
},
],
},
]);
await query(createRequest("/child"), { requestContext });
expect(arg(rootStub).context.sessionId).toBe("12345");
expect(arg(childStub).context.sessionId).toBe("12345");
actionStub.mockClear();
rootStub.mockClear();
childStub.mockClear();
await query(createSubmitRequest("/child"), { requestContext });
expect(arg(actionStub).context.sessionId).toBe("12345");
expect(arg(rootStub).context.sessionId).toBe("12345");
expect(arg(childStub).context.sessionId).toBe("12345");
});
describe("deferred", () => {
let { query } = createStaticHandler(SSR_ROUTES);
it("should return DeferredData on symbol", async () => {
let context = (await query(
createRequest("/parent/deferred"),
)) as StaticHandlerContext;
expect(context).toMatchObject({
loaderData: {
parent: "PARENT LOADER",
deferred: {
critical: "loader",
lazy: expect.any(Promise),
},
},
});
await new Promise((r) => setTimeout(r, 10));
await expect(context.loaderData.deferred.lazy).resolves.toBe("lazy");
expect(context).toMatchObject({
loaderData: {
parent: "PARENT LOADER",
deferred: {
critical: "loader",
lazy: expect.any(Promise),
},
},
});
});
it("should return rejected promises", async () => {
let context = (await query(
createRequest("/parent/deferred?reject"),
)) as StaticHandlerContext;
expect(context).toMatchObject({
loaderData: {
parent: "PARENT LOADER",
deferred: {
critical: "loader",
lazy: expect.any(Promise),
},
},
});
await new Promise((r) => setTimeout(r, 10));
await expect(context.loaderData.deferred.lazy).rejects.toBe("broken!");
});
it("should return resolved undefined", async () => {
let context = (await query(
createRequest("/parent/deferred?undefined"),
)) as StaticHandlerContext;
expect(context).toMatchObject({
loaderData: {
parent: "PARENT LOADER",
deferred: {
critical: "loader",
lazy: expect.any(Promise),
},
},
});
await new Promise((r) => setTimeout(r, 10));
await expect(context.loaderData.deferred.lazy).resolves.toBeUndefined();
});
});
describe("statusCode", () => {
it("should expose a 200 status code by default", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
},
]);
let context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(200);
});
it("should expose a 500 status code on loader errors", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Response.json({ data: "ROOT" }, { status: 201 }),
children: [
{
id: "child",
index: true,
loader: () => {
throw new Error("💥");
},
},
],
},
]);
let context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(500);
});
it("should expose a 500 status code on action errors", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Response.json({ data: "ROOT" }, { status: 201 }),
children: [
{
id: "child",
index: true,
loader: () => Response.json({ data: "CHILD" }, { status: 202 }),
action: () => {
throw new Error("💥");
},
},
],
},
]);
let context = (await query(
createSubmitRequest("/?index"),
)) as StaticHandlerContext;
expect(context.statusCode).toBe(500);
});
it("should expose a 4xx status code on thrown loader responses", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Response.json({ data: "ROOT" }, { status: 201 }),
children: [
{
id: "child",
index: true,
loader: () => {
throw new Response(null, { status: 400 });
},
},
],
},
]);
let context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(400);
});
it("should expose a 4xx status code on thrown action responses", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Response.json({ data: "ROOT" }, { status: 201 }),
children: [
{
id: "child",
index: true,
loader: () => Response.json({ data: "CHILD" }, { status: 202 }),
action: () => {
throw new Response(null, { status: 400 });
},
},
],
},
]);
let context = (await query(
createSubmitRequest("/?index"),
)) as StaticHandlerContext;
expect(context.statusCode).toBe(400);
});
it("should expose the action status on submissions", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Response.json({ data: "ROOT" }, { status: 201 }),
children: [
{
id: "child",
index: true,
loader: () => Response.json({ data: "ROOT" }, { status: 202 }),
action: () => Response.json({ data: "ROOT" }, { status: 203 }),
},
],
},
]);
let context = (await query(
createSubmitRequest("/?index"),
)) as StaticHandlerContext;
expect(context.statusCode).toBe(203);
});
it("should expose the deepest 2xx status", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Response.json({ data: "ROOT" }, { status: 201 }),
children: [
{
id: "child",
index: true,
loader: () => Response.json({ data: "ROOT" }, { status: 202 }),
},
],
},
]);
let context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(202);
});
it("should expose the shallowest 4xx/5xx status", async () => {
let context;
let query: StaticHandler["query"];
query = createStaticHandler([
{
id: "root",
path: "/",
loader: () => {
throw new Response(null, { status: 400 });
},
children: [
{
id: "child",
index: true,
loader: () => {
throw new Response(null, { status: 401 });
},
},
],
},
]).query;
context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(400);
query = createStaticHandler([
{
id: "root",
path: "/",
loader: () => {
throw new Response(null, { status: 400 });
},
children: [
{
id: "child",
index: true,
loader: () => {
throw new Response(null, { status: 500 });
},
},
],
},
]).query;
context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(400);
query = createStaticHandler([
{
id: "root",
path: "/",
loader: () => {
throw new Response(null, { status: 400 });
},
children: [
{
id: "child",
index: true,
loader: () => {
throw new Error("💥");
},
},
],
},
]).query;
context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(context.statusCode).toBe(400);
});
});
describe("headers", () => {
it("should expose headers from action/loader responses", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => new Response(null, { headers: { two: "2" } }),
children: [
{
id: "child",
index: true,
action: () => new Response(null, { headers: { one: "1" } }),
loader: () => new Response(null, { headers: { three: "3" } }),
},
],
},
]);
let context = (await query(
createSubmitRequest("/?index"),
)) as StaticHandlerContext;
expect(Array.from(context.actionHeaders.child.entries())).toEqual([
["one", "1"],
]);
expect(Array.from(context.loaderHeaders.root.entries())).toEqual([
["two", "2"],
]);
expect(Array.from(context.loaderHeaders.child.entries())).toEqual([
["three", "3"],
]);
});
it("should expose headers from loader error responses", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => new Response(null, { headers: { one: "1" } }),
children: [
{
id: "child",
index: true,
loader: () => {
throw new Response(null, { headers: { two: "2" } });
},
},
],
},
]);
let context = (await query(createRequest("/"))) as StaticHandlerContext;
expect(Array.from(context.loaderHeaders.root.entries())).toEqual([
["one", "1"],
]);
expect(Array.from(context.loaderHeaders.child.entries())).toEqual([
["two", "2"],
]);
});
it("should expose headers from action error responses", async () => {
let { query } = createStaticHandler([
{
id: "root",
path: "/",
children: [
{
id: "child",
index: true,
action: () => {
throw new Response(null, { headers: { one: "1" } });
},
},
],
},
]);
let context = (await query(
createSubmitRequest("/?index"),
)) as StaticHandlerContext;
expect(Array.from(context.actionHeaders.child.entries())).toEqual([
["one", "1"],
]);
});
});
describe("getStaticContextFromError", () => {
it("should provide a context for a second-pass render for a thrown error", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createRequest("/"));
expect(context).toMatchObject({
errors: null,
loaderData: {
index: "INDEX LOADER",
},
statusCode: 200,
});
let error = new Error("💥");
invariant(!(context instanceof Response), "Uh oh");
context = getStaticContextFromError(SSR_ROUTES, context, error);
expect(context).toMatchObject({
errors: {
index: error,
},
loaderData: {
index: "INDEX LOADER",
},
statusCode: 500,
});
});
it("should accept a thrown response from entry.server", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createRequest("/"));
expect(context).toMatchObject({
errors: null,
loaderData: {
index: "INDEX LOADER",
},
statusCode: 200,
});
let errorResponse = new ErrorResponseImpl(400, "Bad Request", "Oops!");
invariant(!(context instanceof Response), "Uh oh");
context = getStaticContextFromError(SSR_ROUTES, context, errorResponse);
expect(context).toMatchObject({
errors: {
index: errorResponse,
},
loaderData: {
index: "INDEX LOADER",
},
statusCode: 400,
});
});
});
describe("router dataStrategy", () => {
it("should support document load navigations with custom dataStrategy", async () => {
let { query } = createStaticHandler(SSR_ROUTES);
let context = await query(createRequest("/custom"), {
dataStrategy: urlDataStrategy,
});
expect(context).toMatchObject({
actionData: null,
loaderData: {
custom: expect.any(URLSearchParams),
},
errors: null,
location: { pathname: "/custom" },
matches: [{ route: { id: "custom" } }],
});
expect(
(context as StaticHandlerContext).loaderData.custom.get("foo"),
).toEqual("bar");
});
});
});
describe("singular route requests", () => {
function setupFlexRouteTest() {
function queryRoute(
req: Request,
routeId: string,
type: "loader" | "action",
data: any,
isError = false,
) {
let handler = createStaticHandler([
{
id: "flex",
path: "/flex",
[type]: () =>
isError ? Promise.reject(data) : Promise.resolve(data),
},
]);
return handler.queryRoute(req, { routeId });
}
return {
resolveLoader(data: any) {
return queryRoute(
createRequest("/flex"),
"flex",
"loader",
data,
false,
);
},
rejectLoader(data: any) {
return queryRoute(
createRequest("/flex"),
"flex",
"loader",
data,
true,
);
},
resolveAction(data: any) {
return queryRoute(
createSubmitRequest("/flex"),
"flex",
"action",
data,
false,
);
},
rejectAction(data: any) {
return queryRoute(
createSubmitRequest("/flex"),
"flex",
"action",
data,
true,
);
},
};
}
it("should match routes automatically if no routeId is provided", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data;
data = await queryRoute(createRequest("/parent"));
expect(data).toBe("PARENT LOADER");
data = await queryRoute(createRequest("/parent?index"));
expect(data).toBe("PARENT INDEX LOADER");
data = await queryRoute(createRequest("/parent/child"), {
routeId: "child",
});
expect(data).toBe("CHILD LOADER");
});
it("should support HEAD requests", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data = await queryRoute(createRequest("/parent", { method: "HEAD" }));
expect(data).toBe("PARENT LOADER");
});
it("should support OPTIONS requests", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data = await queryRoute(
createRequest("/parent", { method: "OPTIONS" }),
);
expect(data).toBe("PARENT LOADER");
});
it("should support singular route load navigations (primitives)", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data;
// Layout route
data = await queryRoute(createRequest("/parent"), {
routeId: "parent",
});
expect(data).toBe("PARENT LOADER");
// Index route
data = await queryRoute(createRequest("/parent"), {
routeId: "parentIndex",
});
expect(data).toBe("PARENT INDEX LOADER");
// Parent in nested route
data = await queryRoute(createRequest("/parent/child"), {
routeId: "parent",
});
expect(data).toBe("PARENT LOADER");
// Child in nested route
data = await queryRoute(createRequest("/parent/child"), {
routeId: "child",
});
expect(data).toBe("CHILD LOADER");
// Non-undefined falsey values should count
let T = setupFlexRouteTest();
data = await T.resolveLoader(null);
expect(data).toBeNull();
data = await T.resolveLoader(false);
expect(data).toBe(false);
data = await T.resolveLoader("");
expect(data).toBe("");
});
it("should support singular route load navigations (Responses)", async () => {
/* eslint-disable jest/no-conditional-expect */
let T = setupFlexRouteTest();
let data;
// When Responses are returned or thrown, it should always resolve the
// raw Response from queryRoute
// Returned Success Response
data = await T.resolveLoader(new Response("Created!", { status: 201 }));
expect(data.status).toBe(201);
expect(await data.text()).toBe("Created!");
// Thrown Success Response
try {
await T.rejectLoader(new Response("Created!", { status: 201 }));
expect(false).toBe(true);
} catch (data) {
expect(data.status).toBe(201);
expect(await data.text()).toBe("Created!");
}
// Returned Redirect Response
data = await T.resolveLoader(
new Response(null, {
status: 302,
headers: { Location: "/" },
}),
);
expect(data.status).toBe(302);
expect(data.headers.get("Location")).toBe("/");
// Thrown Redirect Response
data = await T.rejectLoader(
new Response(null, {
status: 301,
headers: { Location: "/" },
}),
);
expect(data.status).toBe(301);
expect(data.headers.get("Location")).toBe("/");
// Returned Error Response
data = await T.resolveLoader(new Response("Why?", { status: 400 }));
expect(data.status).toBe(400);
expect(await data.text()).toBe("Why?");
// Thrown Error Response
try {
await T.rejectLoader(new Response("Oh no!", { status: 401 }));
expect(false).toBe(true);
} catch (data) {
expect(data.status).toBe(401);
expect(await data.text()).toBe("Oh no!");
}
/* eslint-enable jest/no-conditional-expect */
});
it("should support singular route load navigations (Errors)", async () => {
let T = setupFlexRouteTest();
let data;
// Returned Error instance is treated as data since it was not thrown
data = await T.resolveLoader(new Error("Why?"));
expect(data).toEqual(new Error("Why?"));
// Anything thrown (Error instance or not) will throw from queryRoute
// so we know to handle it as an errorPath in the server. Generally
// though in queryRoute, we would expect responses to be coming back -
// not
// Thrown Error
try {
await T.rejectLoader(new Error("Oh no!"));
} catch (e) {
data = e;
}
expect(data).toEqual(new Error("Oh no!"));
// Thrown non-Error
try {
await T.rejectLoader("This is weird?");
} catch (e) {
data = e;
}
expect(data).toEqual("This is weird?");
// Non-undefined falsey values should count
try {
await T.rejectLoader(null);
} catch (e) {
data = e;
}
expect(data).toBeNull();
try {
await T.rejectLoader(false);
} catch (e) {
data = e;
}
expect(data).toBe(false);
try {
await T.rejectLoader("");
} catch (e) {
data = e;
}
expect(data).toBe("");
});
it("should support singular route load navigations (with a basename)", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES, {
basename: "/base",
});
let data;
// Layout route
data = await queryRoute(createRequest("/base/parent"), {
routeId: "parent",
});
expect(data).toBe("PARENT LOADER");
// Index route
data = await queryRoute(createRequest("/base/parent"), {
routeId: "parentIndex",
});
expect(data).toBe("PARENT INDEX LOADER");
// Parent in nested route
data = await queryRoute(createRequest("/base/parent/child"), {
routeId: "parent",
});
expect(data).toBe("PARENT LOADER");
// Child in nested route
data = await queryRoute(createRequest("/base/parent/child"), {
routeId: "child",
});
expect(data).toBe("CHILD LOADER");
// Non-undefined falsey values should count
let T = setupFlexRouteTest();
data = await T.resolveLoader(null);
expect(data).toBeNull();
data = await T.resolveLoader(false);
expect(data).toBe(false);
data = await T.resolveLoader("");
expect(data).toBe("");
});
it("should support singular route submit navigations (primitives)", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data;
// Layout route
data = await queryRoute(createSubmitRequest("/parent"), {
routeId: "parent",
});
expect(data).toBe("PARENT ACTION");
// Index route
data = await queryRoute(createSubmitRequest("/parent"), {
routeId: "parentIndex",
});
expect(data).toBe("PARENT INDEX ACTION");
// Parent in nested route
data = await queryRoute(createSubmitRequest("/parent/child"), {
routeId: "parent",
});
expect(data).toBe("PARENT ACTION");
// Child in nested route
data = await queryRoute(createSubmitRequest("/parent/child"), {
routeId: "child",
});
expect(data).toBe("CHILD ACTION");
// Non-undefined falsey values should count
let T = setupFlexRouteTest();
data = await T.resolveAction(null);
expect(data).toBeNull();
data = await T.resolveAction(false);
expect(data).toBe(false);
data = await T.resolveAction("");
expect(data).toBe("");
});
it("should support alternative submission methods", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data;
data = await queryRoute(
createSubmitRequest("/parent", { method: "PUT" }),
{ routeId: "parent" },
);
expect(data).toBe("PARENT ACTION");
data = await queryRoute(
createSubmitRequest("/parent", { method: "PATCH" }),
{ routeId: "parent" },
);
expect(data).toBe("PARENT ACTION");
data = await queryRoute(
createSubmitRequest("/parent", { method: "DELETE" }),
{ routeId: "parent" },
);
expect(data).toBe("PARENT ACTION");
});
it("should support singular route submit navigations (Responses)", async () => {
/* eslint-disable jest/no-conditional-expect */
let T = setupFlexRouteTest();
let data;
// When Responses are returned or thrown, it should always resolve the
// raw Response from queryRoute
// Returned Success Response
data = await T.resolveAction(new Response("Created!", { status: 201 }));
expect(data.status).toBe(201);
expect(await data.text()).toBe("Created!");
// Thrown Success Response
try {
await T.rejectAction(new Response("Created!", { status: 201 }));
expect(false).toBe(true);
} catch (data) {
expect(data.status).toBe(201);
expect(await data.text()).toBe("Created!");
}
// Returned Redirect Response
data = await T.resolveAction(
new Response(null, {
status: 302,
headers: { Location: "/" },
}),
);
expect(data.status).toBe(302);
expect(data.headers.get("Location")).toBe("/");
// Thrown Redirect Response
data = await T.rejectAction(
new Response(null, {
status: 301,
headers: { Location: "/" },
}),
);
expect(data.status).toBe(301);
expect(data.headers.get("Location")).toBe("/");
// Returned Error Response
data = await T.resolveAction(new Response("Why?", { status: 400 }));
expect(data.status).toBe(400);
expect(await data.text()).toBe("Why?");
// Thrown Error Response
try {
await T.rejectAction(new Response("Oh no!", { status: 401 }));
expect(false).toBe(true);
} catch (data) {
expect(data.status).toBe(401);
expect(await data.text()).toBe("Oh no!");
}
/* eslint-enable jest/no-conditional-expect */
});
it("should support singular route submit navigations (Errors)", async () => {
let T = setupFlexRouteTest();
let data;
// Returned Error instance is treated as data since it was not thrown
data = await T.resolveAction(new Error("Why?"));
expect(data).toEqual(new Error("Why?"));
// Anything thrown (Error instance or not) will throw from queryRoute
// so we know to handle it as an errorPath in the server. Generally
// though in queryRoute, we would expect responses to be coming back -
// not
// Thrown Error
try {
await T.rejectAction(new Error("Oh no!"));
} catch (e) {
data = e;
}
expect(data).toEqual(new Error("Oh no!"));
// Thrown non-Error
try {
await T.rejectAction("This is weird?");
} catch (e) {
data = e;
}
expect(data).toEqual("This is weird?");
// Non-undefined falsey values should count
try {
await T.rejectAction(null);
} catch (e) {
data = e;
}
expect(data).toBeNull();
try {
await T.rejectAction(false);
} catch (e) {
data = e;
}
expect(data).toBe(false);
try {
await T.rejectAction("");
} catch (e) {
data = e;
}
expect(data).toBe("");
});
it("should allow returning undefined from an action/loader", async () => {
let T = setupFlexRouteTest();
expect(await T.resolveLoader(undefined)).toBeUndefined();
expect(await T.resolveAction(undefined)).toBeUndefined();
});
it("should handle relative redirect responses (loader)", async () => {
let { queryRoute } = createStaticHandler([
{
path: "/",
children: [
{
path: "parent",
children: [
{
id: "child",
path: "child",
loader: () => redirect(".."),
},
],
},
],
},
]);
let response = await queryRoute(createRequest("/parent/child"), {
routeId: "child",
});
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe("/parent");
});
it("should handle relative redirect responses (action)", async () => {
let { queryRoute } = createStaticHandler([
{
path: "/",
children: [
{
path: "parent",
children: [
{
id: "child",
path: "child",
action: () => redirect(".."),
},
],
},
],
},
]);
let response = await queryRoute(createSubmitRequest("/parent/child"), {
routeId: "child",
});
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe("/parent");
});
it("should handle absolute redirect Responses", async () => {
for (let url of ABSOLUTE_URLS) {
let handler = createStaticHandler([
{
id: "root",
path: "/",
loader: () => redirect(url),
},
]);
let response = await handler.queryRoute(createRequest("/"), {
routeId: "root",
});
expect(response instanceof Response).toBe(true);
expect((response as Response).status).toBe(302);
expect((response as Response).headers.get("Location")).toBe(url);
}
});
it("should not unwrap responses returned from loaders", async () => {
let response = Response.json({ key: "value" });
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Promise.resolve(response),
},
]);
let request = createRequest("/");
let data = await queryRoute(request, { routeId: "root" });
expect(data instanceof Response).toBe(true);
expect(await data.json()).toEqual({ key: "value" });
});
it("should not unwrap responses returned from actions", async () => {
let response = Response.json({ key: "value" });
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/",
action: () => Promise.resolve(response),
},
]);
let request = createSubmitRequest("/");
let data = await queryRoute(request, { routeId: "root" });
expect(data instanceof Response).toBe(true);
expect(await data.json()).toEqual({ key: "value" });
});
it("should not unwrap responses thrown from loaders", async () => {
let response = Response.json({ key: "value" });
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/",
loader: () => Promise.reject(response),
},
]);
let request = createRequest("/");
let data;
try {
await queryRoute(request, { routeId: "root" });
} catch (e) {
data = e;
}
expect(data instanceof Response).toBe(true);
expect(await data.json()).toEqual({ key: "value" });
});
it("should not unwrap responses thrown from actions", async () => {
let response = Response.json({ key: "value" });
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/",
action: () => Promise.reject(response),
},
]);
let request = createSubmitRequest("/");
let data;
try {
await queryRoute(request, { routeId: "root" });
} catch (e) {
data = e;
}
expect(data instanceof Response).toBe(true);
expect(await data.json()).toEqual({ key: "value" });
});
it("should handle aborted load requests", async () => {
let dfd = createDeferred();
let controller = new AbortController();
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/path",
loader: () => dfd.promise,
},
]);
let request = createRequest("/path?key=value", {
signal: controller.signal,
});
let e;
try {
let statePromise = queryRoute(request, { routeId: "root" });
controller.abort();
// This should resolve even though we never resolved the loader
await statePromise;
} catch (_e) {
e = _e;
}
expect(e).toBeInstanceOf(DOMException);
expect(e.name).toBe("AbortError");
expect(e.message).toBe("This operation was aborted");
});
it("should handle aborted submit requests - custom reason", async () => {
let dfd = createDeferred();
let controller = new AbortController();
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/path",
action: () => dfd.promise,
},
]);
let request = createSubmitRequest("/path?key=value", {
signal: controller.signal,
});
let e;
try {
let statePromise = queryRoute(request, { routeId: "root" });
controller.abort();
// This should resolve even though we never resolved the loader
await statePromise;
} catch (_e) {
e = _e;
}
expect(e).toBeInstanceOf(DOMException);
expect(e.name).toBe("AbortError");
expect(e.message).toBe("This operation was aborted");
});
it("should handle aborted load requests - custom reason", async () => {
let dfd = createDeferred();
let controller = new AbortController();
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/path",
loader: () => dfd.promise,
},
]);
let request = createRequest("/path?key=value", {
signal: controller.signal,
});
let e;
try {
let statePromise = queryRoute(request, { routeId: "root" });
controller.abort(new Error("Oh no!"));
// This should resolve even though we never resolved the loader
await statePromise;
} catch (_e) {
e = _e;
}
expect(e).toBeInstanceOf(Error);
expect(e.message).toBe("Oh no!");
});
it("should assign signals to requests by default (per the spec)", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let request = createRequest("/", { signal: undefined });
let data = await queryRoute(request, { routeId: "index" });
expect(data).toBe("INDEX LOADER");
});
it("should support a requestContext passed to loaders and actions", async () => {
let requestContext = { sessionId: "12345" };
let childStub = jest.fn(() => "CHILD");
let actionStub = jest.fn(() => "CHILD ACTION");
let arg = (s) => s.mock.calls[0][0];
let { queryRoute } = createStaticHandler([
{
path: "/",
children: [
{
id: "child",
path: "child",
action: actionStub,
loader: childStub,
},
],
},
]);
await queryRoute(createRequest("/child"), {
routeId: "child",
requestContext,
});
expect(arg(childStub).context.sessionId).toBe("12345");
await queryRoute(createSubmitRequest("/child"), {
routeId: "child",
requestContext,
});
expect(arg(actionStub).context.sessionId).toBe("12345");
});
describe("Errors with Status Codes", () => {
/* eslint-disable jest/no-conditional-expect */
let { queryRoute } = createStaticHandler([
{
id: "root",
path: "/",
},
]);
it("should handle not found paths with a 404 Response", async () => {
try {
await queryRoute(createRequest("/junk"));
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(404);
expect(data.error).toEqual(new Error('No route matches URL "/junk"'));
expect(data.internal).toBe(true);
}
try {
await queryRoute(createSubmitRequest("/junk"));
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(404);
expect(data.error).toEqual(new Error('No route matches URL "/junk"'));
expect(data.internal).toBe(true);
}
});
it("should handle not found routeIds with a 403 Response", async () => {
try {
await queryRoute(createRequest("/"), { routeId: "junk" });
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(403);
expect(data.error).toEqual(
new Error('Route "junk" does not match URL "/"'),
);
expect(data.internal).toBe(true);
}
try {
await queryRoute(createSubmitRequest("/"), { routeId: "junk" });
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(403);
expect(data.error).toEqual(
new Error('Route "junk" does not match URL "/"'),
);
expect(data.internal).toBe(true);
}
});
it("should handle missing loaders with a 400 Response", async () => {
try {
await queryRoute(createRequest("/"), { routeId: "root" });
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(400);
expect(data.error).toEqual(
new Error(
'You made a GET request to "/" but did not provide a `loader` ' +
'for route "root", so there is no way to handle the request.',
),
);
expect(data.internal).toBe(true);
}
});
it("should handle missing actions with a 405 Response", async () => {
try {
await queryRoute(createSubmitRequest("/"), { routeId: "root" });
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(405);
expect(data.error).toEqual(
new Error(
'You made a POST request to "/" but did not provide an `action` ' +
'for route "root", so there is no way to handle the request.',
),
);
expect(data.internal).toBe(true);
}
});
it("should handle unsupported methods with a 405 Response", async () => {
try {
await queryRoute(createRequest("/", { method: "CHICKEN" }), {
routeId: "root",
});
expect(false).toBe(true);
} catch (data) {
expect(isRouteErrorResponse(data)).toBe(true);
expect(data.status).toBe(405);
expect(data.error).toEqual(
new Error('Invalid request method "CHICKEN"'),
);
expect(data.internal).toBe(true);
}
});
/* eslint-enable jest/no-conditional-expect */
});
describe("router dataStrategy", () => {
it("should apply a custom data strategy", async () => {
let { queryRoute } = createStaticHandler(SSR_ROUTES);
let data;
data = await queryRoute(createRequest("/custom"), {
dataStrategy: urlDataStrategy,
});
expect(data).toBeInstanceOf(URLSearchParams);
expect((data as URLSearchParams).get("foo")).toBe("bar");
});
});
});
}); | typescript | github | https://github.com/remix-run/react-router | packages/react-router/__tests__/router/ssr-test.ts |
"""Application configuration module.

Flat module of configuration constants (Flask-style): imported by the app
and read as attributes.  ``DEBUG`` is derived from the command line.
"""
import sys
import os

# Debug mode: enabled when the process is started with a `--debug` argument.
# Membership test replaces the original manual scan over sys.argv.
DEBUG = '--debug' in sys.argv

# Host, Port
HOST = '127.0.0.1'
PORT = 8080

# Domain
SERVER_NAME = 'localhost:8080'

# Database
SQLALCHEMY_DATABASE_URI = 'mysql+pymysql://poss:poss@localhost/poss'
# Echo SQL statements only while debugging.
SQLALCHEMY_ECHO = DEBUG
DATABASE_CONNECT_OPTIONS = {}

# Login, Cookie and Session settings
CSRF_ENABLED = True
# Random strings for cookie generation, 40 chars should be enough,
# https://api.fnkr.net/random/?length=40&count=2
CSRF_SESSION_KEY = '--- CHANGE THIS TO SOME RANDOM VALUE ---'
SECRET_KEY = '--- CHANGE THIS TO SOME RANDOM VALUE ---'
SESSION_COOKIE_NAME = 'poss'
# Session lifetime in seconds (31 days).
PERMANENT_SESSION_LIFETIME = 2678400
SESSION_COOKIE_SECURE = False

# URL scheme that should be used for URL generation
# if no URL scheme is available
PREFERRED_URL_SCHEME = 'http'

# Define the application directory (computed once, reused below).
BASE_DIR = os.path.abspath(os.path.dirname(__file__))
# Data storage, will be created if it does not exist
DATA_DIR = os.path.join(BASE_DIR, 'data')

# Config file version
CONFIG_VERSION = 1
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// +k8s:deepcopy-gen=package
// +groupName=
// Package core contains the latest (or "internal") version of the
// Kubernetes API objects. This is the API objects as represented in memory.
// The contract presented to clients is located in the versioned packages,
// which are sub-directories. The first one is "v1". Those packages
// describe how a particular version is serialized to storage/network.
package core | go | github | https://github.com/kubernetes/kubernetes | pkg/apis/core/doc.go |
from __future__ import print_function, division
from warnings import warn
from collections import namedtuple
from copy import deepcopy
from .hashable import Hashable
from .utils import flatten_2d_list
from nilm_metadata import get_appliance_types
# (type, instance) pair identifying one appliance, e.g. ApplianceID('fridge', 1).
ApplianceID = namedtuple('ApplianceID', ['type', 'instance'])
# Watts; fallback when neither the appliance metadata nor its type define one.
DEFAULT_ON_POWER_THRESHOLD = 10
class Appliance(Hashable):
    """Represents an appliance instance.

    Attributes
    ----------
    metadata : dict
        See here metadata attributes:
        http://nilm-metadata.readthedocs.org/en/latest/dataset_metadata.html#appliance
    """

    #: Static (AKA class) variable. Maps from appliance_type (string) to a dict
    #: describing metadata about each appliance type.
    appliance_types = {}

    def __init__(self, metadata=None):
        self.metadata = {} if metadata is None else metadata

        # Lazily populate the shared appliance-type map on first instantiation.
        if not Appliance.appliance_types:
            Appliance.appliance_types = get_appliance_types()

        # Warn (non-fatal) about unrecognised appliance types.
        # (`in` replaces the Python-2-only dict.has_key().)
        if (self.identifier.type and
                self.identifier.type not in Appliance.appliance_types):
            warn("'{}' is not a recognised appliance type."
                 .format(self.identifier.type), RuntimeWarning)

    @property
    def identifier(self):
        """Return ApplianceID"""
        md = self.metadata
        return ApplianceID(md.get('type'), md.get('instance'))

    @property
    def type(self):
        """Return deepcopy of dict describing appliance type."""
        return deepcopy(Appliance.appliance_types[self.identifier.type])

    @property
    def n_meters(self):
        """Return number of meters (int) to which this appliance is connected"""
        return len(self.metadata['meters'])

    def on_power_threshold(self):
        """Return the on-power threshold in watts.

        Appliance metadata wins over the appliance type's value; falls back
        to DEFAULT_ON_POWER_THRESHOLD if neither is set.
        """
        threshold_from_appliance_type = self.type.get('on_power_threshold',
                                                      DEFAULT_ON_POWER_THRESHOLD)
        return self.metadata.get('on_power_threshold',
                                 threshold_from_appliance_type)

    def label(self, pretty=False):
        """Return string '(<type>, <identifier>)' e.g. '(fridge, 1)'
        if `pretty=False` else if `pretty=True` then return a string like
        'Fridge' or 'Fridge 2'. If type == 'unknown' then
        appends `original_name` to end of label."""
        if pretty:
            label = str(self.identifier.type).capitalize()
            instance = self.identifier.instance
            # Guard against a missing instance (None); only number the label
            # for the second and subsequent instances.
            if instance is not None and instance > 1:
                label += " {}".format(instance)
        else:
            label = str(tuple(self.identifier))
            # BUG FIX: the original used `is 'unknown'` (identity comparison),
            # which only worked by CPython string-interning accident.
            if self.identifier.type == 'unknown':
                label += ', original name = {}'.format(
                    self.metadata.get('original_name'))
        return label

    def categories(self):
        """Return 1D list of category names (strings)."""
        # Default to {} so a type without 'categories' yields an empty list
        # instead of raising AttributeError on None.
        return flatten_2d_list(self.type.get('categories', {}).values())

    def matches(self, key):
        """
        Parameters
        ----------
        key : dict

        Returns
        -------
        Bool
            True if all key:value pairs in `key` match `appliance.metadata`
            or `Appliance.appliance_types[appliance.metadata['type']]`.
            Returns True if key is empty dict.
        """
        if not key:
            return True
        if not isinstance(key, dict):
            raise TypeError()
        match = True
        for k, v in key.items():
            if hasattr(self.identifier, k):
                if getattr(self.identifier, k) != v:
                    match = False
            elif k in self.metadata:
                if self.metadata[k] != v:
                    match = False
            elif k == 'category':
                if v not in self.categories():
                    match = False
            elif k in self.type:
                metadata_value = self.type[k]
                if (isinstance(metadata_value, list) and
                        not isinstance(v, list)):
                    # for example, 'control' is a list in metadata
                    if v not in metadata_value:
                        match = False
                elif metadata_value != v:
                    match = False
            else:
                raise KeyError("'{}' not a valid key.".format(k))
        return match
# Copyright (C) University of Tennessee Health Science Center, Memphis, TN.
#
# This program is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License
# as published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU Affero General Public License for more details.
#
# This program is available from Source Forge: at GeneNetwork Project
# (sourceforge.net/projects/genenetwork/).
#
# Contact Drs. Robert W. Williams and Xiaodong Zhou (2010)
# at rwilliams@uthsc.edu and xzhou15@uthsc.edu
#
#
#
# This module is used by GeneNetwork project (www.genenetwork.org)
#
# Created by GeneNetwork Core Team 2010/08/10
#
# Last updated by GeneNetwork Core Team 2010/10/20
#!/usr/bin/env python
##Copyright (c) 2002, Fedor Baart & Hans de Wit (Stichting Farmaceutische Kengetallen)
##All rights reserved.
##
##Redistribution and use in source and binary forms, with or without modification,
##are permitted provided that the following conditions are met:
##
##Redistributions of source code must retain the above copyright notice, this
##list of conditions and the following disclaimer.
##
##Redistributions in binary form must reproduce the above copyright notice,
##this list of conditions and the following disclaimer in the documentation and/or
##other materials provided with the distribution.
##
##Neither the name of the Stichting Farmaceutische Kengetallen nor the names of
##its contributors may be used to endorse or promote products derived from this
##software without specific prior written permission.
##
##THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
##AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
##IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
##DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE
##FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
##DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
##SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
##CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
##OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
##OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
##Thanks to Gerald Rosennfellner for his help and useful comments.
__doc__="""Use SVGdraw to generate your SVGdrawings.
SVGdraw uses an object model drawing and a method toXML to create SVG graphics
by using easy to use classes and methods usualy you start by creating a drawing eg
d=drawing()
#then you create a SVG root element
s=svg()
#then you add some elements eg a circle and add it to the svg root element
c=circle()
#you can supply attributes by using named arguments.
c=circle(fill='red',stroke='blue')
#or by updating the attributes attribute:
c.attributes['stroke-width']=1
s.addElement(c)
#then you add the svg root element to the drawing
d.setSVG(s)
#and finaly you xmlify the drawing
d.toXml()
this results in the svg source of the drawing, which consists of a circle
on a white background. Its as easy as that;)
This module was created using the SVG specification of www.w3c.org and the
O'Reilly (www.oreilly.com) python books as information sources. A svg viewer
is available from www.adobe.com"""
__version__="1.0"
# there are two possibilities to generate svg:
# via a dom implementation and directly using <element>text</element> strings
# the latter is way faster (and shorter in coding)
# the former is only used in debugging svg programs
# maybe it will be removed alltogether after a while
# with the following variable you indicate whether to use the dom implementation
# Note that PyXML is required for using the dom implementation.
# It is also possible to use the standard minidom. But I didn't try that one.
# Anyway the text based approach is about 60 times faster than using the full dom implementation.
use_dom_implementation=0
import exceptions
if use_dom_implementation<>0:
    try:
        from xml.dom import implementation
        from xml.dom.ext import PrettyPrint
    except:
        raise exceptions.ImportError, "PyXML is required for using the dom implementation"
#The implementation is used for the creating the XML document.
#The prettyprint module is used for converting the xml document object to a xml file
import sys
assert sys.version_info[0]>=2
if sys.version_info[1]<2:
    # Pre-Python-2.2 compatibility shims: define True/False and alias file to open.
    True=1
    False=0
    file=open
# NOTE(review): this ASSIGNS the integer 50 to the name `sys.setrecursionlimit`,
# shadowing the function, rather than calling sys.setrecursionlimit(50) as the
# comment below suggests was intended.  Actually calling it with 50 would set a
# dangerously low limit, so confirm intent before "fixing" this line.
sys.setrecursionlimit=50
#The recursion limit is set conservative so mistakes like s=svg() s.addElement(s)
#won't eat up too much processor time.
#the following code is pasted from xml.sax.saxutils
#it makes it possible to run the code without the xml sax package installed
#To allow markup-significant characters in text elements, the texts must be escaped
def _escape(data, entities={}):
"""Escape &, <, and > in a string of data.
You can escape other strings of data by passing a dictionary as
the optional entities parameter. The keys and values must all be
strings; each key will be replaced with its corresponding value.
"""
#data = data.replace("&", "&")
data = data.replace("<", "<")
data = data.replace(">", ">")
for chars, entity in entities.items():
data = data.replace(chars, entity)
return data
def _quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escapes markup characters via _escape(), then wraps the result in quotes
    suitable for use as an XML attribute value.  The double-quote character is
    escaped as &quot; when both quote styles are present.  Extra replacements
    can be supplied through the optional `entities` mapping.
    """
    data = _escape(data, entities)
    if '"' in data:
        if "'" in data:
            # Both quote styles present: double-quote the value and escape the
            # embedded double quotes.  (The "&quot;" entity string had been
            # mangled into a bare quote in this copy; restored here.)
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def _xypointlist(a):
"""formats a list of xy pairs"""
s=''
for e in a: #this could be done more elegant
s+=str(e)[1:-1] +' '
return s
def _viewboxlist(a):
"""formats a tuple"""
s=''
for e in a:
s+=str(e)+' '
return s
def _pointlist(a):
"""formats a list of numbers"""
return str(a)[1:-1]
class pathdata:
    """Builder for the `d` attribute of an SVG path element.

    Each method appends one SVG path command string; str()/repr() yields the
    space-joined command sequence.  Most methods are straightforward, but the
    SVG specification is the reference for the individual commands.
    """
    def __init__(self, x=None, y=None):
        self.path = []
        if x is not None and y is not None:
            self.path.append('M %s %s' % (x, y))

    def closepath(self):
        """ends the path"""
        self.path.append('z')

    def move(self, x, y):
        """move to absolute"""
        self.path.append('M %s %s' % (x, y))

    def relmove(self, x, y):
        """move to relative"""
        self.path.append('m %s %s' % (x, y))

    def line(self, x, y):
        """line to absolute"""
        self.path.append('L %s %s' % (x, y))

    def relline(self, x, y):
        """line to relative"""
        self.path.append('l %s %s' % (x, y))

    def hline(self, x):
        """horizontal line to absolute"""
        self.path.append('H%s' % (x,))

    def relhline(self, x):
        """horizontal line to relative"""
        self.path.append('h%s' % (x,))

    def vline(self, y):
        """vertical line to absolute"""
        self.path.append('V%s' % (y,))

    def relvline(self, y):
        """vertical line to relative"""
        self.path.append('v%s' % (y,))

    def bezier(self, x1, y1, x2, y2, x, y):
        """bezier with xy1 and xy2 to xy absolute"""
        self.path.append('C%s,%s %s,%s %s,%s' % (x1, y1, x2, y2, x, y))

    def relbezier(self, x1, y1, x2, y2, x, y):
        """bezier with xy1 and xy2 to xy relative"""
        self.path.append('c%s,%s %s,%s %s,%s' % (x1, y1, x2, y2, x, y))

    def smbezier(self, x2, y2, x, y):
        """smooth bezier with xy2 to xy absolute"""
        self.path.append('S%s,%s %s,%s' % (x2, y2, x, y))

    def relsmbezier(self, x2, y2, x, y):
        """smooth bezier with xy2 to xy relative"""
        self.path.append('s%s,%s %s,%s' % (x2, y2, x, y))

    def qbezier(self, x1, y1, x, y):
        """quadratic bezier with xy1 to xy absolute"""
        self.path.append('Q%s,%s %s,%s' % (x1, y1, x, y))

    def relqbezier(self, x1, y1, x, y):
        """quadratic bezier with xy1 to xy relative"""
        self.path.append('q%s,%s %s,%s' % (x1, y1, x, y))

    def smqbezier(self, x, y):
        """smooth quadratic bezier to xy absolute"""
        self.path.append('T%s,%s' % (x, y))

    def relsmqbezier(self, x, y):
        """smooth quadratic bezier to xy relative"""
        self.path.append('t%s,%s' % (x, y))

    def ellarc(self, rx, ry, xrot, laf, sf, x, y):
        """elliptical arc with rx and ry rotating with xrot using
        large-arc-flag and sweep-flag to xy absolute"""
        self.path.append('A%s,%s %s %s %s %s %s' % (rx, ry, xrot, laf, sf, x, y))

    def relellarc(self, rx, ry, xrot, laf, sf, x, y):
        """elliptical arc with rx and ry rotating with xrot using
        large-arc-flag and sweep-flag to xy relative"""
        self.path.append('a%s,%s %s %s %s %s %s' % (rx, ry, xrot, laf, sf, x, y))

    def __repr__(self):
        return ' '.join(self.path)
class SVGelement:
    """SVGelement(type,attributes,elements,text,namespace,**args)

    Creates an arbitrary svg element and is intended to be subclassed, not
    used on its own.  This element is the base of every svg element; it
    resembles an xml element.  The main advantage of this implementation is
    that you don't have to create a toXml method for every graph object.
    Every element consists of a type, attributes, optional subelements,
    optional text and an optional namespace.

    Note the elements==None / self.elements=[] construction: defaulting to
    elements=[] would give every instance a reference to the same list.
    """
    def __init__(self, type='', attributes=None, elements=None, text='',
                 namespace='', cdata=None, **args):
        self.type = type
        # `is None` comparisons replace the original `==None`; the None
        # defaults avoid the shared-mutable-default pitfall described above.
        if attributes is None:
            self.attributes = {}
        else:
            self.attributes = attributes
        if elements is None:
            self.elements = []
        else:
            self.elements = elements
        self.text = text
        self.namespace = namespace
        self.cdata = cdata
        # Keyword arguments become XML attributes: '__' maps to ':' (for
        # namespaced names such as xlink__href) and '_' maps to '-' (for
        # names such as stroke_width).
        for arg, value in args.items():
            key = arg.replace("__", ":").replace("_", "-")
            self.attributes[key] = value

    def addElement(self, SVGelement):
        """adds an element to a SVGelement

        SVGelement.addElement(SVGelement)
        """
        self.elements.append(SVGelement)

    def toXml(self, level, f):
        # Serialise this element (and children, recursively) to the file-like
        # object `f`, indented one tab per nesting level.
        f.write('\t' * level)
        f.write('<' + self.type)
        for attkey in self.attributes.keys():
            f.write(' ' + _escape(str(attkey)) + '=' +
                    _quoteattr(str(self.attributes[attkey])))
        if self.namespace:
            f.write(' xmlns="' + _escape(str(self.namespace)) +
                    '" xmlns:xlink="http://www.w3.org/1999/xlink"')
        if self.elements or self.text or self.cdata:
            f.write('>')
        if self.elements:
            f.write('\n')
        for element in self.elements:
            element.toXml(level + 1, f)
        if self.cdata:
            # CDATA body is re-indented line by line inside the section.
            f.write('\n' + '\t' * (level + 1) + '<![CDATA[')
            for line in self.cdata.splitlines():
                f.write('\n' + '\t' * (level + 2) + line)
            f.write('\n' + '\t' * (level + 1) + ']]>\n')
        if self.text:
            if type(self.text) == type(''):  # plain string: escape it
                f.write(_escape(str(self.text)))
            else:  # a spannedtext instance: it renders itself via str()
                f.write(str(self.text))
        # Choose the closing form: full close tag when there is content,
        # otherwise the self-closing '/>' shorthand.
        if self.elements:
            f.write('\t' * level + '</' + self.type + '>\n')
        elif self.text:
            f.write('</' + self.type + '>\n')
        elif self.cdata:
            f.write('\t' * level + '</' + self.type + '>\n')
        else:
            f.write('/>\n')
class tspan(SVGelement):
    """ts=tspan(text='',**args)

    a tspan element can be used for applying formatting to a textsection
    usage:
    ts=tspan('this text is bold')
    ts.attributes['font-weight']='bold'
    st=spannedtext()
    st.addtspan(ts)
    t=text(3,5,st)
    """
    def __init__(self, text=None, **args):
        SVGelement.__init__(self, 'tspan', **args)
        # BUG FIX: the original tested `self.text<>None`, which is always true
        # after the base __init__ (self.text == ''), so tspan() with no text
        # overwrote self.text with None and later crashed __repr__.  Test the
        # `text` argument instead, matching the sibling classes.
        if text is not None:
            self.text = text

    def __repr__(self):
        s = "<tspan"
        for key, value in self.attributes.items():
            s += ' %s="%s"' % (key, value)
        s += '>'
        s += self.text
        s += '</tspan>'
        return s
class tref(SVGelement):
    """tr=tref(link='',**args)

    a tref element can be used for referencing text by a link to its id.
    usage:
    tr=tref('#linktotext')
    st=spannedtext()
    st.addtref(tr)
    t=text(3,5,st)
    """
    def __init__(self, link, **args):
        # The referenced text is addressed through an xlink:href attribute.
        SVGelement.__init__(self, 'tref', {'xlink:href': link}, **args)

    def __repr__(self):
        parts = ["<tref"]
        for name, value in self.attributes.items():
            parts.append(' %s="%s"' % (name, value))
        parts.append('/>')
        return ''.join(parts)
class spannedtext:
    """st=spannedtext(textlist=[])

    a spannedtext can be used for text which consists of text, tspan's and
    tref's.  You can use it to add to a text element or path element.  Don't
    add it directly to a svg or a group element.
    usage:
    ts=tspan('this text is bold')
    ts.attributes['font-weight']='bold'
    tr=tref('#linktotext')
    tr.attributes['fill']='red'
    st=spannedtext()
    st.addtspan(ts)
    st.addtref(tr)
    st.addtext('This text is not bold')
    t=text(3,5,st)
    """
    def __init__(self, textlist=None):
        # None default (instead of []) avoids a mutable default shared
        # between instances; `is None` replaces the `==None` comparison.
        if textlist is None:
            self.textlist = []
        else:
            self.textlist = textlist

    def addtext(self, text=''):
        self.textlist.append(text)

    def addtspan(self, tspan):
        self.textlist.append(tspan)

    def addtref(self, tref):
        self.textlist.append(tref)

    def __repr__(self):
        # Concatenate the str() of every part (plain strings render as-is;
        # tspan/tref render their own markup).
        s = ""
        for element in self.textlist:
            s += str(element)
        return s
class rect(SVGelement):
    """r=rect(width,height,x,y,fill,stroke,stroke_width,**args)

    a rectangle is defined by a width and height and a xy pair
    """
    def __init__(self, x=None, y=None, width=None, height=None, fill=None,
                 stroke=None, stroke_width=None, **args):
        # Width and height are mandatory; report precisely which is missing.
        # (Call-form raises replace the Python-2-only `raise X, msg` syntax.)
        if width is None or height is None:
            if width is not None:
                raise ValueError('height is required')
            if height is not None:
                raise ValueError('width is required')
            else:
                raise ValueError('both height and width are required')
        SVGelement.__init__(self, 'rect', {'width': width, 'height': height}, **args)
        if x is not None:
            self.attributes['x'] = x
        if y is not None:
            self.attributes['y'] = y
        if fill is not None:
            self.attributes['fill'] = fill
        if stroke is not None:
            self.attributes['stroke'] = stroke
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
class ellipse(SVGelement):
    """e=ellipse(rx,ry,x,y,fill,stroke,stroke_width,**args)

    an ellipse is defined as a center and a x and y radius.
    """
    def __init__(self, cx=None, cy=None, rx=None, ry=None, fill=None,
                 stroke=None, stroke_width=None, **args):
        # Both radii are mandatory; report precisely which is missing.
        if rx is None or ry is None:
            if rx is not None:
                raise ValueError('rx is required')
            if ry is not None:
                raise ValueError('ry is required')
            else:
                raise ValueError('both rx and ry are required')
        SVGelement.__init__(self, 'ellipse', {'rx': rx, 'ry': ry}, **args)
        if cx is not None:
            self.attributes['cx'] = cx
        if cy is not None:
            self.attributes['cy'] = cy
        if fill is not None:
            self.attributes['fill'] = fill
        if stroke is not None:
            self.attributes['stroke'] = stroke
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
class circle(SVGelement):
    """c=circle(x,y,radius,fill,stroke,stroke_width,**args)

    The circle creates an element using a x, y and radius values eg
    """
    def __init__(self, cx=None, cy=None, r=None, fill=None, stroke=None,
                 stroke_width=None, **args):
        # The radius is the only mandatory attribute of an SVG circle.
        if r is None:
            raise ValueError('r is required')
        SVGelement.__init__(self, 'circle', {'r': r}, **args)
        if cx is not None:
            self.attributes['cx'] = cx
        if cy is not None:
            self.attributes['cy'] = cy
        if fill is not None:
            self.attributes['fill'] = fill
        if stroke is not None:
            self.attributes['stroke'] = stroke
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
class point(circle):
    """p=point(x,y,color)

    A point is defined as a circle with a size 1 radius. It may be more
    efficient to use a very small rectangle if you use many points because a
    circle is difficult to render.
    """
    def __init__(self, x, y, fill='black', **args):
        # Delegate to circle with a fixed unit radius.
        circle.__init__(self, cx=x, cy=y, r=1, fill=fill, **args)
class line(SVGelement):
    """l=line(x1,y1,x2,y2,stroke,stroke_width,**args)

    A line is defined by a begin x,y pair and an end x,y pair
    """
    def __init__(self, x1=None, y1=None, x2=None, y2=None, stroke=None,
                 stroke_width=None, **args):
        SVGelement.__init__(self, 'line', **args)
        # All coordinates are optional; only set what was supplied.
        if x1 is not None:
            self.attributes['x1'] = x1
        if y1 is not None:
            self.attributes['y1'] = y1
        if x2 is not None:
            self.attributes['x2'] = x2
        if y2 is not None:
            self.attributes['y2'] = y2
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
        if stroke is not None:
            self.attributes['stroke'] = stroke
class polyline(SVGelement):
    """pl=polyline([[x1,y1],[x2,y2],...],fill,stroke,stroke_width,**args)

    a polyline is defined by a list of xy pairs
    """
    def __init__(self, points, fill=None, stroke=None, stroke_width=None, **args):
        # The xy pairs are flattened into the SVG `points` attribute string.
        SVGelement.__init__(self, 'polyline', {'points': _xypointlist(points)}, **args)
        if fill is not None:
            self.attributes['fill'] = fill
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
        if stroke is not None:
            self.attributes['stroke'] = stroke
class polygon(SVGelement):
    """pl=polyline([[x1,y1],[x2,y2],...],fill,stroke,stroke_width,**args)

    a polygon is defined by a list of xy pairs
    """
    def __init__(self, points, fill=None, stroke=None, stroke_width=None, **args):
        # Identical to polyline except the element type; SVG closes the shape.
        SVGelement.__init__(self, 'polygon', {'points': _xypointlist(points)}, **args)
        if fill is not None:
            self.attributes['fill'] = fill
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
        if stroke is not None:
            self.attributes['stroke'] = stroke
class path(SVGelement):
    """p=path(path,fill,stroke,stroke_width,**args)

    a path is defined by a path object and optional width, stroke and fillcolor
    """
    def __init__(self, pathdata, fill=None, stroke=None, stroke_width=None,
                 id=None, **args):
        # str(pathdata) renders the accumulated path commands into the `d`
        # attribute (see the pathdata class).
        SVGelement.__init__(self, 'path', {'d': str(pathdata)}, **args)
        if stroke is not None:
            self.attributes['stroke'] = stroke
        if fill is not None:
            self.attributes['fill'] = fill
        if stroke_width is not None:
            self.attributes['stroke-width'] = stroke_width
        if id is not None:
            self.attributes['id'] = id
class text(SVGelement):
    """t=text(x,y,text,font_size,font_family,**args)

    a text element can be used for displaying text on the screen
    """
    def __init__(self, x=None, y=None, text=None, font_size=None,
                 font_family=None, text_anchor=None, **args):
        SVGelement.__init__(self, 'text', **args)
        if x is not None:
            self.attributes['x'] = x
        if y is not None:
            self.attributes['y'] = y
        if font_size is not None:
            self.attributes['font-size'] = font_size
        if font_family is not None:
            self.attributes['font-family'] = font_family
        # `text` may be a plain string or a spannedtext instance.
        if text is not None:
            self.text = text
        if text_anchor is not None:
            self.attributes['text-anchor'] = text_anchor
class textpath(SVGelement):
    """tp=textpath(text,link,**args)

    a textpath places a text on a path which is referenced by a link.
    """
    def __init__(self, link, text=None, **args):
        SVGelement.__init__(self, 'textPath', {'xlink:href': link}, **args)
        if text is not None:
            self.text = text
class pattern(SVGelement):
    """p=pattern(x,y,width,height,patternUnits,**args)

    A pattern is used to fill or stroke an object using a pre-defined
    graphic object which can be replicated ("tiled") at fixed intervals
    in x and y to cover the areas to be painted.
    """
    def __init__(self, x=None, y=None, width=None, height=None,
                 patternUnits=None, **args):
        SVGelement.__init__(self, 'pattern', **args)
        if x is not None:
            self.attributes['x'] = x
        if y is not None:
            self.attributes['y'] = y
        if width is not None:
            self.attributes['width'] = width
        if height is not None:
            self.attributes['height'] = height
        if patternUnits is not None:
            self.attributes['patternUnits'] = patternUnits
class title(SVGelement):
    """t=title(text,**args)

    a title is a text element. The text is displayed in the title bar
    add at least one to the root svg element
    """
    def __init__(self, text=None, **args):
        SVGelement.__init__(self, 'title', **args)
        if text is not None:
            self.text = text
class description(SVGelement):
    """d=description(text,**args)

    a description can be added to any element and is used for a tooltip
    Add this element before adding other elements.
    """
    def __init__(self, text=None, **args):
        SVGelement.__init__(self, 'desc', **args)
        if text is not None:
            self.text = text
class lineargradient(SVGelement):
    """lg=lineargradient(x1,y1,x2,y2,id,**args)

    defines a lineargradient using two xy pairs.
    stop elements can be added to define the gradient colors.
    """
    def __init__(self, x1=None, y1=None, x2=None, y2=None, id=None, **args):
        SVGelement.__init__(self, 'linearGradient', **args)
        if x1 is not None:
            self.attributes['x1'] = x1
        if y1 is not None:
            self.attributes['y1'] = y1
        if x2 is not None:
            self.attributes['x2'] = x2
        if y2 is not None:
            self.attributes['y2'] = y2
        if id is not None:
            self.attributes['id'] = id
class radialgradient(SVGelement):
    """rg=radialgradient(cx,cy,r,fx,fy,id,**args)

    defines a radial gradient using an outer circle (cx, cy, r) and a
    focal point (fx, fy).
    stop elements can be added to define the gradient colors.
    """
    def __init__(self, cx=None, cy=None, r=None, fx=None, fy=None, id=None, **args):
        SVGelement.__init__(self, 'radialGradient', **args)
        if cx is not None:
            self.attributes['cx'] = cx
        if cy is not None:
            self.attributes['cy'] = cy
        if r is not None:
            self.attributes['r'] = r
        if fx is not None:
            self.attributes['fx'] = fx
        if fy is not None:
            self.attributes['fy'] = fy
        if id is not None:
            self.attributes['id'] = id
class stop(SVGelement):
    """st=stop(offset,stop_color,**args)

    Puts a stop color at the specified radius
    """
    def __init__(self, offset, stop_color=None, **args):
        # `offset` is mandatory (0..1 or percentage per the SVG spec).
        SVGelement.__init__(self, 'stop', {'offset': offset}, **args)
        if stop_color is not None:
            self.attributes['stop-color'] = stop_color
class style(SVGelement):
    """st=style(type,cdata=None,**args)

    Add a CDATA element to this element for defing in line stylesheets etc..
    """
    def __init__(self,type,cdata=None,**args):
        # `type` is the stylesheet MIME type (e.g. 'text/css'); the stylesheet
        # body travels as a CDATA section rendered by SVGelement.toXml.
        SVGelement.__init__(self,'style',{'type':type},cdata=cdata, **args)
class image(SVGelement):
    """im=image(url,width,height,x,y,**args)

    adds an image to the drawing. Supported formats are .png, .jpg and .svg.
    """
    def __init__(self, url, x=None, y=None, width=None, height=None, **args):
        # Width and height are mandatory; report precisely which is missing.
        if width is None or height is None:
            if width is not None:
                raise ValueError('height is required')
            if height is not None:
                raise ValueError('width is required')
            else:
                raise ValueError('both height and width are required')
        SVGelement.__init__(self, 'image',
                            {'xlink:href': url, 'width': width, 'height': height},
                            **args)
        if x is not None:
            self.attributes['x'] = x
        if y is not None:
            self.attributes['y'] = y
class cursor(SVGelement):
    """c=cursor(url,**args)

    defines a custom cursor for a element or a drawing
    """
    def __init__(self,url,**args):
        # The cursor image is referenced through an xlink:href URL.
        SVGelement.__init__(self,'cursor',{'xlink:href':url},**args)
class marker(SVGelement):
    """m=marker(id,viewbox,refX,refY,markerWidth,markerHeight,**args)

    defines a marker which can be used as an endpoint for a line or other
    pathtypes.  Add an element to it which should be used as a marker.
    """
    def __init__(self, id=None, viewBox=None, refx=None, refy=None,
                 markerWidth=None, markerHeight=None, **args):
        SVGelement.__init__(self, 'marker', **args)
        if id is not None:
            self.attributes['id'] = id
        if viewBox is not None:
            # The viewBox tuple is flattened to a space-separated string.
            self.attributes['viewBox'] = _viewboxlist(viewBox)
        if refx is not None:
            self.attributes['refX'] = refx
        if refy is not None:
            self.attributes['refY'] = refy
        if markerWidth is not None:
            self.attributes['markerWidth'] = markerWidth
        if markerHeight is not None:
            self.attributes['markerHeight'] = markerHeight
class group(SVGelement):
    """g=group(id,**args)

    a group is defined by an id and is used to contain elements
    g.addElement(SVGelement)
    """
    def __init__(self, id=None, **args):
        SVGelement.__init__(self, 'g', **args)
        if id is not None:
            self.attributes['id'] = id
class symbol(SVGelement):
    """sy=symbol(id,viewbox,**args)

    defines a symbol which can be used on different places in your graph using
    the use element. A symbol is not rendered but you can use 'use' elements to
    display it by referencing its id.
    sy.addElement(SVGelement)
    """
    def __init__(self, id=None, viewBox=None, **args):
        SVGelement.__init__(self, 'symbol', **args)
        if id is not None:
            self.attributes['id'] = id
        if viewBox is not None:
            self.attributes['viewBox'] = _viewboxlist(viewBox)
class defs(SVGelement):
    """d=defs(**args)

    container for defining elements
    """
    def __init__(self,**args):
        # A defs block is not rendered; its children are referenced elsewhere
        # (e.g. by use elements or url() fills).
        SVGelement.__init__(self,'defs',**args)
class switch(SVGelement):
    """sw=switch(**args)

    Elements added to a switch element which are "switched" by the attributes
    requiredFeatures, requiredExtensions and systemLanguage.
    Refer to the SVG specification for details.
    """
    def __init__(self,**args):
        SVGelement.__init__(self,'switch',**args)
class use(SVGelement):
    """u=use(link,x,y,width,height,**args)

    references a symbol by linking to its id and its position, height and width
    """
    def __init__(self, link, x=None, y=None, width=None, height=None, **args):
        SVGelement.__init__(self, 'use', {'xlink:href': link}, **args)
        if x is not None:
            self.attributes['x'] = x
        if y is not None:
            self.attributes['y'] = y
        if width is not None:
            self.attributes['width'] = width
        if height is not None:
            self.attributes['height'] = height
class link(SVGelement):
    """a=link(url,**args)

    a link is defined by a hyperlink. add elements which have to be linked
    a.addElement(SVGelement)
    """
    def __init__(self,link='',**args):
        # The SVG element type is 'a'; the target goes into xlink:href.
        SVGelement.__init__(self,'a',{'xlink:href':link},**args)
class view(SVGelement):
    """v=view(id,**args)

    a view can be used to create a view with different attributes"""
    def __init__(self, id=None, **args):
        SVGelement.__init__(self, 'view', **args)
        if id is not None:
            self.attributes['id'] = id
class script(SVGelement):
    """sc=script(type,type,cdata,**args)

    adds a script element which contains CDATA to the SVG drawing
    """
    def __init__(self,type,cdata=None,**args):
        # `type` is the script MIME type (e.g. 'text/ecmascript'); the script
        # body travels as a CDATA section rendered by SVGelement.toXml.
        SVGelement.__init__(self,'script',{'type':type},cdata=cdata,**args)
class animate(SVGelement):
    """an=animate(attribute,from,to,during,**args)

    animates an attribute.
    """
    def __init__(self, attribute, fr=None, to=None, dur=None, **args):
        # `fr` stands in for the SVG 'from' attribute ('from' is a Python keyword).
        SVGelement.__init__(self, 'animate', {'attributeName': attribute}, **args)
        if fr is not None:
            self.attributes['from'] = fr
        if to is not None:
            self.attributes['to'] = to
        if dur is not None:
            self.attributes['dur'] = dur
class animateMotion(SVGelement):
    """an=animateMotion(pathdata,dur,**args)

    animates a SVGelement over the given path in dur seconds
    """
    def __init__(self, pathdata, dur, **args):
        SVGelement.__init__(self, 'animateMotion', **args)
        if pathdata is not None:
            # The pathdata object renders its accumulated commands via str().
            self.attributes['path'] = str(pathdata)
        if dur is not None:
            self.attributes['dur'] = dur
class animateTransform(SVGelement):
    """antr=animateTransform(type,from,to,dur,**args)

    transform an element from and to a value.
    """
    def __init__(self, type=None, fr=None, to=None, dur=None, **args):
        # As far as I know the attributeName is always 'transform'.
        SVGelement.__init__(self, 'animateTransform',
                            {'attributeName': 'transform'}, **args)
        if type is not None:
            self.attributes['type'] = type
        if fr is not None:
            self.attributes['from'] = fr
        if to is not None:
            self.attributes['to'] = to
        if dur is not None:
            self.attributes['dur'] = dur
class animateColor(SVGelement):
    """ac=animateColor(attribute,type,from,to,dur,**args)

    Animates the color of a element
    """
    def __init__(self, attribute, type=None, fr=None, to=None, dur=None, **args):
        SVGelement.__init__(self, 'animateColor',
                            {'attributeName': attribute}, **args)
        if type is not None:
            self.attributes['type'] = type
        if fr is not None:
            self.attributes['from'] = fr
        if to is not None:
            self.attributes['to'] = to
        if dur is not None:
            self.attributes['dur'] = dur
class set(SVGelement):
    """st=set(attribute,to,during,**args)

    sets an attribute to a value for a duration
    """
    # NOTE: this class shadows the builtin `set`; kept for API compatibility.
    def __init__(self, attribute, to=None, dur=None, **args):
        SVGelement.__init__(self, 'set', {'attributeName': attribute}, **args)
        if to is not None:
            self.attributes['to'] = to
        if dur is not None:
            self.attributes['dur'] = dur
class svg(SVGelement):
    """s=svg(viewbox,width,height,**args)

    a svg element is the root of a drawing; add all elements to a svg element.
    You can have different svg elements in one svg file
    s.addElement(SVGelement)
    eg
    d=drawing()
    s=svg((0,0,100,100),'100%','100%')
    c=circle(50,50,20)
    s.addElement(c)
    d.setSVG(s)
    d.toXml()
    """
    def __init__(self, viewBox=None, width=None, height=None, **args):
        SVGelement.__init__(self, 'svg', **args)
        if viewBox is not None:
            self.attributes['viewBox'] = _viewboxlist(viewBox)
        if width is not None:
            self.attributes['width'] = width
        if height is not None:
            self.attributes['height'] = height
        # The SVG (and xlink) namespaces are emitted by SVGelement.toXml.
        self.namespace = "http://www.w3.org/2000/svg"
class drawing:
    """d=drawing()

    This is the actual SVG document. It needs a svg element as a root.
    Use the setSVG method to set the svg as the root. Use the toXml method
    to write the SVG source to the screen or to a file.

    d=drawing()
    d.setSVG(svg)
    d.toXml(optionalfilename)
    """
    def __init__(self, entity=None):
        self.svg=None
        # Avoid the mutable-default-argument pitfall: the previous
        # 'entity={}' default dict was shared across all instances.
        self.entity = entity if entity is not None else {}
    def setSVG(self,svg):
        """Attach *svg* as the root element of this document."""
        self.svg=svg
    # Add an element to the drawing.  (translated from Dutch)
    if use_dom_implementation==0:
        def toXml(self, filename='',compress=False):
            """Serialize the document.

            With no filename, return the SVG source as a string (gzip
            compressed when compress=True).  Otherwise write it to
            *filename*, gzip-compressed when the name ends in 'svgz'.
            """
            import cStringIO
            xml=cStringIO.StringIO()
            xml.write("<?xml version='1.0' encoding='UTF-8'?>\n")
            xml.write("<!DOCTYPE svg PUBLIC \"-//W3C//DTD SVG 1.0//EN\" \"http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd\"")
            if self.entity:
                # Inline the user-supplied XML entity declarations.
                xml.write(" [\n")
                for item in self.entity.keys():
                    xml.write("<!ENTITY %s \"%s\">\n" % (item, self.entity[item]))
                xml.write("]")
            xml.write(">\n")
            self.svg.toXml(0,xml)
            if not filename:
                if compress:
                    import gzip
                    f=cStringIO.StringIO()
                    zf=gzip.GzipFile(fileobj=f,mode='wb')
                    zf.write(xml.getvalue())
                    zf.close()
                    f.seek(0)
                    return f.read()
                else:
                    return xml.getvalue()
            else:
                if filename[-4:]=='svgz':
                    import gzip
                    f=gzip.GzipFile(filename=filename,mode="wb", compresslevel=9)
                    f.write(xml.getvalue())
                    f.close()
                else:
                    # open() replaces the removed 'file()' builtin.
                    f=open(filename,'w')
                    f.write(xml.getvalue())
                    f.close()
    else:
        def toXml(self,filename='',compress=False):
            """drawing.toXml() ---->to the screen
            drawing.toXml(filename)---->to the file
            writes a svg drawing to the screen or to a file
            compresses if filename ends with svgz or if compress is true
            """
            doctype = implementation.createDocumentType('svg',"-//W3C//DTD SVG 1.0//EN""",'http://www.w3.org/TR/2001/REC-SVG-20010904/DTD/svg10.dtd ')
            global root
            # root is defined global so it can be used by the appender.
            # It could also be passed as an argument, but that is messier.
            root=implementation.createDocument(None,None,doctype)
            # Create the xml document.
            global appender
            def appender(element,elementroot):
                """This recursive function appends elements to an element and sets the attributes
                and type. It stops when all elements have been appended"""
                if element.namespace:
                    e=root.createElementNS(element.namespace,element.type)
                else:
                    e=root.createElement(element.type)
                if element.text:
                    textnode=root.createTextNode(element.text)
                    e.appendChild(textnode)
                for attribute in element.attributes.keys():  # 'in element.attributes' is supported from python 2.2
                    e.setAttribute(attribute,str(element.attributes[attribute]))
                if element.elements:
                    for el in element.elements:
                        e=appender(el,e)
                elementroot.appendChild(e)
                return elementroot
            root=appender(self.svg,root)
            if not filename:
                import cStringIO
                xml=cStringIO.StringIO()
                PrettyPrint(root,xml)
                if compress:
                    import gzip
                    f=cStringIO.StringIO()
                    zf=gzip.GzipFile(fileobj=f,mode='wb')
                    zf.write(xml.getvalue())
                    zf.close()
                    f.seek(0)
                    return f.read()
                else:
                    return xml.getvalue()
            else:
                try:
                    if filename[-4:]=='svgz':
                        import gzip
                        import cStringIO
                        xml=cStringIO.StringIO()
                        PrettyPrint(root,xml)
                        f=gzip.GzipFile(filename=filename,mode='wb',compresslevel=9)
                        f.write(xml.getvalue())
                        f.close()
                    else:
                        f=open(filename,'w')
                        PrettyPrint(root,f)
                        f.close()
                except Exception:
                    # 'except Exception' instead of a bare except so that
                    # KeyboardInterrupt/SystemExit are not swallowed;
                    # print() form works on both Python 2 and 3.
                    print("Cannot write SVG file: " + filename)
    def validate(self):
        """Validate the generated SVG with PyXML's xmlproc validator."""
        try:
            import xml.parsers.xmlproc.xmlval
        except ImportError:
            # 'raise exceptions.ImportError, msg' is Python-2-only syntax;
            # ImportError is a builtin, no 'exceptions' module is needed.
            raise ImportError('PyXml is required for validating SVG')
        # Local renamed from 'svg' so it no longer shadows the svg class.
        svgsource=self.toXml()
        xv=xml.parsers.xmlproc.xmlval.XMLValidator()
        try:
            xv.feed(svgsource)
        except Exception:
            # Raising a bare string is a TypeError on Python >= 2.6;
            # raise a real exception object instead.
            raise Exception("SVG is not well formed, see messages above")
        else:
            print("SVG well formed")
if __name__=='__main__':
    # Demo: build a small animated drawing and print its XML source.
    demo = drawing()
    canvas = svg((0, 0, 100, 100))
    background = rect(-100, -100, 300, 300, 'cyan')
    canvas.addElement(background)
    caption = title('SVGdraw Demo')
    canvas.addElement(caption)
    # A reusable group of animated shapes, referenced below via <use>.
    animated = group('animations')
    animated.addElement(ellipse(0, 0, 5, 2))
    animated.addElement(circle(0, 0, 1, 'red'))
    path = pathdata(0, -10)
    for _ in range(6):
        path.relsmbezier(10, 5, 0, 10)
        path.relsmbezier(-10, 5, 0, 10)
    motion = animateMotion(path, 10)
    motion.attributes['rotate'] = 'auto-reverse'
    motion.attributes['repeatCount'] = "indefinite"
    animated.addElement(motion)
    canvas.addElement(animated)
    # Stamp the animation group at several horizontal offsets.
    for offset in range(20, 120, 20):
        canvas.addElement(use('#animations', offset, 0))
    # A grid of small static circles behind everything else.
    for col in range(0, 120, 20):
        for row in range(5, 105, 10):
            canvas.addElement(circle(col, row, 1, 'red', 'black', .5))
    demo.setSVG(canvas)
    print(demo.toXml())
# frozen_string_literal: true
module Jekyll
  module Utils
    # A one-shot event synchronization primitive: threads block in #wait
    # until some thread calls #set, after which all current and future
    # waiters proceed immediately.
    #
    # Based on the pattern and code from
    # https://emptysqua.re/blog/an-event-synchronization-primitive-for-ruby/
    class ThreadEvent
      attr_reader :flag

      def initialize
        @lock = Mutex.new
        @cond = ConditionVariable.new
        @flag = false
      end

      # Signal the event and wake every waiting thread.  An optional block
      # runs inside the critical section before the flag flips, so its
      # effects are visible to the woken waiters.
      def set
        @lock.synchronize do
          yield if block_given?
          @flag = true
          @cond.broadcast
        end
      end

      # Block the calling thread until the event has been signalled;
      # returns immediately when #set has already happened.
      def wait
        @lock.synchronize { @cond.wait(@lock) unless @flag }
      end
    end
  end
end
'''
Update the transformation files of active transformations given an InputDataQuery fetched from the Transformation Service.
Possibility to speedup the query time by only fetching files that were added since the last iteration.
Use the CS option RefreshOnly (False by default) and set the DateKey (empty by default) to the meta data
key set in the DIRAC FileCatalog.
'''
import time
import datetime
from DIRAC import S_OK, gLogger
from DIRAC.FrameworkSystem.Client.MonitoringClient import gMonitor
from DIRAC.Core.Base.AgentModule import AgentModule
from DIRAC.TransformationSystem.Client.TransformationClient import TransformationClient
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
from DIRAC.ConfigurationSystem.Client.Helpers.Operations import Operations
__RCSID__ = "$Id$"
AGENT_NAME = 'Transformation/InputDataAgent'
class InputDataAgent( AgentModule ):
  '''Polls the metadata catalog with each active transformation's input data
     query and feeds any files found into the Transformation System.
  '''

  def __init__( self, *args, **kwargs ):
    ''' c'tor
    '''
    AgentModule.__init__( self, *args, **kwargs )

    self.fileLog = {}      # transID -> number of files seen in the last cycle
    self.timeLog = {}      # transID -> time of the last (possibly reduced) query
    self.fullTimeLog = {}  # transID -> time of the last full (non-reduced) query
    self.pollingTime = self.am_getOption( 'PollingTime', 120 )
    self.fullUpdatePeriod = self.am_getOption( 'FullUpdatePeriod', 86400 )
    self.refreshonly = self.am_getOption( 'RefreshOnly', False )
    self.dateKey = self.am_getOption( 'DateKey', None )

    self.transClient = TransformationClient()
    self.metadataClient = FileCatalogClient()

    self.transformationTypes = None

  #############################################################################
  def initialize( self ):
    ''' Make the necessary initializations
    '''
    gMonitor.registerActivity( "Iteration", "Agent Loops", AGENT_NAME, "Loops/min", gMonitor.OP_SUM )
    agentTSTypes = self.am_getOption( 'TransformationTypes', [] )
    if agentTSTypes:
      self.transformationTypes = sorted( agentTSTypes )
    else:
      dataProc = Operations().getValue( 'Transformations/DataProcessing', ['MCSimulation', 'Merge'] )
      dataManip = Operations().getValue( 'Transformations/DataManipulation', ['Replication', 'Removal'] )
      self.transformationTypes = sorted( dataProc + dataManip )
    extendables = Operations().getValue( 'Transformations/ExtendableTransfTypes', [] )
    if extendables:
      for extendable in extendables:
        if extendable in self.transformationTypes:
          self.transformationTypes.remove( extendable )
          # This is because the Extendables do not use this Agent (have no Input data query)
    return S_OK()

  ##############################################################################
  def execute( self ):
    ''' Main execution method: for every active transformation, run its
        input data query against the metadata catalog and add any files
        returned to the transformation.
    '''
    gMonitor.addMark( 'Iteration', 1 )
    # Get all the transformations
    result = self.transClient.getTransformations( {'Status' : 'Active',
                                                   'Type' : self.transformationTypes } )
    if not result['OK']:
      gLogger.error( "InputDataAgent.execute: Failed to get transformations.", result['Message'] )
      return S_OK()
    # Process each transformation
    for transDict in result['Value']:
      transID = long( transDict['TransformationID'] )
      res = self.transClient.getTransformationInputDataQuery( transID )
      if not res['OK']:
        if res['Message'] == 'No InputDataQuery found for transformation':
          gLogger.info( "InputDataAgent.execute: No input data query found for transformation %d" % transID )
        else:
          gLogger.error( "InputDataAgent.execute: Failed to get input data query for %d" % transID, res['Message'] )
        continue
      inputDataQuery = res['Value']

      if self.refreshonly:
        # Determine the correct time stamp to use for this transformation
        if transID in self.timeLog:
          if transID in self.fullTimeLog:
            # If the last full query is recent enough, restrict this query
            # to files added since the last cycle; otherwise fall through
            # and make a full query just in case.
            if ( datetime.datetime.utcnow() - self.fullTimeLog[transID] ) < datetime.timedelta( seconds = self.fullUpdatePeriod ):
              timeStamp = self.timeLog[transID]
              if self.dateKey:
                # Back off 10 seconds to avoid missing files added while
                # the previous query was running.
                inputDataQuery[self.dateKey] = ( timeStamp - datetime.timedelta( seconds = 10 ) ).strftime( '%Y-%m-%d %H:%M:%S' )
              else:
                gLogger.error( "DateKey was not set in the CS, cannot use the RefreshOnly" )
            else:
              self.fullTimeLog[transID] = datetime.datetime.utcnow()
        self.timeLog[transID] = datetime.datetime.utcnow()
        if transID not in self.fullTimeLog:
          self.fullTimeLog[transID] = datetime.datetime.utcnow()

      # Perform the query to the metadata catalog
      gLogger.verbose( "Using input data query for transformation %d: %s" % ( transID, str( inputDataQuery ) ) )
      start = time.time()
      result = self.metadataClient.findFilesByMetadata( inputDataQuery )
      rtime = time.time() - start
      gLogger.verbose( "Metadata catalog query time: %.2f seconds." % ( rtime ) )
      if not result['OK']:
        gLogger.error( "InputDataAgent.execute: Failed to get response from the metadata catalog", result['Message'] )
        continue
      lfnList = result['Value']

      # Check if the number of files has changed since the last cycle
      nlfns = len( lfnList )
      gLogger.info( "%d files returned for transformation %d from the metadata catalog" % ( nlfns, int( transID ) ) )
      # NOTE(review): an unchanged count is only logged; the files are still
      # (re)submitted below -- presumably addFilesToTransformation is
      # idempotent.  Confirm before adding a 'continue' here.
      if nlfns == self.fileLog.get( transID ):
        gLogger.verbose( 'No new files in metadata catalog since last check' )
      self.fileLog[transID] = nlfns

      # Add any new files to the transformation
      addedLfns = []
      if lfnList:
        gLogger.verbose( 'Processing %d lfns for transformation %d' % ( len( lfnList ), transID ) )
        # Add the files to the transformation
        gLogger.verbose( 'Adding %d lfns for transformation %d' % ( len( lfnList ), transID ) )
        result = self.transClient.addFilesToTransformation( transID, sorted( lfnList ) )
        if not result['OK']:
          gLogger.warn( "InputDataAgent.execute: failed to add lfns to transformation", result['Message'] )
          self.fileLog[transID] = 0
        else:
          # Bug fix: this loop previously iterated 'res' (the input-data-query
          # response) instead of 'result' (the addFilesToTransformation
          # response), so per-file failures were never reported correctly.
          if result['Value']['Failed']:
            for lfn, error in result['Value']['Failed'].items():
              gLogger.warn( "InputDataAgent.execute: Failed to add %s to transformation" % lfn, error )
          if result['Value']['Successful']:
            for lfn, status in result['Value']['Successful'].items():
              if status == 'Added':
                addedLfns.append( lfn )
            gLogger.info( "InputDataAgent.execute: Added %d files to transformation" % len( addedLfns ) )

    return S_OK()
# Copyright 2015 0xc0170
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import yaml
from .project import FILES_EXTENSIONS
import logging
import bisect
def _determine_tool(linker_ext):
if "sct" in linker_ext or "lin" in linker_ext:
return "uvision"
elif "ld" in linker_ext:
return "make_gcc_arm"
elif "icf" in linker_ext:
return "iar_arm"
def _scan(section, root, directory, extensions):
if section == "sources":
data_dict = {}
else:
data_dict = []
for dirpath, dirnames, files in os.walk(directory):
for filename in files:
ext = filename.split('.')[-1]
relpath = os.path.relpath(dirpath, root)
if ext in extensions:
if section == "sources":
dir = directory.split(os.path.sep)[-1] if dirpath == directory else dirpath.replace(directory,'').split(os.path.sep)[1]
if dir in data_dict and relpath not in data_dict[dir]:
bisect.insort(data_dict[dir], relpath)
else:
data_dict[dir] = [(relpath)]
elif section == 'includes':
dirs = relpath.split(os.path.sep)
for i in range(1, len(dirs)+1):
data_dict.append(os.path.sep.join(dirs[:i]))
else:
data_dict.append(os.path.join(relpath, filename))
if section == "sources":
return data_dict
l = list(set(data_dict))
l.sort()
return l
def _generate_file(filename, root, directory, data):
    """Serialize *data* to YAML and write it to *root*/*filename*.

    When a file of that name already exists the user is asked whether it may
    be overwritten; returns -1 (and writes nothing) if they decline.
    NOTE(review): existence is checked under *directory* but the file is
    written under *root* -- confirm both are intended to be the same place.
    """
    logging.debug('Generating yaml file')
    overwrite = False
    if os.path.isfile(os.path.join(directory, filename)):
        print("Project file " + filename + " already exists")
        # raw_input keeps Python 2 compatibility (as used by the rest of
        # this module).  The old while/try-ValueError wrapper was dead code:
        # the membership test never raises.
        answer = raw_input('Should I overwrite it? (Y/n)')
        overwrite = answer.lower() in ('y', 'yes')
        if not overwrite:
            logging.critical('Unable to save project file')
            return -1
    if overwrite:
        # Bug fix: open with 'w' so the file is truncated first; the previous
        # 'r+' mode left stale trailing bytes when the new YAML was shorter
        # than the old content.
        with open(os.path.join(root, filename), 'w') as f:
            f.write(yaml.dump(data, default_flow_style=False))
    else:
        with open(os.path.join(root, filename), 'w+') as f:
            f.write(yaml.dump(data, default_flow_style=False))
        # Mark the freshly created file hidden (meaningful on Windows only).
        p = os.popen('attrib +h ' + filename)
        p.close()
def create_yaml(root, directory, project_name, board):
    """Scan *directory* and generate projects.yaml / project.yaml for the
    project *project_name* targeting *board*.  Returns 0."""
    common_section = {
        'linker_file': FILES_EXTENSIONS['linker_file'],
        'sources': FILES_EXTENSIONS['source_files_c'] + FILES_EXTENSIONS['source_files_cpp'] +
            FILES_EXTENSIONS['source_files_s'] + FILES_EXTENSIONS['source_files_obj'],
        'includes': FILES_EXTENSIONS['includes'],
        'target': [],
    }
    projects_yaml = {
        'projects': {
            project_name: ['project.yaml']
        }
    }
    project_yaml = {'common': {}, 'tool_specific': {}}

    # Populate each section that has file extensions to search for
    # ('target' has none and is filled in explicitly below).
    for section, extensions in common_section.items():
        if extensions:
            project_yaml['common'][section] = _scan(section, root, directory, extensions)
    project_yaml['common']['target'] = [board]

    # Derive the tool from the linker script's extension and mirror the
    # linker file into the tool-specific section.
    linker_ext = str(project_yaml['common']['linker_file']).split('.')[-1]
    tool = _determine_tool(linker_ext)
    project_yaml['tool_specific'] = {
        tool: {
            'linker_file': project_yaml['common']['linker_file']
        }
    }

    _generate_file("projects.yaml", root, directory, projects_yaml)
    _generate_file("project.yaml", root, directory, project_yaml)
    return 0
import wx
import peringatan
import edit_data_kemiskinan
import frm_sideka_menu
def create(parent):
    # Factory entry point used by the application framework to open
    # this dialog with *parent* as its parent window.
    return Dialog1(parent)
# Unique widget IDs for the dialog and each of its controls
# (Boa-Constructor-style generated pattern).
[wxID_DIALOG1, wxID_DIALOG1BUTTON1, wxID_DIALOG1BUTTON2,
 wxID_DIALOG1STATICLINE1, wxID_DIALOG1STATICTEXT1, wxID_DIALOG1STATICTEXT2,
 wxID_DIALOG1TEXTCTRL1,
] = [wx.NewId() for _init_ctrls in range(7)]
class Dialog1(wx.Dialog):
    """Password prompt dialog ("Otentifikasi") guarding access to the
    poverty-data editing screen."""

    def _init_ctrls(self, prnt):
        # generated method, don't edit
        wx.Dialog.__init__(self, id=wxID_DIALOG1, name='', parent=prnt,
            pos=wx.Point(515, 304), size=wx.Size(402, 140), style=wx.CAPTION,
            title=u'Otentifikasi')
        self.SetClientSize(wx.Size(402, 140))

        self.staticText1 = wx.StaticText(id=wxID_DIALOG1STATICTEXT1,
            label=u'Password', name='staticText1', parent=self,
            pos=wx.Point(16, 64), size=wx.Size(60, 17), style=0)

        # Masked text field for the password entry.
        self.textCtrl1 = wx.TextCtrl(id=wxID_DIALOG1TEXTCTRL1, name='textCtrl1',
            parent=self, pos=wx.Point(96, 56), size=wx.Size(296, 25),
            style=wx.TE_PASSWORD, value='')

        self.staticText2 = wx.StaticText(id=wxID_DIALOG1STATICTEXT2,
            label=u'MASUKAN PASSWORD DAHULU', name='staticText2', parent=self,
            pos=wx.Point(104, 16), size=wx.Size(203, 17), style=0)

        self.button1 = wx.Button(id=wxID_DIALOG1BUTTON1, label=u'Lanjutkan',
            name='button1', parent=self, pos=wx.Point(208, 96),
            size=wx.Size(184, 30), style=0)
        self.button1.Bind(wx.EVT_BUTTON, self.OnButton1Button,
            id=wxID_DIALOG1BUTTON1)

        self.button2 = wx.Button(id=wxID_DIALOG1BUTTON2,
            label=u'Kembali Ke Menu', name='button2', parent=self,
            pos=wx.Point(16, 96), size=wx.Size(184, 30), style=0)
        self.button2.Bind(wx.EVT_BUTTON, self.OnButton2Button,
            id=wxID_DIALOG1BUTTON2)

        self.staticLine1 = wx.StaticLine(id=wxID_DIALOG1STATICLINE1,
            name='staticLine1', parent=self, pos=wx.Point(16, 40),
            size=wx.Size(368, 2), style=0)

    def __init__(self, parent):
        self._init_ctrls(parent)

    def OnButton1Button(self, event):
        # "Continue" button: check the entered password.
        # NOTE(review): the password is hard-coded in plain text here --
        # anyone with the source can read it; consider a secure store.
        oneng = 'andri'
        user_password = self.textCtrl1.GetValue()
        if user_password == oneng:
            # Correct password: open the data-editing window, close this one.
            self.main=edit_data_kemiskinan.create(None)
            self.main.Show()
            self.Close()
        else:
            # Wrong password: show the warning dialog instead.
            self.Close()
            self.main=peringatan.create(None)
            self.main.Show()

    def OnButton2Button(self, event):
        # "Back to menu" button: return to the main menu window.
        self.Close()
        self.main=frm_sideka_menu.create(None)
        self.main.Show()
/*
Copyright 2020 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package validation
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"fmt"
"math/big"
mathrand "math/rand"
"reflect"
"regexp"
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
apimachineryvalidation "k8s.io/apimachinery/pkg/api/validation"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/validation/field"
"k8s.io/client-go/util/certificate/csr"
capi "k8s.io/kubernetes/pkg/apis/certificates"
"k8s.io/kubernetes/pkg/apis/core"
testclock "k8s.io/utils/clock/testing"
"k8s.io/utils/ptr"
)
// Shared fixtures reused by the CSR validation tests below.
var (
	validObjectMeta = metav1.ObjectMeta{Name: "testcsr"}
	validSignerName = "example.com/valid-name"
	validUsages     = []capi.KeyUsage{capi.UsageKeyEncipherment}
)
// TestValidateCertificateSigningRequestCreate exercises create-time CSR
// validation: request PEM decoding, usages, signerName syntax and length
// limits, expirationSeconds bounds, and status condition combinations.
func TestValidateCertificateSigningRequestCreate(t *testing.T) {
	specPath := field.NewPath("spec")
	// maxLengthSignerName is a signerName that is of maximum length, utilising
	// the max length specifications defined in validation.go.
	// It is of the form <fqdn(253)>/<resource-namespace(63)>.<resource-name(253)>
	maxLengthFQDN := fmt.Sprintf("%s.%s.%s.%s", repeatString("a", 63), repeatString("a", 63), repeatString("a", 63), repeatString("a", 61))
	maxLengthSignerName := fmt.Sprintf("%s/%s.%s", maxLengthFQDN, repeatString("a", 63), repeatString("a", 253))
	tests := map[string]struct {
		csr  capi.CertificateSigningRequest
		errs field.ErrorList
	}{
		"CSR with empty request data should fail": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					SignerName: validSignerName,
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("request"), []byte(nil), "PEM block type must be CERTIFICATE REQUEST"),
			},
		},
		"CSR with invalid request data should fail": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					SignerName: validSignerName,
					Request:    []byte("invalid data"),
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("request"), []byte("invalid data"), "PEM block type must be CERTIFICATE REQUEST"),
			},
		},
		"CSR with no usages should fail": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					SignerName: validSignerName,
					Request:    newCSRPEM(t),
				},
			},
			errs: field.ErrorList{
				field.Required(specPath.Child("usages"), ""),
			},
		},
		"CSR with no signerName set should fail": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:  validUsages,
					Request: newCSRPEM(t),
				},
			},
			errs: field.ErrorList{
				field.Required(specPath.Child("signerName"), ""),
			},
		},
		// signerName syntax cases.
		"signerName contains no '/'": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "an-invalid-signer-name",
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), "an-invalid-signer-name", "must be a fully qualified domain and path of the form 'example.com/signer-name'"),
			},
		},
		"signerName contains two '/'": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "an-invalid-signer-name.com/something/else",
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), "an-invalid-signer-name.com/something/else", "must be a fully qualified domain and path of the form 'example.com/signer-name'"),
			},
		},
		"signerName domain component is not fully qualified": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "example/some-signer-name",
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), "example", "should be a domain with at least two segments separated by dots"),
			},
		},
		"signerName path component is empty": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "example.com/",
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), "", `validating label "": a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`),
			},
		},
		"signerName path component ends with a symbol": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "example.com/something-",
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), "something-", `validating label "something-": a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`),
			},
		},
		"signerName path component is a symbol": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "example.com/-",
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), "-", `validating label "-": a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`),
			},
		},
		"signerName path component contains no '.' but is valid": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: "example.com/some-signer-name",
				},
			},
		},
		// signerName length-limit cases.
		"signerName with a total length greater than 571 characters should be rejected": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:  validUsages,
					Request: newCSRPEM(t),
					// this string is longer than the max signerName limit (635 chars)
					SignerName: maxLengthSignerName + ".toolong",
				},
			},
			errs: field.ErrorList{
				field.TooLong(specPath.Child("signerName"), "" /*unused*/, len(maxLengthSignerName)),
			},
		},
		"signerName with a fqdn greater than 253 characters should be rejected": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:  validUsages,
					Request: newCSRPEM(t),
					// this string is longer than the max signerName limit (635 chars)
					SignerName: fmt.Sprintf("%s.extra/valid-path", maxLengthFQDN),
				},
			},
			errs: field.ErrorList{
				field.TooLong(specPath.Child("signerName"), "" /*unused*/, len(maxLengthFQDN)),
			},
		},
		"signerName can have a longer path if the domain component is less than the max length": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: fmt.Sprintf("abc.io/%s.%s", repeatString("a", 253), repeatString("a", 253)),
				},
			},
		},
		"signerName with a domain label greater than 63 characters will fail": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: fmt.Sprintf("%s.example.io/valid-path", repeatString("a", 66)),
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("signerName"), fmt.Sprintf("%s.example.io", repeatString("a", 66)), fmt.Sprintf(`validating label "%s": must be no more than 63 characters`, repeatString("a", 66))),
			},
		},
		"signerName of max length in format <fully-qualified-domain-name>/<resource-namespace>.<resource-name> is valid": {
			// ensure signerName is of the form domain.com/something and up to 571 characters.
			// This length and format is specified to accommodate signerNames like:
			// <fqdn>/<resource-namespace>.<resource-name>.
			// The max length of a FQDN is 253 characters (DNS1123Subdomain max length)
			// The max length of a namespace name is 63 characters (DNS1123Label max length)
			// The max length of a resource name is 253 characters (DNS1123Subdomain max length)
			// We then add an additional 2 characters to account for the one '.' and one '/'.
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: maxLengthSignerName,
				},
			},
		},
		// expirationSeconds bound cases (minimum allowed is 600 seconds).
		"negative duration": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:            validUsages,
					Request:           newCSRPEM(t),
					SignerName:        validSignerName,
					ExpirationSeconds: ptr.To[int32](-1),
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("expirationSeconds"), int32(-1), "may not specify a duration less than 600 seconds (10 minutes)"),
			},
		},
		"zero duration": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:            validUsages,
					Request:           newCSRPEM(t),
					SignerName:        validSignerName,
					ExpirationSeconds: ptr.To[int32](0),
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("expirationSeconds"), int32(0), "may not specify a duration less than 600 seconds (10 minutes)"),
			},
		},
		"one duration": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:            validUsages,
					Request:           newCSRPEM(t),
					SignerName:        validSignerName,
					ExpirationSeconds: ptr.To[int32](1),
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("expirationSeconds"), int32(1), "may not specify a duration less than 600 seconds (10 minutes)"),
			},
		},
		"too short duration": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:            validUsages,
					Request:           newCSRPEM(t),
					SignerName:        validSignerName,
					ExpirationSeconds: csr.DurationToExpirationSeconds(time.Minute),
				},
			},
			errs: field.ErrorList{
				field.Invalid(specPath.Child("expirationSeconds"), *csr.DurationToExpirationSeconds(time.Minute), "may not specify a duration less than 600 seconds (10 minutes)"),
			},
		},
		"valid duration": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:            validUsages,
					Request:           newCSRPEM(t),
					SignerName:        validSignerName,
					ExpirationSeconds: csr.DurationToExpirationSeconds(10 * time.Minute),
				},
			},
		},
		"missing usages": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     []capi.KeyUsage{},
					Request:    newCSRPEM(t),
					SignerName: validSignerName,
				},
			},
			errs: field.ErrorList{
				field.Required(specPath.Child("usages"), ""),
			},
		},
		"unknown and duplicate usages": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     []capi.KeyUsage{"unknown", "unknown"},
					Request:    newCSRPEM(t),
					SignerName: validSignerName,
				},
			},
			errs: field.ErrorList{
				field.NotSupported(specPath.Child("usages").Index(0), capi.KeyUsage("unknown"), allValidUsages.List()),
				field.NotSupported(specPath.Child("usages").Index(1), capi.KeyUsage("unknown"), allValidUsages.List()),
				field.Duplicate(specPath.Child("usages").Index(1), capi.KeyUsage("unknown")),
			},
		},
		// Status condition combination cases.
		"approved condition only": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: validSignerName,
				},
				Status: capi.CertificateSigningRequestStatus{
					Conditions: []capi.CertificateSigningRequestCondition{
						{Type: capi.CertificateApproved, Status: core.ConditionTrue},
					},
				},
			},
		},
		"denied condition only": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: validSignerName,
				},
				Status: capi.CertificateSigningRequestStatus{
					Conditions: []capi.CertificateSigningRequestCondition{
						{Type: capi.CertificateDenied, Status: core.ConditionTrue},
					},
				},
			},
		},
		"both approved and denied conditions": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: validSignerName,
				},
				Status: capi.CertificateSigningRequestStatus{
					Conditions: []capi.CertificateSigningRequestCondition{
						{Type: capi.CertificateApproved, Status: core.ConditionTrue},
						{Type: capi.CertificateDenied, Status: core.ConditionTrue},
					},
				},
			},
			errs: field.ErrorList{
				field.Invalid(field.NewPath("status", "conditions"), capi.CertificateDenied, "Approved and Denied conditions are mutually exclusive").WithOrigin("zeroOrOneOf").MarkCoveredByDeclarative(),
			},
		},
		"approved and failed conditions allowed": {
			csr: capi.CertificateSigningRequest{
				ObjectMeta: validObjectMeta,
				Spec: capi.CertificateSigningRequestSpec{
					Usages:     validUsages,
					Request:    newCSRPEM(t),
					SignerName: validSignerName,
				},
				Status: capi.CertificateSigningRequestStatus{
					Conditions: []capi.CertificateSigningRequestCondition{
						{Type: capi.CertificateApproved, Status: core.ConditionTrue},
						{Type: capi.CertificateFailed, Status: core.ConditionTrue},
					},
				},
			},
		},
	}
	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			el := ValidateCertificateSigningRequestCreate(&test.csr)
			if !reflect.DeepEqual(el, test.errs) {
				t.Errorf("returned and expected errors did not match - expected\n%v\nbut got\n%v", test.errs.ToAggregate(), el.ToAggregate())
			}
		})
	}
}
func repeatString(s string, num int) string {
l := make([]string, num)
for i := 0; i < num; i++ {
l[i] = s
}
return strings.Join(l, "")
}
// newCSRPEM builds a minimal, freshly-signed certificate signing request
// and returns it PEM-encoded; any failure aborts the calling test.
func newCSRPEM(t *testing.T) []byte {
	req := &x509.CertificateRequest{
		Subject: pkix.Name{
			Organization: []string{"testing-org"},
		},
	}
	// A throwaway ed25519 key is enough to produce a well-formed CSR.
	_, signer, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatal(err)
	}
	der, err := x509.CreateCertificateRequest(rand.Reader, req, signer)
	if err != nil {
		t.Fatal(err)
	}
	encoded := pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE REQUEST",
		Bytes: der,
	})
	if encoded == nil {
		t.Fatal("invalid pem block")
	}
	return encoded
}
// Test_getValidationOptions verifies which compatibility relaxations are
// granted on update, based on what the pre-existing (old) CSR already
// contained; a strict (empty) option set applies on create and on update
// from a clean object.
func Test_getValidationOptions(t *testing.T) {
	tests := []struct {
		name   string
		newCSR *capi.CertificateSigningRequest
		oldCSR *capi.CertificateSigningRequest
		want   certificateValidationOptions
	}{{
		name:   "strict create",
		oldCSR: nil,
		want:   certificateValidationOptions{},
	}, {
		name:   "strict update",
		oldCSR: &capi.CertificateSigningRequest{},
		want:   certificateValidationOptions{},
	}, {
		name: "compatible update, approved+denied",
		oldCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved}, {Type: capi.CertificateDenied}},
		}},
		want: certificateValidationOptions{
			allowBothApprovedAndDenied: true,
		},
	}, {
		name:   "compatible update, legacy signerName",
		oldCSR: &capi.CertificateSigningRequest{Spec: capi.CertificateSigningRequestSpec{SignerName: capi.LegacyUnknownSignerName}},
		want: certificateValidationOptions{
			allowLegacySignerName: true,
		},
	}, {
		name: "compatible update, duplicate condition types",
		oldCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved}, {Type: capi.CertificateApproved}},
		}},
		want: certificateValidationOptions{
			allowDuplicateConditionTypes: true,
		},
	}, {
		name: "compatible update, empty condition types",
		oldCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Conditions: []capi.CertificateSigningRequestCondition{{}},
		}},
		want: certificateValidationOptions{
			allowEmptyConditionType: true,
		},
	}, {
		name: "compatible update, no diff to certificate",
		newCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Certificate: validCertificate,
		}},
		oldCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Certificate: validCertificate,
		}},
		want: certificateValidationOptions{
			allowArbitraryCertificate: true,
		},
	}, {
		name: "compatible update, existing invalid certificate",
		newCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Certificate: []byte(`new - no PEM blocks`),
		}},
		oldCSR: &capi.CertificateSigningRequest{Status: capi.CertificateSigningRequestStatus{
			Certificate: []byte(`old - no PEM blocks`),
		}},
		want: certificateValidationOptions{
			allowArbitraryCertificate: true,
		},
	}, {
		name:   "compatible update, existing unknown usages",
		oldCSR: &capi.CertificateSigningRequest{Spec: capi.CertificateSigningRequestSpec{Usages: []capi.KeyUsage{"unknown"}}},
		want: certificateValidationOptions{
			allowUnknownUsages: true,
		},
	}, {
		name:   "compatible update, existing duplicate usages",
		oldCSR: &capi.CertificateSigningRequest{Spec: capi.CertificateSigningRequestSpec{Usages: []capi.KeyUsage{"any", "any"}}},
		want: certificateValidationOptions{
			allowDuplicateUsages: true,
		},
	}}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			if got := getValidationOptions(tt.newCSR, tt.oldCSR); !reflect.DeepEqual(got, tt.want) {
				t.Errorf("got %#v\nwant %#v", got, tt.want)
			}
		})
	}
}
// TestValidateCertificateSigningRequestUpdate exercises the general CSR update
// validation path: Approved/Denied/Failed condition add/remove rules,
// certificate immutability, and mutual exclusion of Approved+Denied.
func TestValidateCertificateSigningRequestUpdate(t *testing.T) {
// Updates require a ResourceVersion on object metadata.
validUpdateMeta := validObjectMeta
validUpdateMeta.ResourceVersion = "1"
validUpdateMetaWithFinalizers := validUpdateMeta
validUpdateMetaWithFinalizers.Finalizers = []string{"foo"}
validSpec := capi.CertificateSigningRequestSpec{
Usages: validUsages,
Request: newCSRPEM(t),
SignerName: "example.com/something",
}
tests := []struct {
name string
newCSR *capi.CertificateSigningRequest
oldCSR *capi.CertificateSigningRequest
// errs holds the exact error strings expected from validation; empty means
// the update must validate cleanly.
errs []string
}{{
name: "no-op",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
}, {
name: "finalizer change with invalid status",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{Certificate: invalidCertificateNoPEM}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{Certificate: invalidCertificateNoPEM}},
}, {
name: "add Approved condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.conditions: Forbidden: updates may not add a condition of type "Approved"`,
},
}, {
name: "remove Approved condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Approved"`,
},
}, {
name: "add Denied condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.conditions: Forbidden: updates may not add a condition of type "Denied"`,
},
}, {
name: "remove Denied condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Denied"`,
},
}, {
// Adding a Failed condition is allowed on this update path, hence no errors.
name: "add Failed condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{},
}, {
name: "remove Failed condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Failed"`,
},
}, {
name: "set certificate",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Certificate: validCertificate,
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.certificate: Forbidden: updates may not set certificate content`,
},
}, {
name: "add both approved and denied conditions",
newCSR: &capi.CertificateSigningRequest{
ObjectMeta: validUpdateMeta,
Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{
{Type: capi.CertificateApproved, Status: core.ConditionTrue},
{Type: capi.CertificateDenied, Status: core.ConditionTrue},
},
},
},
oldCSR: &capi.CertificateSigningRequest{
ObjectMeta: validUpdateMetaWithFinalizers,
Spec: validSpec,
},
errs: []string{
`status.conditions: Forbidden: updates may not add a condition of type "Approved"`,
`status.conditions: Forbidden: updates may not add a condition of type "Denied"`,
`status.conditions: Invalid value: "Denied": Approved and Denied conditions are mutually exclusive`,
},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Collect errors into sets so the comparison is order-independent.
gotErrs := sets.NewString()
for _, err := range ValidateCertificateSigningRequestUpdate(tt.newCSR, tt.oldCSR) {
gotErrs.Insert(err.Error())
}
wantErrs := sets.NewString(tt.errs...)
for _, missing := range wantErrs.Difference(gotErrs).List() {
t.Errorf("missing expected error: %s", missing)
}
for _, unexpected := range gotErrs.Difference(wantErrs).List() {
t.Errorf("unexpected error: %s", unexpected)
}
})
}
}
// TestValidateCertificateSigningRequestStatusUpdate exercises the status
// subresource update path: condition add/remove rules plus the ability to set
// a (valid) certificate exactly once.
func TestValidateCertificateSigningRequestStatusUpdate(t *testing.T) {
// Updates require a ResourceVersion on object metadata.
validUpdateMeta := validObjectMeta
validUpdateMeta.ResourceVersion = "1"
validUpdateMetaWithFinalizers := validUpdateMeta
validUpdateMetaWithFinalizers.Finalizers = []string{"foo"}
validSpec := capi.CertificateSigningRequestSpec{
Usages: validUsages,
Request: newCSRPEM(t),
SignerName: "example.com/something",
}
tests := []struct {
name string
newCSR *capi.CertificateSigningRequest
oldCSR *capi.CertificateSigningRequest
// errs holds the exact error strings expected from validation; empty means
// the update must validate cleanly.
errs []string
}{{
name: "no-op",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
}, {
name: "finalizer change with invalid status",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{Certificate: invalidCertificateNoPEM}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{Certificate: invalidCertificateNoPEM}},
}, {
// Pre-existing bad usages must remain tolerated on unrelated updates.
name: "finalizer change with duplicate and unknown usages",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: capi.CertificateSigningRequestSpec{
Usages: []capi.KeyUsage{"unknown", "unknown"},
Request: newCSRPEM(t),
SignerName: validSignerName,
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: capi.CertificateSigningRequestSpec{
Usages: []capi.KeyUsage{"unknown", "unknown"},
Request: newCSRPEM(t),
SignerName: validSignerName,
}},
}, {
name: "add Approved condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.conditions: Forbidden: updates may not add a condition of type "Approved"`,
},
}, {
name: "remove Approved condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Approved"`,
},
}, {
name: "add Denied condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.conditions: Forbidden: updates may not add a condition of type "Denied"`,
},
}, {
name: "remove Denied condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Denied"`,
},
}, {
// Adding a Failed condition is allowed on the status path, hence no errors.
name: "add Failed condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{},
}, {
name: "remove Failed condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Failed"`,
},
}, {
// Setting a well-formed certificate is allowed on the status path.
name: "set valid certificate",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Certificate: validCertificate,
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{},
}, {
name: "set invalid certificate",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Certificate: invalidCertificateNoPEM,
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.certificate: Invalid value: "<certificate data>": must contain at least one CERTIFICATE PEM block`,
},
}, {
// Once set, the certificate content is immutable, even invalid->invalid.
name: "reset certificate",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Certificate: invalidCertificateNonCertificatePEM,
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Certificate: invalidCertificateNoPEM,
}},
errs: []string{
`status.certificate: Forbidden: updates may not modify existing certificate content`,
},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Collect errors into sets so the comparison is order-independent.
gotErrs := sets.NewString()
for _, err := range ValidateCertificateSigningRequestStatusUpdate(tt.newCSR, tt.oldCSR) {
gotErrs.Insert(err.Error())
}
wantErrs := sets.NewString(tt.errs...)
for _, missing := range wantErrs.Difference(gotErrs).List() {
t.Errorf("missing expected error: %s", missing)
}
for _, unexpected := range gotErrs.Difference(wantErrs).List() {
t.Errorf("unexpected error: %s", unexpected)
}
})
}
}
// TestValidateCertificateSigningRequestApprovalUpdate exercises the approval
// subresource update path, where adding Approved/Denied conditions is allowed
// but removing conditions and setting certificate content are not.
func TestValidateCertificateSigningRequestApprovalUpdate(t *testing.T) {
// Updates require a ResourceVersion on object metadata.
validUpdateMeta := validObjectMeta
validUpdateMeta.ResourceVersion = "1"
validUpdateMetaWithFinalizers := validUpdateMeta
validUpdateMetaWithFinalizers.Finalizers = []string{"foo"}
validSpec := capi.CertificateSigningRequestSpec{
Usages: validUsages,
Request: newCSRPEM(t),
SignerName: "example.com/something",
}
tests := []struct {
name string
newCSR *capi.CertificateSigningRequest
oldCSR *capi.CertificateSigningRequest
// errs holds the exact error strings expected from validation; empty means
// the update must validate cleanly.
errs []string
}{{
name: "no-op",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
}, {
name: "finalizer change with invalid certificate",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{Certificate: invalidCertificateNoPEM}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{Certificate: invalidCertificateNoPEM}},
}, {
// Approval updates may add an Approved condition, hence no errors.
name: "add Approved condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
}, {
name: "remove Approved condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Approved"`,
},
}, {
// Approval updates may add a Denied condition, hence no errors.
name: "add Denied condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
}, {
name: "remove Denied condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Denied"`,
},
}, {
name: "add Failed condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{},
}, {
name: "remove Failed condition",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
}},
errs: []string{
`status.conditions: Forbidden: updates may not remove a condition of type "Failed"`,
},
}, {
name: "set certificate",
newCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMeta, Spec: validSpec, Status: capi.CertificateSigningRequestStatus{
Certificate: validCertificate,
}},
oldCSR: &capi.CertificateSigningRequest{ObjectMeta: validUpdateMetaWithFinalizers, Spec: validSpec},
errs: []string{
`status.certificate: Forbidden: updates may not set certificate content`,
},
}}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// Collect errors into sets so the comparison is order-independent.
gotErrs := sets.NewString()
for _, err := range ValidateCertificateSigningRequestApprovalUpdate(tt.newCSR, tt.oldCSR) {
gotErrs.Insert(err.Error())
}
wantErrs := sets.NewString(tt.errs...)
for _, missing := range wantErrs.Difference(gotErrs).List() {
t.Errorf("missing expected error: %s", missing)
}
for _, unexpected := range gotErrs.Difference(wantErrs).List() {
t.Errorf("unexpected error: %s", unexpected)
}
})
}
}
// Test_validateCertificateSigningRequestOptions verifies validation options are effective in tolerating specific aspects of CSRs
func Test_validateCertificateSigningRequestOptions(t *testing.T) {
validSpec := capi.CertificateSigningRequestSpec{
Usages: validUsages,
Request: newCSRPEM(t),
SignerName: "example.com/something",
}
tests := []struct {
// testcase name
name string
// csr being validated
csr *capi.CertificateSigningRequest
// options that allow the csr to pass validation
lenientOpts certificateValidationOptions
// regexes matching expected errors when validating strictly
strictRegexes []regexp.Regexp
// expected errors (after filtering out errors matched by strictRegexes) when validating strictly
strictErrs []string
}{
// valid strict cases
{
name: "no status",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec},
}, {
name: "approved condition",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
},
},
}, {
name: "denied condition",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateDenied, Status: core.ConditionTrue}},
},
},
}, {
name: "failed condition",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateFailed, Status: core.ConditionTrue}},
},
},
}, {
name: "approved+issued",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: validCertificate,
},
},
},
// legacy signer
{
name: "legacy signer",
csr: &capi.CertificateSigningRequest{
ObjectMeta: validObjectMeta,
Spec: func() capi.CertificateSigningRequestSpec {
// Copy validSpec so the shared fixture is not mutated.
specCopy := validSpec
specCopy.SignerName = capi.LegacyUnknownSignerName
return specCopy
}(),
},
lenientOpts: certificateValidationOptions{allowLegacySignerName: true},
strictErrs: []string{`spec.signerName: Invalid value: "kubernetes.io/legacy-unknown": the legacy signerName is not allowed via this API version`},
},
// invalid condition cases
{
name: "empty condition type",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Status: core.ConditionTrue}},
},
},
lenientOpts: certificateValidationOptions{allowEmptyConditionType: true},
strictErrs: []string{`status.conditions[0].type: Required value`},
}, {
name: "approved and denied",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}, {Type: capi.CertificateDenied, Status: core.ConditionTrue}},
},
},
lenientOpts: certificateValidationOptions{allowBothApprovedAndDenied: true},
strictErrs: []string{`status.conditions: Invalid value: "Denied": Approved and Denied conditions are mutually exclusive`},
}, {
name: "duplicate condition",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}, {Type: capi.CertificateApproved, Status: core.ConditionTrue}},
},
},
lenientOpts: certificateValidationOptions{allowDuplicateConditionTypes: true},
strictErrs: []string{`status.conditions[1].type: Duplicate value: "Approved"`},
},
// invalid allowArbitraryCertificate cases
{
name: "status.certificate, no PEM",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: invalidCertificateNoPEM,
},
},
lenientOpts: certificateValidationOptions{allowArbitraryCertificate: true},
strictErrs: []string{`status.certificate: Invalid value: "<certificate data>": must contain at least one CERTIFICATE PEM block`},
}, {
name: "status.certificate, non-CERTIFICATE PEM",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: invalidCertificateNonCertificatePEM,
},
},
lenientOpts: certificateValidationOptions{allowArbitraryCertificate: true},
strictErrs: []string{`status.certificate: Invalid value: "<certificate data>": only CERTIFICATE PEM blocks are allowed, found "CERTIFICATE1"`},
}, {
name: "status.certificate, PEM headers",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: invalidCertificatePEMHeaders,
},
},
lenientOpts: certificateValidationOptions{allowArbitraryCertificate: true},
strictErrs: []string{`status.certificate: Invalid value: "<certificate data>": no PEM block headers are permitted`},
}, {
name: "status.certificate, non-base64 PEM",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: invalidCertificateNonBase64PEM,
},
},
lenientOpts: certificateValidationOptions{allowArbitraryCertificate: true},
strictErrs: []string{`status.certificate: Invalid value: "<certificate data>": must contain at least one CERTIFICATE PEM block`},
}, {
name: "status.certificate, empty PEM block",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: invalidCertificateEmptyPEM,
},
},
lenientOpts: certificateValidationOptions{allowArbitraryCertificate: true},
strictErrs: []string{`status.certificate: Invalid value: "<certificate data>": found CERTIFICATE PEM block containing 0 certificates`},
}, {
name: "status.certificate, non-ASN1 data",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}},
Certificate: invalidCertificateNonASN1Data,
},
},
lenientOpts: certificateValidationOptions{allowArbitraryCertificate: true},
// Regex because the parse-error text differs across Go versions.
strictRegexes: []regexp.Regexp{*regexp.MustCompile(`status.certificate: Invalid value: "\<certificate data\>": (asn1: structure error: sequence tag mismatch|x509: invalid RDNSequence)`)},
}, {
// NOTE(review): appears identical to the earlier "approved and denied"
// case; t.Run will disambiguate the subtest names — confirm whether the
// duplication is intentional.
name: "approved and denied",
csr: &capi.CertificateSigningRequest{ObjectMeta: validObjectMeta, Spec: validSpec,
Status: capi.CertificateSigningRequestStatus{
Conditions: []capi.CertificateSigningRequestCondition{{Type: capi.CertificateApproved, Status: core.ConditionTrue}, {Type: capi.CertificateDenied, Status: core.ConditionTrue}},
},
},
lenientOpts: certificateValidationOptions{allowBothApprovedAndDenied: true},
strictErrs: []string{`status.conditions: Invalid value: "Denied": Approved and Denied conditions are mutually exclusive`},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
// make sure the lenient options validate with no errors
for _, err := range validateCertificateSigningRequest(tt.csr, tt.lenientOpts) {
t.Errorf("unexpected error with lenient options: %s", err.Error())
}
// make sure the strict options produce the expected errors
gotErrs := sets.NewString()
for _, err := range validateCertificateSigningRequest(tt.csr, certificateValidationOptions{}) {
gotErrs.Insert(err.Error())
}
// filter errors matching strictRegexes and ensure every strictRegex matches at least one error
for _, expectedRegex := range tt.strictRegexes {
matched := false
for _, err := range gotErrs.List() {
if expectedRegex.MatchString(err) {
gotErrs.Delete(err)
matched = true
}
}
if !matched {
t.Errorf("missing expected error matching: %s", expectedRegex.String())
}
}
wantErrs := sets.NewString(tt.strictErrs...)
for _, missing := range wantErrs.Difference(gotErrs).List() {
t.Errorf("missing expected strict error: %s", missing)
}
for _, unexpected := range gotErrs.Difference(wantErrs).List() {
t.Errorf("unexpected errors: %s", unexpected)
}
})
}
}
func mustMakeCertificate(t *testing.T, template *x509.Certificate) []byte {
gen := mathrand.New(mathrand.NewSource(12345))
pub, priv, err := ed25519.GenerateKey(gen)
if err != nil {
t.Fatalf("Error while generating key: %v", err)
}
cert, err := x509.CreateCertificate(gen, template, template, pub, priv)
if err != nil {
t.Fatalf("Error while making certificate: %v", err)
}
return cert
}
func mustMakePEMBlock(blockType string, headers map[string]string, data []byte) string {
return string(pem.EncodeToMemory(&pem.Block{
Type: blockType,
Headers: headers,
Bytes: data,
}))
}
// TestValidateClusterTrustBundle exercises ClusterTrustBundle validation:
// naming rules with and without a signer name, trust-bundle PEM parsing,
// CA-bit enforcement, duplicate detection, and the SuppressBundleParsing
// escape hatch. Each case is also re-run through
// ValidateClusterTrustBundleUpdate with an unchanged object.
func TestValidateClusterTrustBundle(t *testing.T) {
goodCert1 := mustMakeCertificate(t, &x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{
CommonName: "root1",
},
IsCA: true,
BasicConstraintsValid: true,
})
goodCert2 := mustMakeCertificate(t, &x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{
CommonName: "root2",
},
IsCA: true,
BasicConstraintsValid: true,
})
// Deliberately lacks IsCA/BasicConstraintsValid to trip the CA-bit check.
badNotCACert := mustMakeCertificate(t, &x509.Certificate{
SerialNumber: big.NewInt(0),
Subject: pkix.Name{
CommonName: "root3",
},
})
goodCert1Block := string(mustMakePEMBlock("CERTIFICATE", nil, goodCert1))
goodCert2Block := string(mustMakePEMBlock("CERTIFICATE", nil, goodCert2))
// Same certificate, different inter-line whitespace: should be detected as a
// duplicate trust anchor.
goodCert1AlternateBlock := strings.ReplaceAll(goodCert1Block, "\n", "\n\t\n")
badNotCACertBlock := string(mustMakePEMBlock("CERTIFICATE", nil, badNotCACert))
badBlockHeadersBlock := string(mustMakePEMBlock("CERTIFICATE", map[string]string{"key": "value"}, goodCert1))
badBlockTypeBlock := string(mustMakePEMBlock("NOTACERTIFICATE", nil, goodCert1))
badNonParseableBlock := string(mustMakePEMBlock("CERTIFICATE", nil, []byte("this is not a certificate")))
// Repeat the good block until the bundle exceeds core.MaxSecretSize.
badTooBigBundle := ""
for i := 0; i < (core.MaxSecretSize/len(goodCert1Block))+1; i++ {
badTooBigBundle += goodCert1Block + "\n"
}
testCases := []struct {
description string
bundle *capi.ClusterTrustBundle
opts ValidateClusterTrustBundleOptions
wantErrors field.ErrorList
}{
{
description: "valid, no signer name",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: goodCert1Block,
},
},
},
{
description: "invalid, too big",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: badTooBigBundle,
},
},
wantErrors: field.ErrorList{
field.TooLong(field.NewPath("spec", "trustBundle"), "" /*unused*/, core.MaxSecretSize),
},
},
{
description: "invalid, no signer name, invalid name",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "k8s.io:bar:foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: goodCert1Block,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("metadata", "name"), "k8s.io:bar:foo", "ClusterTrustBundle without signer name must not have \":\" in its name"),
},
}, {
description: "valid, with signer name",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "k8s.io:foo:bar",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "k8s.io/foo",
TrustBundle: goodCert1Block,
},
},
}, {
description: "invalid, with signer name, missing name prefix",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "look-ma-no-prefix",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "k8s.io/foo",
TrustBundle: goodCert1Block,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("metadata", "name"), "look-ma-no-prefix", "ClusterTrustBundle for signerName k8s.io/foo must be named with prefix k8s.io:foo:"),
},
}, {
description: "invalid, with signer name, empty name suffix",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "k8s.io:foo:",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "k8s.io/foo",
TrustBundle: goodCert1Block,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("metadata", "name"), "k8s.io:foo:", `a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`),
},
}, {
description: "invalid, with signer name, bad name suffix",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "k8s.io:foo:123notvalidDNSSubdomain",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "k8s.io/foo",
TrustBundle: goodCert1Block,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("metadata", "name"), "k8s.io:foo:123notvalidDNSSubdomain", `a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')`),
},
}, {
description: "valid, with signer name, with inter-block garbage",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "k8s.io:foo:abc",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "k8s.io/foo",
TrustBundle: "garbage\n" + goodCert1Block + "\ngarbage\n" + goodCert2Block,
},
},
}, {
description: "invalid, no signer name, no trust anchors",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "at least one trust anchor must be provided"),
},
}, {
description: "invalid, no trust anchors",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "k8s.io:foo:abc",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "k8s.io/foo",
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "at least one trust anchor must be provided"),
},
}, {
description: "invalid, bad signer name",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "invalid:foo",
},
Spec: capi.ClusterTrustBundleSpec{
SignerName: "invalid",
TrustBundle: goodCert1Block,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "signerName"), "invalid", "must be a fully qualified domain and path of the form 'example.com/signer-name'"),
},
}, {
description: "invalid, no blocks",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: "non block garbage",
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "at least one trust anchor must be provided"),
},
}, {
description: "invalid, bad block type",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: goodCert1Block + "\n" + badBlockTypeBlock,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "entry 1 has bad block type: NOTACERTIFICATE"),
},
}, {
description: "invalid, block with headers",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: goodCert1Block + "\n" + badBlockHeadersBlock,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "entry 1 has PEM block headers"),
},
}, {
description: "invalid, cert is not a CA cert",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: badNotCACertBlock,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "entry 0 does not have the CA bit set"),
},
}, {
description: "invalid, duplicated blocks",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: goodCert1Block + "\n" + goodCert1AlternateBlock,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "duplicate trust anchor (indices [0 1])"),
},
}, {
description: "invalid, non-certificate entry",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: goodCert1Block + "\n" + badNonParseableBlock,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "entry 1 does not parse as X.509"),
},
}, {
description: "allow any old garbage in the PEM field if we suppress parsing",
bundle: &capi.ClusterTrustBundle{
ObjectMeta: metav1.ObjectMeta{
Name: "foo",
},
Spec: capi.ClusterTrustBundleSpec{
TrustBundle: "garbage",
},
},
opts: ValidateClusterTrustBundleOptions{
SuppressBundleParsing: true,
},
}}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
gotErrors := ValidateClusterTrustBundle(tc.bundle, tc.opts)
if diff := cmp.Diff(gotErrors, tc.wantErrors); diff != "" {
t.Fatalf("Unexpected error output from Validate; diff (-got +want)\n%s", diff)
}
// When there are no changes to the object,
// ValidateClusterTrustBundleUpdate should not report errors about
// the TrustBundle field.
tc.bundle.ObjectMeta.ResourceVersion = "1"
newBundle := tc.bundle.DeepCopy()
newBundle.ObjectMeta.ResourceVersion = "2"
gotErrors = ValidateClusterTrustBundleUpdate(newBundle, tc.bundle)
// Drop expected spec.trustBundle errors: unchanged bundle content is not
// re-validated on update.
var filteredWantErrors field.ErrorList
for _, err := range tc.wantErrors {
if err.Field != "spec.trustBundle" {
filteredWantErrors = append(filteredWantErrors, err)
}
}
if diff := cmp.Diff(gotErrors, filteredWantErrors); diff != "" {
t.Fatalf("Unexpected error output from ValidateUpdate; diff (-got +want)\n%s", diff)
}
})
}
}
// TestValidateClusterTrustBundleUpdate exercises the update-path validation
// rules for ClusterTrustBundle objects: the signerName field is immutable
// (and the object name must keep matching the signer-derived prefix), and an
// update may not leave the bundle without at least one parseable trust anchor.
func TestValidateClusterTrustBundleUpdate(t *testing.T) {
	// Two distinct self-signed CA certificates used as trust anchors;
	// both carry the CA bit with valid basic constraints.
	goodCert1 := mustMakeCertificate(t, &x509.Certificate{
		SerialNumber: big.NewInt(0),
		Subject: pkix.Name{
			CommonName: "root1",
		},
		IsCA:                  true,
		BasicConstraintsValid: true,
	})
	goodCert2 := mustMakeCertificate(t, &x509.Certificate{
		SerialNumber: big.NewInt(0),
		Subject: pkix.Name{
			CommonName: "root2",
		},
		IsCA:                  true,
		BasicConstraintsValid: true,
	})
	// PEM-encoded forms of the two certificates above.
	goodCert1Block := string(mustMakePEMBlock("CERTIFICATE", nil, goodCert1))
	goodCert2Block := string(mustMakePEMBlock("CERTIFICATE", nil, goodCert2))
	testCases := []struct {
		description          string
		oldBundle, newBundle *capi.ClusterTrustBundle
		wantErrors           field.ErrorList
	}{{
		// signerName is immutable; changing it also breaks the required
		// name prefix, so two errors are expected.
		description: "changing signer name disallowed",
		oldBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: goodCert1Block,
			},
		},
		newBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/bar",
				TrustBundle: goodCert1Block,
			},
		},
		wantErrors: field.ErrorList{
			field.Invalid(field.NewPath("metadata", "name"), "k8s.io:foo:bar", "ClusterTrustBundle for signerName k8s.io/bar must be named with prefix k8s.io:bar:"),
			field.Invalid(field.NewPath("spec", "signerName"), "k8s.io/bar", "field is immutable"),
		},
	}, {
		// The trustBundle payload itself is mutable; adding an anchor is fine.
		description: "adding certificate allowed",
		oldBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: goodCert1Block,
			},
		},
		newBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: goodCert1Block + "\n" + goodCert2Block,
			},
		},
	}, {
		// Removing all anchors (empty string) must be rejected.
		description: "emptying trustBundle disallowed",
		oldBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: goodCert1Block,
			},
		},
		newBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: "",
			},
		},
		wantErrors: field.ErrorList{
			field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "at least one trust anchor must be provided"),
		},
	}, {
		// Non-PEM garbage contains zero parseable blocks, which is
		// equivalent to emptying the bundle.
		description: "emptying trustBundle (replace with non-block garbage) disallowed",
		oldBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: goodCert1Block,
			},
		},
		newBundle: &capi.ClusterTrustBundle{
			ObjectMeta: metav1.ObjectMeta{
				Name: "k8s.io:foo:bar",
			},
			Spec: capi.ClusterTrustBundleSpec{
				SignerName:  "k8s.io/foo",
				TrustBundle: "non block garbage",
			},
		},
		wantErrors: field.ErrorList{
			field.Invalid(field.NewPath("spec", "trustBundle"), "<value omitted>", "at least one trust anchor must be provided"),
		},
	}}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// Updates require resource versions to be set; simulate an
			// old object at rv=1 being replaced by a new object at rv=2.
			tc.oldBundle.ObjectMeta.ResourceVersion = "1"
			tc.newBundle.ObjectMeta.ResourceVersion = "2"
			gotErrors := ValidateClusterTrustBundleUpdate(tc.newBundle, tc.oldBundle)
			if diff := cmp.Diff(gotErrors, tc.wantErrors); diff != "" {
				t.Errorf("Unexpected error output from ValidateUpdate; diff (-got +want)\n%s", diff)
			}
		})
	}
}
// TestValidatePodCertificateRequestCreate exercises create-time validation of
// PodCertificateRequest objects: accepted key types/sizes (Ed25519,
// ECDSA P256/P384/P521, RSA 3072/4096), proof-of-possession signature checks,
// the mutual exclusion between stubPKCS10Request and
// pkixPublicKey/proofOfPossession, required/format checks on the identity
// fields (signer, pod, service account, node), maxExpirationSeconds bounds,
// byte-size limits on the key/proof fields, and unverifiedUserAnnotations
// key/value constraints.
func TestValidatePodCertificateRequestCreate(t *testing.T) {
	podUID1 := "pod-uid-1"
	// Key/proof fixtures.  "Proof" values are signatures over the pod UID;
	// fixtures built over a different message (or with a different key) are
	// used to trigger proof-of-possession failures below.
	_, _, ed25519PubPKIX1, ed25519Proof1, ed25519CSR1 := mustMakeEd25519KeyAndProof(t, []byte(podUID1), []string{})
	_, _, ed25519PubPKIX2, ed25519Proof2, _ := mustMakeEd25519KeyAndProof(t, []byte("other-value"), []string{})
	_, _, _, ed25519Proof3, _ := mustMakeEd25519KeyAndProof(t, []byte(podUID1), []string{})
	_, _, ecdsaP224PubPKIX1, ecdsaP224Proof1 := mustMakeECDSAKeyAndProof(t, elliptic.P224(), []byte(podUID1))
	_, _, ecdsaP256PubPKIX1, ecdsaP256Proof1 := mustMakeECDSAKeyAndProof(t, elliptic.P256(), []byte(podUID1))
	_, _, ecdsaP384PubPKIX1, ecdsaP384Proof1 := mustMakeECDSAKeyAndProof(t, elliptic.P384(), []byte(podUID1))
	_, _, ecdsaP521PubPKIX1, ecdsaP521Proof1 := mustMakeECDSAKeyAndProof(t, elliptic.P521(), []byte(podUID1))
	_, _, ecdsaWrongProofPKIX, ecdsaWrongProof := mustMakeECDSAKeyAndProof(t, elliptic.P384(), []byte("other-value"))
	_, _, rsa2048PubPKIX1, rsa2048Proof1 := mustMakeRSAKeyAndProof(t, 2048, []byte(podUID1))
	_, _, rsa3072PubPKIX1, rsa3072Proof1 := mustMakeRSAKeyAndProof(t, 3072, []byte(podUID1))
	_, _, rsa4096PubPKIX1, rsa4096Proof1 := mustMakeRSAKeyAndProof(t, 4096, []byte(podUID1))
	_, _, rsaWrongProofPKIX, rsaWrongProof := mustMakeRSAKeyAndProof(t, 3072, []byte("other-value"))
	// A proof over the empty pod UID, for the "bad pod uid" case so that the
	// proof itself stays consistent with the (empty) UID.
	podUIDEmpty := ""
	_, _, pubPKIXEmpty, proofEmpty, _ := mustMakeEd25519KeyAndProof(t, []byte(podUIDEmpty), []string{})
	testCases := []struct {
		description string
		pcr         *capi.PodCertificateRequest
		wantErrors  field.ErrorList
	}{
		// --- Ed25519 key handling and proof-of-possession ---
		{
			description: "valid Ed25519 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: nil,
		},
		{
			description: "valid Ed25519 PCR (using PKCS#10)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					StubPKCS10Request:    ed25519CSR1,
				},
			},
			wantErrors: nil,
		},
		{
			// stubPKCS10Request and pkixPublicKey/proofOfPossession are
			// mutually exclusive ways of presenting the key.
			description: "invalid Ed25519 PCR (both StubPKCS10Request and PKIXPublicKey set)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
					StubPKCS10Request:    ed25519CSR1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec"), field.OmitValueType{}, "exactly one of (stubPKCS10Request) or (pkixPublicKey, proofOfPossession) must be set"),
			},
		},
		{
			description: "invalid Ed25519 proof of possession (correct key signed wrong message)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX2,
					ProofOfPossession:    ed25519Proof2,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "proofOfPossession"), field.OmitValueType{}, "could not verify proof-of-possession signature"),
			},
		},
		{
			description: "invalid Ed25519 proof of possession (signed by different key)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof3,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "proofOfPossession"), field.OmitValueType{}, "could not verify proof-of-possession signature"),
			},
		},
		// --- ECDSA curve acceptance (P224 rejected; P256/P384/P521 allowed) ---
		{
			description: "invalid ECDSA P224 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ecdsaP224PubPKIX1,
					ProofOfPossession:    ecdsaP224Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "pkixPublicKey"), "curve P-224", "elliptic public keys must use curve P256 or P384"),
			},
		},
		{
			description: "valid ECDSA P256 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ecdsaP256PubPKIX1,
					ProofOfPossession:    ecdsaP256Proof1,
				},
			},
			wantErrors: nil,
		},
		{
			description: "valid ECDSA P384 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ecdsaP384PubPKIX1,
					ProofOfPossession:    ecdsaP384Proof1,
				},
			},
			wantErrors: nil,
		},
		{
			description: "valid ECDSA P521 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ecdsaP521PubPKIX1,
					ProofOfPossession:    ecdsaP521Proof1,
				},
			},
			wantErrors: nil,
		},
		{
			description: "invalid ECDSA proof of possession (correct key signed wrong message)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ecdsaWrongProofPKIX,
					ProofOfPossession:    ecdsaWrongProof,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "proofOfPossession"), field.OmitValueType{}, "could not verify proof-of-possession signature"),
			},
		},
		// --- RSA modulus acceptance (2048 rejected; 3072/4096 allowed) ---
		{
			description: "invalid RSA 2048 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        rsa2048PubPKIX1,
					ProofOfPossession:    rsa2048Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "pkixPublicKey"), "2048-bit modulus", "RSA keys must have modulus size 3072 or 4096"),
			},
		},
		{
			description: "valid RSA 3072 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        rsa3072PubPKIX1,
					ProofOfPossession:    rsa3072Proof1,
				},
			},
			wantErrors: nil,
		},
		{
			description: "valid RSA 4096 PCR",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        rsa4096PubPKIX1,
					ProofOfPossession:    rsa4096Proof1,
				},
			},
			wantErrors: nil,
		},
		{
			description: "invalid RSA proof of possession (correct key signed wrong message)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        rsaWrongProofPKIX,
					ProofOfPossession:    rsaWrongProof,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "proofOfPossession"), field.OmitValueType{}, "could not verify proof-of-possession signature"),
			},
		},
		// --- Identity field format / required checks ---
		{
			description: "bad signer name",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "not-valid-signername",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "signerName"), "not-valid-signername", "must be a fully qualified domain and path of the form 'example.com/signer-name'"),
			},
		},
		{
			description: "bad pod name",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1-bad!!!!!",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "podName"), "pod-1-bad!!!!!", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
			},
		},
		{
			// Uses the proof fixture generated over the empty UID so only the
			// emptiness of podUID is flagged, not the proof.
			description: "bad pod uid",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(""),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        pubPKIXEmpty,
					ProofOfPossession:    proofEmpty,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "podUID"), types.UID(""), "must not be empty"),
			},
		},
		{
			description: "bad service account name",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1-bad!!!!!",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "serviceAccountName"), "sa-1-bad!!!!!", "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
			},
		},
		{
			description: "bad service account uid",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "serviceAccountUID"), types.UID(""), "must not be empty"),
			},
		},
		{
			description: "bad node name",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1-bad!!!!!",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "nodeName"), types.NodeName("node-1-bad!!!!!"), "a lowercase RFC 1123 subdomain must consist of lower case alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character (e.g. 'example.com', regex used for validation is '[a-z0-9]([-a-z0-9]*[a-z0-9])?(\\.[a-z0-9]([-a-z0-9]*[a-z0-9])?)*')"),
			},
		},
		{
			description: "bad node uid",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "nodeUID"), types.UID(""), "must not be empty"),
			},
		},
		// --- maxExpirationSeconds bounds (general and Kubernetes-signer caps) ---
		{
			description: "maxExpirationSeconds missing",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:         "foo.com/abc",
					PodName:            "pod-1",
					PodUID:             types.UID(podUID1),
					ServiceAccountName: "sa-1",
					ServiceAccountUID:  "sa-uid-1",
					NodeName:           "node-1",
					NodeUID:            "node-uid-1",
					PKIXPublicKey:      ed25519PubPKIX1,
					ProofOfPossession:  ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Required(field.NewPath("spec", "maxExpirationSeconds"), "must be set"),
			},
		},
		{
			description: "maxExpirationSeconds too large",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](91*86400 + 1),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), ptr.To[int32](91*86400+1), fmt.Sprintf("must be in the range [%d, %d]", capi.MinMaxExpirationSeconds, capi.MaxMaxExpirationSeconds)),
			},
		},
		{
			// kubernetes.io/* signers have a tighter upper bound (24h).
			description: "maxExpirationSeconds too large (Kubernetes signer)",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "kubernetes.io/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86401),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), ptr.To[int32](86401), fmt.Sprintf("must be in the range [%d, %d]", capi.MinMaxExpirationSeconds, capi.KubernetesMaxMaxExpirationSeconds)),
			},
		},
		{
			description: "maxExpirationSeconds too small",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](3600 - 1),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    ed25519Proof1,
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "maxExpirationSeconds"), ptr.To[int32](3600-1), fmt.Sprintf("must be in the range [%d, %d]", capi.MinMaxExpirationSeconds, capi.MaxMaxExpirationSeconds)),
			},
		},
		// --- Byte-size caps on key/proof fields ---
		{
			description: "pkixPublicKey too long",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        make([]byte, capi.MaxPKIXPublicKeySize+1),
					ProofOfPossession:    []byte("abc"),
				},
			},
			wantErrors: field.ErrorList{
				field.TooLong(field.NewPath("spec", "pkixPublicKey"), make([]byte, capi.MaxPKIXPublicKeySize+1), capi.MaxPKIXPublicKeySize),
			},
		},
		{
			description: "proofOfPossession too long",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:           "foo.com/abc",
					PodName:              "pod-1",
					PodUID:               types.UID(podUID1),
					ServiceAccountName:   "sa-1",
					ServiceAccountUID:    "sa-uid-1",
					NodeName:             "node-1",
					NodeUID:              "node-uid-1",
					MaxExpirationSeconds: ptr.To[int32](86400),
					PKIXPublicKey:        ed25519PubPKIX1,
					ProofOfPossession:    make([]byte, capi.MaxProofOfPossessionSize+1),
				},
			},
			wantErrors: field.ErrorList{
				field.TooLong(field.NewPath("spec", "proofOfPossession"), make([]byte, capi.MaxProofOfPossessionSize+1), capi.MaxProofOfPossessionSize),
			},
		},
		// --- unverifiedUserAnnotations key/value constraints ---
		{
			description: "bad user annotations key name",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:                "foo.com/abc",
					PodName:                   "pod-1",
					PodUID:                    types.UID(podUID1),
					ServiceAccountName:        "sa-1",
					ServiceAccountUID:         "sa-uid-1",
					NodeName:                  "node-1",
					NodeUID:                   "node-uid-1",
					MaxExpirationSeconds:      ptr.To[int32](86400),
					PKIXPublicKey:             ed25519PubPKIX1,
					ProofOfPossession:         ed25519Proof1,
					UnverifiedUserAnnotations: map[string]string{"test/domain/foo": "bar"},
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "unverifiedUserAnnotations"), "test/domain/foo", "a valid label key must consist of alphanumeric characters, '-', '_' or '.', and must start and end with an alphanumeric character (e.g. 'MyName', or 'my.name', or '123-abc', regex used for validation is '([A-Za-z0-9][-A-Za-z0-9_.]*)?[A-Za-z0-9]') with an optional DNS subdomain prefix and '/' (e.g. 'example.com/MyName')"),
			},
		},
		{
			description: "bad user annotations key prefix too long",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:                "foo.com/abc",
					PodName:                   "pod-1",
					PodUID:                    types.UID(podUID1),
					ServiceAccountName:        "sa-1",
					ServiceAccountUID:         "sa-uid-1",
					NodeName:                  "node-1",
					NodeUID:                   "node-uid-1",
					MaxExpirationSeconds:      ptr.To[int32](86400),
					PKIXPublicKey:             ed25519PubPKIX1,
					ProofOfPossession:         ed25519Proof1,
					UnverifiedUserAnnotations: map[string]string{strings.Repeat("a", 254) + "/foo": "bar"},
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(field.NewPath("spec", "unverifiedUserAnnotations"), strings.Repeat("a", 254)+"/foo", "prefix part must be no more than 253 bytes"),
			},
		},
		{
			description: "bad user annotations key/value total size too long",
			pcr: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:                "foo.com/abc",
					PodName:                   "pod-1",
					PodUID:                    types.UID(podUID1),
					ServiceAccountName:        "sa-1",
					ServiceAccountUID:         "sa-uid-1",
					NodeName:                  "node-1",
					NodeUID:                   "node-uid-1",
					MaxExpirationSeconds:      ptr.To[int32](86400),
					PKIXPublicKey:             ed25519PubPKIX1,
					ProofOfPossession:         ed25519Proof1,
					UnverifiedUserAnnotations: map[string]string{"foo/bar": strings.Repeat("d", apimachineryvalidation.TotalAnnotationSizeLimitB)},
				},
			},
			wantErrors: field.ErrorList{
				field.TooLong(field.NewPath("spec", "unverifiedUserAnnotations"), "", apimachineryvalidation.TotalAnnotationSizeLimitB),
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			gotErrors := ValidatePodCertificateRequestCreate(tc.pcr)
			if diff := cmp.Diff(gotErrors, tc.wantErrors); diff != "" {
				t.Errorf("Unexpected error output from ValidatePodCertificateRequestCreate; diff (-got +want)\n%s", diff)
				t.Logf("Got errors: %+v", gotErrors)
			}
		})
	}
}
// TestValidatePodCertificateRequestUpdate verifies that the entire spec of a
// PodCertificateRequest is immutable on update: changing any spec field
// produces a single "field is immutable" error on the whole spec path.
func TestValidatePodCertificateRequestUpdate(t *testing.T) {
	podUID1 := "pod-uid-1"
	// Ed25519 key/proof fixture; the proof signs the pod UID.
	_, _, pubPKIX1, proof1, _ := mustMakeEd25519KeyAndProof(t, []byte(podUID1), []string{})
	testCases := []struct {
		description    string
		oldPCR, newPCR *capi.PodCertificateRequest
		wantErrors     field.ErrorList
	}{
		{
			// Every mutable-looking spec field is changed at once; the
			// validator is expected to report one error carrying the full
			// new spec value.
			description: "changing spec fields disallowed",
			oldPCR: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:                "foo.com/abc",
					PodName:                   "pod-1",
					PodUID:                    types.UID(podUID1),
					ServiceAccountName:        "sa-1",
					ServiceAccountUID:         "sa-uid-1",
					NodeName:                  "node-1",
					NodeUID:                   "node-uid-1",
					MaxExpirationSeconds:      ptr.To[int32](86400),
					PKIXPublicKey:             pubPKIX1,
					ProofOfPossession:         proof1,
					UnverifiedUserAnnotations: map[string]string{"test.domain/foo": "bar"},
				},
			},
			newPCR: &capi.PodCertificateRequest{
				ObjectMeta: metav1.ObjectMeta{
					Namespace: "foo",
					Name:      "bar",
				},
				Spec: capi.PodCertificateRequestSpec{
					SignerName:                "foo.com/new",
					PodName:                   "new",
					PodUID:                    types.UID("new"),
					ServiceAccountName:        "new",
					ServiceAccountUID:         "new",
					NodeName:                  "new",
					NodeUID:                   "new",
					MaxExpirationSeconds:      ptr.To[int32](86401),
					PKIXPublicKey:             pubPKIX1,
					ProofOfPossession:         proof1,
					UnverifiedUserAnnotations: map[string]string{"test.domain/foo": "foo"},
				},
			},
			wantErrors: field.ErrorList{
				field.Invalid(
					field.NewPath("spec"),
					capi.PodCertificateRequestSpec{
						SignerName:                "foo.com/new",
						PodName:                   "new",
						PodUID:                    types.UID("new"),
						ServiceAccountName:        "new",
						ServiceAccountUID:         "new",
						NodeName:                  "new",
						NodeUID:                   "new",
						MaxExpirationSeconds:      ptr.To[int32](86401),
						PKIXPublicKey:             pubPKIX1,
						ProofOfPossession:         proof1,
						UnverifiedUserAnnotations: map[string]string{"test.domain/foo": "foo"},
					},
					"field is immutable",
				),
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.description, func(t *testing.T) {
			// Updates require resource versions; rv=1 -> rv=2 mimics a real
			// update of an existing object.
			tc.oldPCR.ObjectMeta.ResourceVersion = "1"
			tc.newPCR.ObjectMeta.ResourceVersion = "2"
			gotErrors := ValidatePodCertificateRequestUpdate(tc.newPCR, tc.oldPCR)
			if diff := cmp.Diff(gotErrors, tc.wantErrors); diff != "" {
				t.Errorf("Unexpected error output from ValidatePodCertificateRequestUpdate; diff (-got +want)\n%s", diff)
			}
		})
	}
}
func TestValidatePodCertificateRequestStatusUpdate(t *testing.T) {
caCertDER, caPrivKey := mustMakeCA(t)
intermediateCACertDER, intermediateCAPrivKey := mustMakeIntermediateCA(t, caCertDER, caPrivKey)
podUID1 := "pod-uid-1"
_, pub1, pubPKIX1, proof1, _ := mustMakeEd25519KeyAndProof(t, []byte(podUID1), []string{})
pod1Cert1 := mustSignCertForPublicKey(t, 24*time.Hour, pub1, caCertDER, caPrivKey, false, "", "")
pod1Cert2 := mustSignCertForPublicKey(t, 18*time.Hour, pub1, caCertDER, caPrivKey, false, "", "")
badCertTooShort := mustSignCertForPublicKey(t, 50*time.Minute, pub1, caCertDER, caPrivKey, false, "", "")
badCertTooLong := mustSignCertForPublicKey(t, 25*time.Hour, pub1, caCertDER, caPrivKey, false, "", "")
certWithBadDNSName1 := mustSignCertForPublicKey(t, 24*time.Hour, pub1, caCertDER, caPrivKey, true, "", "")
certWithBadDNSName2 := mustSignCertForPublicKey(t, 24*time.Hour, pub1, caCertDER, caPrivKey, true, "test-name..example", "")
certWithBadDNSName3 := mustSignCertForPublicKey(t, 24*time.Hour, pub1, caCertDER, caPrivKey, true, ".example", "")
certWithBadDNSName4 := mustSignCertForPublicKey(t, 24*time.Hour, pub1, caCertDER, caPrivKey, true, "example.", "")
certWithBadEmailAddress := mustSignCertForPublicKey(t, 24*time.Hour, pub1, caCertDER, caPrivKey, false, "", "email@@address")
certFromIntermediate := mustSignCertForPublicKey(t, 24*time.Hour, pub1, intermediateCACertDER, intermediateCAPrivKey, false, "", "")
testCases := []struct {
description string
oldPCR, newPCR *capi.PodCertificateRequest
wantErrors field.ErrorList
}{
{
description: "changing nothing is allowed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
},
{
description: "adding unknown condition types is not allowed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: "Unknown",
Status: metav1.ConditionFalse,
Reason: "Foo",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.NotSupported(field.NewPath("status", "conditions", "[0]", "type"), "Unknown", []string{capi.PodCertificateRequestConditionTypeIssued, capi.PodCertificateRequestConditionTypeDenied, capi.PodCertificateRequestConditionTypeFailed}),
},
},
{
description: "Issued must have status True",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: "Issued",
Status: metav1.ConditionFalse,
Reason: "Foo",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.NotSupported(field.NewPath("status", "conditions", "[0]", "status"), metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue}),
},
},
{
description: "Denied must have status True",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: "Denied",
Status: metav1.ConditionFalse,
Reason: "Foo",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.NotSupported(field.NewPath("status", "conditions", "[0]", "status"), metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue}),
},
},
{
description: "Failed must have status True",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: "Failed",
Status: metav1.ConditionFalse,
Reason: "Foo",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.NotSupported(field.NewPath("status", "conditions", "[0]", "status"), metav1.ConditionFalse, []metav1.ConditionStatus{metav1.ConditionTrue}),
},
},
{
description: "transitioning to Denied status is allowed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: capi.PodCertificateRequestConditionUnsupportedKeyType,
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
},
{
description: "you can't issue a certificate if you set Denied status",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
},
},
wantErrors: field.ErrorList{
field.Invalid(
field.NewPath("status"),
field.OmitValueType{},
"non-condition status fields must be empty when denying or failing the PodCertificateRequest",
),
},
},
{
description: "transitioning to Failed status is allowed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
},
{
description: "you can't issue a certificate if you set Failed status",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
},
},
wantErrors: field.ErrorList{
field.Invalid(
field.NewPath("status"),
field.OmitValueType{},
"non-condition status fields must be empty when denying or failing the PodCertificateRequest",
),
},
},
{
description: "valid issuance",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
},
{
description: "valid issuance with intermediate CA",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: certFromIntermediate + "\n" + pemEncode("CERTIFICATE", intermediateCACertDER),
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
},
{
description: "Once issued, the certificate cannot be changed to a different valid certificate",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert2,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T18:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, "immutable after PodCertificateRequest is issued, denied, or failed"),
},
},
{
description: "a request cannot be both Denied and Failed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "conditions", "[1]", "type"), "Failed", `There may be at most one condition with type "Issued", "Denied", or "Failed"`),
},
},
{
description: "certificate cannot be issued and denied",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "conditions", "[1]", "type"), "Denied", `There may be at most one condition with type "Issued", "Denied", or "Failed"`),
},
},
{
description: "certificate cannot be issued and failed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "conditions", "[1]", "type"), "Failed", `There may be at most one condition with type "Issued", "Denied", or "Failed"`),
},
},
{
description: "a request cannot change from Denied to Failed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "a request cannot change from Failed to Denied",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "a request cannot change from Denied to pending",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "a request cannot change from Failed to pending",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "a request cannot change from issued to pending",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "a request cannot change from issued to Failed",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeFailed,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "a request cannot change from issued to Denied",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeDenied,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status"), field.OmitValueType{}, `immutable after PodCertificateRequest is issued, denied, or failed`),
},
},
{
description: "notbefore must be consistent with leaf certificate",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1971-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "notBefore"), mustParseTime(t, "1971-01-01T00:00:00Z"), "must be set to the NotBefore time encoded in the leaf certificate"),
},
},
{
description: "notAfter must be consistent with leaf certificate",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1971-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "notAfter"), mustParseTime(t, "1971-01-02T00:00:00Z"), "must be set to the NotAfter time encoded in the leaf certificate"),
},
},
{
description: "beginRefreshAt must be >= notBefore + 10 min",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:05:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "beginRefreshAt"), mustParseTime(t, "1970-01-01T00:05:00Z"), "must be at least 10 minutes after status.notBefore"),
},
},
{
description: "beginRefreshAt must be <= notAfter - 10 min",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T23:55:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "beginRefreshAt"), mustParseTime(t, "1970-01-01T23:55:00Z"), "must be at least 10 minutes before status.notAfter"),
},
},
{
description: "timestamps must be set",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: pod1Cert1,
},
},
wantErrors: field.ErrorList{
field.Required(field.NewPath("status", "notBefore"), "must be present and consistent with the issued certificate"),
field.Required(field.NewPath("status", "notAfter"), "must be present and consistent with the issued certificate"),
field.Required(field.NewPath("status", "beginRefreshAt"), "must be present and in the range [notbefore+10min, notafter-10min]"),
},
},
{
description: "certs shorter than one hour are rejected",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: badCertTooShort,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:25:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:50:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), 50*time.Minute, "leaf certificate lifetime must be >= 1 hour"),
},
},
{
description: "certs longer than maxExpirationSeconds are rejected",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: badCertTooLong,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T01:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), 25*time.Hour, "leaf certificate lifetime must be <= spec.maxExpirationSeconds (86400)"),
},
},
{
description: "leaf cert can not contain empty DNSName",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: certWithBadDNSName1,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), "", "leaf certificate should not contain empty DNSName"),
},
},
{
description: "leaf cert can not contain DNSName contains '..'",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: certWithBadDNSName2,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), "test-name..example", "leaf certificate's DNSName should not contain '..'"),
},
},
{
description: "leaf cert can not contain DNSName start with '.'",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: certWithBadDNSName3,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), ".example", "leaf certificate's DNSName should not start or end with '.'"),
},
},
{
description: "leaf cert can not contain DNSName end with '.'",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: certWithBadDNSName4,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), "example.", "leaf certificate's DNSName should not start or end with '.'"),
},
},
{
description: "leaf cert can not contain bad email address",
oldPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
},
newPCR: &capi.PodCertificateRequest{
ObjectMeta: metav1.ObjectMeta{
Namespace: "foo",
Name: "bar",
},
Spec: capi.PodCertificateRequestSpec{
SignerName: "foo.com/abc",
PodName: "pod-1",
PodUID: types.UID(podUID1),
ServiceAccountName: "sa-1",
ServiceAccountUID: "sa-uid-1",
NodeName: "node-1",
NodeUID: "node-uid-1",
MaxExpirationSeconds: ptr.To[int32](86400),
PKIXPublicKey: pubPKIX1,
ProofOfPossession: proof1,
},
Status: capi.PodCertificateRequestStatus{
Conditions: []metav1.Condition{
{
Type: capi.PodCertificateRequestConditionTypeIssued,
Status: metav1.ConditionTrue,
Reason: "Whatever",
Message: "Foo message",
LastTransitionTime: metav1.NewTime(time.Now()),
},
},
CertificateChain: certWithBadEmailAddress,
NotBefore: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T00:00:00Z"))),
BeginRefreshAt: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-01T12:00:00Z"))),
NotAfter: ptr.To(metav1.NewTime(mustParseTime(t, "1970-01-02T00:00:00Z"))),
},
},
wantErrors: field.ErrorList{
field.Invalid(field.NewPath("status", "certificateChain"), "email@@address", "leaf certificate should not contain invalid EmailAddress"),
},
},
}
for _, tc := range testCases {
t.Run(tc.description, func(t *testing.T) {
tc.oldPCR.ObjectMeta.ResourceVersion = "1"
tc.newPCR.ObjectMeta.ResourceVersion = "2"
gotErrors := ValidatePodCertificateRequestStatusUpdate(tc.newPCR, tc.oldPCR, testclock.NewFakeClock(mustParseTime(t, "1970-01-01T00:00:00Z")))
if diff := cmp.Diff(gotErrors, tc.wantErrors); diff != "" {
t.Errorf("Unexpected error output from ValidatePodCertificateRequestUpdate; diff (-got +want)\n%s", diff)
}
})
}
}
// mustMakeCA creates a self-signed ed25519 root CA certificate valid from
// 1970-01-01 to 1971-01-01 and returns the DER-encoded certificate together
// with the CA's private key. Any failure aborts the test via t.Fatalf.
func mustMakeCA(t *testing.T) ([]byte, ed25519.PrivateKey) {
	caPub, caPriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating CA signing key: %v", err)
	}

	// Self-signed: the same template acts as both subject and issuer.
	template := &x509.Certificate{
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		NotBefore:             mustParseTime(t, "1970-01-01T00:00:00Z"),
		NotAfter:              mustParseTime(t, "1971-01-01T00:00:00Z"),
	}

	der, err := x509.CreateCertificate(rand.Reader, template, template, caPub, caPriv)
	if err != nil {
		t.Fatalf("Error while creating CA certificate: %v", err)
	}
	return der, caPriv
}
// mustMakeIntermediateCA generates an ed25519 keypair and issues an
// intermediate CA certificate for it, signed by the supplied root CA
// (rootDER / rootPrivateKey). It returns the DER-encoded intermediate
// certificate and the intermediate's private key. Any failure aborts the
// test via t.Fatalf.
func mustMakeIntermediateCA(t *testing.T, rootDER []byte, rootPrivateKey crypto.PrivateKey) ([]byte, ed25519.PrivateKey) {
	intermediatePub, intermediatePriv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating intermediate signing key: %v", err)
	}
	// Valid for one year starting at the Unix epoch; the tests pin their
	// fake clock (testclock.NewFakeClock) inside this window.
	intermediateCertTemplate := &x509.Certificate{
		IsCA:                  true,
		BasicConstraintsValid: true,
		KeyUsage:              x509.KeyUsageDigitalSignature | x509.KeyUsageCertSign,
		NotBefore:             mustParseTime(t, "1970-01-01T00:00:00Z"),
		NotAfter:              mustParseTime(t, "1971-01-01T00:00:00Z"),
	}
	rootCert, err := x509.ParseCertificate(rootDER)
	if err != nil {
		t.Fatalf("Error while parsing root certificate: %v", err)
	}
	intermediateCertDER, err := x509.CreateCertificate(rand.Reader, intermediateCertTemplate, rootCert, intermediatePub, rootPrivateKey)
	if err != nil {
		t.Fatalf("Error while creating intermediate certificate: %v", err)
	}
	return intermediateCertDER, intermediatePriv
}
func mustParseTime(t *testing.T, stamp string) time.Time {
got, err := time.Parse(time.RFC3339, stamp)
if err != nil {
t.Fatalf("Error while parsing timestamp: %v", err)
}
return got
}
// mustMakeEd25519KeyAndProof generates an ed25519 keypair and returns, in
// order: the private key, the public key, the PKIX-marshaled public key, a
// proof-of-possession signature over toBeSigned, and a DER-encoded PKCS#10
// CSR carrying pkcs10DNSSANS as DNS SANs. Any failure aborts the test via
// t.Fatalf.
func mustMakeEd25519KeyAndProof(t *testing.T, toBeSigned []byte, pkcs10DNSSANS []string) (ed25519.PrivateKey, ed25519.PublicKey, []byte, []byte, []byte) {
	pub, priv, err := ed25519.GenerateKey(rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating ed25519 key: %v", err)
	}
	pubPKIX, err := x509.MarshalPKIXPublicKey(pub)
	if err != nil {
		t.Fatalf("Error while marshaling PKIX public key: %v", err)
	}
	// Proof of possession: ed25519 signs the raw message directly (no
	// separate digest step, unlike the ECDSA/RSA helpers below).
	sig := ed25519.Sign(priv, toBeSigned)
	pkcs10DER, err := x509.CreateCertificateRequest(rand.Reader, &x509.CertificateRequest{DNSNames: pkcs10DNSSANS}, priv)
	if err != nil {
		t.Fatalf("Error while creating PKCS#10 certificate signing request: %v", err)
	}
	return priv, pub, pubPKIX, sig, pkcs10DER
}
// mustMakeECDSAKeyAndProof generates an ECDSA key on the given curve and
// returns the private key, its public key, the PKIX-marshaled public key,
// and an ASN.1-encoded proof-of-possession signature over toBeSigned.
// Any failure aborts the test via t.Fatalf.
func mustMakeECDSAKeyAndProof(t *testing.T, curve elliptic.Curve, toBeSigned []byte) (*ecdsa.PrivateKey, *ecdsa.PublicKey, []byte, []byte) {
	priv, err := ecdsa.GenerateKey(curve, rand.Reader)
	if err != nil {
		t.Fatalf("Error while generating ECDSA key: %v", err)
	}
	pubPKIX, err := x509.MarshalPKIXPublicKey(priv.Public())
	if err != nil {
		t.Fatalf("Error while marshaling PKIX public key: %v", err)
	}
	// hashBytes (defined elsewhere in this file) digests the challenge
	// before signing, since ECDSA signs a fixed-size digest.
	sig, err := ecdsa.SignASN1(rand.Reader, priv, hashBytes(toBeSigned))
	if err != nil {
		t.Fatalf("Error while making proof of possession: %v", err)
	}
	return priv, &priv.PublicKey, pubPKIX, sig
}
// mustMakeRSAKeyAndProof generates an RSA key with the given modulus size
// and returns the private key, its public key, the PKIX-marshaled public
// key, and a PSS proof-of-possession signature over toBeSigned. Any failure
// aborts the test via t.Fatalf.
func mustMakeRSAKeyAndProof(t *testing.T, modulusSize int, toBeSigned []byte) (*rsa.PrivateKey, *rsa.PublicKey, []byte, []byte) {
	key, err := rsa.GenerateKey(rand.Reader, modulusSize)
	if err != nil {
		t.Fatalf("Error while generating RSA key: %v", err)
	}

	pkixBytes, err := x509.MarshalPKIXPublicKey(&key.PublicKey)
	if err != nil {
		t.Fatalf("Error while marshaling public key: %v", err)
	}

	// Proof of possession: a PSS signature over the digest of the challenge.
	proof, err := rsa.SignPSS(rand.Reader, key, crypto.SHA256, hashBytes(toBeSigned), nil)
	if err != nil {
		t.Fatalf("Error while making proof of possession: %v", err)
	}
	return key, &key.PublicKey, pkixBytes, proof
}
// mustSignCertForPublicKey issues a leaf certificate for subjectPublicKey,
// signed by the CA described by caCertDER/caPrivateKey, and returns it
// PEM-encoded. The certificate is valid from the Unix epoch for the given
// validity duration. When usebadDNSName is set, badDNSName is added as a
// DNS SAN; a non-empty badEmailAddress is added as an email SAN — both
// knobs exist to build deliberately malformed certificates for negative
// test cases. Any failure aborts the test via t.Fatalf.
func mustSignCertForPublicKey(t *testing.T, validity time.Duration, subjectPublicKey crypto.PublicKey, caCertDER []byte, caPrivateKey crypto.PrivateKey, usebadDNSName bool, badDNSName, badEmailAddress string) string {
	// Client+server auth leaf anchored at the epoch so NotAfter is simply
	// epoch+validity, matching the timestamps asserted by the test table.
	certTemplate := &x509.Certificate{
		Subject: pkix.Name{
			CommonName: "foo",
		},
		KeyUsage:    x509.KeyUsageDigitalSignature,
		ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageClientAuth, x509.ExtKeyUsageServerAuth},
		NotBefore:   mustParseTime(t, "1970-01-01T00:00:00Z"),
		NotAfter:    mustParseTime(t, "1970-01-01T00:00:00Z").Add(validity),
	}
	if usebadDNSName {
		certTemplate.DNSNames = append(certTemplate.DNSNames, badDNSName)
	}
	if badEmailAddress != "" {
		certTemplate.EmailAddresses = append(certTemplate.EmailAddresses, badEmailAddress)
	}
	caCert, err := x509.ParseCertificate(caCertDER)
	if err != nil {
		t.Fatalf("Error while parsing CA certificate: %v", err)
	}
	certDER, err := x509.CreateCertificate(rand.Reader, certTemplate, caCert, subjectPublicKey, caPrivateKey)
	if err != nil {
		t.Fatalf("Error while signing subject certificate: %v", err)
	}
	certPEM := pem.EncodeToMemory(&pem.Block{
		Type:  "CERTIFICATE",
		Bytes: certDER,
	})
	return string(certPEM)
}
// pemEncode wraps data in a single PEM block of the given type and returns
// the result as a string.
func pemEncode(blockType string, data []byte) string {
	block := pem.Block{Type: blockType, Bytes: data}
	return string(pem.EncodeToMemory(&block))
}
// PEM certificate fixtures for exercising the certificate-chain parsing and
// validation paths. Each invalid variant breaks the chain in exactly one way.
var (
	// validCertificate holds a two-block PEM chain (leaf, then
	// intermediate) with non-PEM text before, between, and after the
	// blocks; PEM decoding is expected to skip the non-PEM content.
	validCertificate = []byte(`
Leading non-PEM content
-----BEGIN CERTIFICATE-----
MIIBqDCCAU2gAwIBAgIUfbqeieihh/oERbfvRm38XvS/xHAwCgYIKoZIzj0EAwIw
GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlLUNBMCAXDTE2MTAxMTA1MDYwMFoYDzIx
MTYwOTE3MDUwNjAwWjAUMRIwEAYDVQQDEwlNeSBDbGllbnQwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAARv6N4R/sjMR65iMFGNLN1GC/vd7WhDW6J4X/iAjkRLLnNb
KbRG/AtOUZ+7upJ3BWIRKYbOabbQGQe2BbKFiap4o3UwczAOBgNVHQ8BAf8EBAMC
BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
K/pZOWpNcYai6eHFpmJEeFpeQlEwHwYDVR0jBBgwFoAUX6nQlxjfWnP6aM1meO/Q
a6b3a9kwCgYIKoZIzj0EAwIDSQAwRgIhAIWTKw/sjJITqeuNzJDAKU4xo1zL+xJ5
MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps=
-----END CERTIFICATE-----
Intermediate non-PEM content
-----BEGIN CERTIFICATE-----
MIIBqDCCAU6gAwIBAgIUfqZtjoFgczZ+oQZbEC/BDSS2J6wwCgYIKoZIzj0EAwIw
EjEQMA4GA1UEAxMHUm9vdC1DQTAgFw0xNjEwMTEwNTA2MDBaGA8yMTE2MDkxNzA1
MDYwMFowGjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlLUNBMFkwEwYHKoZIzj0CAQYI
KoZIzj0DAQcDQgAEyWHEMMCctJg8Xa5YWLqaCPbk3MjB+uvXac42JM9pj4k9jedD
kpUJRkWIPzgJI8Zk/3cSzluUTixP6JBSDKtwwaN4MHYwDgYDVR0PAQH/BAQDAgGm
MBMGA1UdJQQMMAoGCCsGAQUFBwMCMA8GA1UdEwEB/wQFMAMBAf8wHQYDVR0OBBYE
FF+p0JcY31pz+mjNZnjv0Gum92vZMB8GA1UdIwQYMBaAFB7P6+i4/pfNjqZgJv/b
dgA7Fe4tMAoGCCqGSM49BAMCA0gAMEUCIQCTT1YWQZaAqfQ2oBxzOkJE2BqLFxhz
3smQlrZ5gCHddwIgcvT7puhYOzAgcvMn9+SZ1JOyZ7edODjshCVCRnuHK2c=
-----END CERTIFICATE-----
Trailing non-PEM content
`)
	// invalidCertificateNoPEM contains no PEM blocks at all.
	invalidCertificateNoPEM = []byte(`no PEM content`)
	// invalidCertificateNonCertificatePEM has a block whose type is
	// "CERTIFICATE1" instead of "CERTIFICATE".
	invalidCertificateNonCertificatePEM = []byte(`
Leading non-PEM content
-----BEGIN CERTIFICATE1-----
MIIBqDCCAU2gAwIBAgIUfbqeieihh/oERbfvRm38XvS/xHAwCgYIKoZIzj0EAwIw
GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlLUNBMCAXDTE2MTAxMTA1MDYwMFoYDzIx
MTYwOTE3MDUwNjAwWjAUMRIwEAYDVQQDEwlNeSBDbGllbnQwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAARv6N4R/sjMR65iMFGNLN1GC/vd7WhDW6J4X/iAjkRLLnNb
KbRG/AtOUZ+7upJ3BWIRKYbOabbQGQe2BbKFiap4o3UwczAOBgNVHQ8BAf8EBAMC
BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
K/pZOWpNcYai6eHFpmJEeFpeQlEwHwYDVR0jBBgwFoAUX6nQlxjfWnP6aM1meO/Q
a6b3a9kwCgYIKoZIzj0EAwIDSQAwRgIhAIWTKw/sjJITqeuNzJDAKU4xo1zL+xJ5
MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps=
-----END CERTIFICATE1-----
Trailing non-PEM content
`)
	// invalidCertificatePEMHeaders carries a PEM header line
	// ("Some-Header: Some-Value") inside the block.
	invalidCertificatePEMHeaders = []byte(`
Leading non-PEM content
-----BEGIN CERTIFICATE-----
Some-Header: Some-Value
MIIBqDCCAU2gAwIBAgIUfbqeieihh/oERbfvRm38XvS/xHAwCgYIKoZIzj0EAwIw
GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlLUNBMCAXDTE2MTAxMTA1MDYwMFoYDzIx
MTYwOTE3MDUwNjAwWjAUMRIwEAYDVQQDEwlNeSBDbGllbnQwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAARv6N4R/sjMR65iMFGNLN1GC/vd7WhDW6J4X/iAjkRLLnNb
KbRG/AtOUZ+7upJ3BWIRKYbOabbQGQe2BbKFiap4o3UwczAOBgNVHQ8BAf8EBAMC
BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
K/pZOWpNcYai6eHFpmJEeFpeQlEwHwYDVR0jBBgwFoAUX6nQlxjfWnP6aM1meO/Q
a6b3a9kwCgYIKoZIzj0EAwIDSQAwRgIhAIWTKw/sjJITqeuNzJDAKU4xo1zL+xJ5
MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps=
-----END CERTIFICATE-----
Trailing non-PEM content
`)
	// invalidCertificateNonBase64PEM ends its payload with '?' characters
	// that are not valid base64.
	invalidCertificateNonBase64PEM = []byte(`
Leading non-PEM content
-----BEGIN CERTIFICATE-----
MIIBqDCCAU2gAwIBAgIUfbqeieihh/oERbfvRm38XvS/xHAwCgYIKoZIzj0EAwIw
GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlLUNBMCAXDTE2MTAxMTA1MDYwMFoYDzIx
MTYwOTE3MDUwNjAwWjAUMRIwEAYDVQQDEwlNeSBDbGllbnQwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAARv6N4R/sjMR65iMFGNLN1GC/vd7WhDW6J4X/iAjkRLLnNb
KbRG/AtOUZ+7upJ3BWIRKYbOabbQGQe2BbKFiap4o3UwczAOBgNVHQ8BAf8EBAMC
BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
K/pZOWpNcYai6eHFpmJEeFpeQlEwHwYDVR0jBBgwFoAUX6nQlxjfWnP6aM1meO/Q
a6b3a9kwCgYIKoZIzj0EAwIDSQAwRgIhAIWTKw/sjJITqeuNzJDAKU4xo1zL+xJ5
MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1d?????????
-----END CERTIFICATE-----
Trailing non-PEM content
`)
	// invalidCertificateEmptyPEM is a CERTIFICATE block with an empty body.
	invalidCertificateEmptyPEM = []byte(`
Leading non-PEM content
-----BEGIN CERTIFICATE-----
-----END CERTIFICATE-----
Trailing non-PEM content
`)
	// invalidCertificateNonASN1Data decodes cleanly as PEM/base64, but the
	// DER payload is corrupted (one byte differs from validCertificate's
	// leaf), so x509 certificate parsing fails.
	// first character is invalid
	invalidCertificateNonASN1Data = []byte(`
Leading non-PEM content
-----BEGIN CERTIFICATE-----
MIIBqDCCAU2gAwIBAgIUfbqeieihh/oERbfvRm38XvS/xHAwCgYIKoZIzj0EAwIw
GjEYMBYGA1UEAxMPSW50ZXJtZWRpYXRlLUNBMCAXDTE2MTAxMTA1MDYwMFoYDzIx
MTYwOTE3MDUwNjAwWjAUNRIwEAYDVQQDEwlNeSBDbGllbnQwWTATBgcqhkjOPQIB
BggqhkjOPQMBBwNCAARv6N4R/sjMR65iMFGNLN1GC/vd7WhDW6J4X/iAjkRLLnNb
KbRG/AtOUZ+7upJ3BWIRKYbOabbQGQe2BbKFiap4o3UwczAOBgNVHQ8BAf8EBAMC
BaAwEwYDVR0lBAwwCgYIKwYBBQUHAwIwDAYDVR0TAQH/BAIwADAdBgNVHQ4EFgQU
K/pZOWpNcYai6eHFpmJEeFpeQlEwHwYDVR0jBBgwFoAUX6nQlxjfWnP6aM1meO/Q
a6b3a9kwCgYIKoZIzj0EAwIDSQAwRgIhAIWTKw/sjJITqeuNzJDAKU4xo1zL+xJ5
MnVCuBwfwDXCAiEAw/1TA+CjPq9JC5ek1ifR0FybTURjeQqYkKpve1dveps=
-----END CERTIFICATE-----
Trailing non-PEM content
`)
)
from __future__ import unicode_literals
import uuid
from django.conf import settings
from django.db.backends.base.operations import BaseDatabaseOperations
from django.utils import six, timezone
from django.utils.encoding import force_text
class DatabaseOperations(BaseDatabaseOperations):
    """MySQL-specific implementations of Django's database-operations hooks.

    Generates MySQL-dialect SQL fragments (date handling, quoting, flushing)
    and adapts Python values to/from what the MySQLdb driver expects.
    """

    compiler_module = "django.db.backends.mysql.compiler"

    # MySQL stores positive fields as UNSIGNED ints.
    integer_field_ranges = dict(BaseDatabaseOperations.integer_field_ranges,
        PositiveSmallIntegerField=(0, 65535),
        PositiveIntegerField=(0, 4294967295),
    )

    def date_extract_sql(self, lookup_type, field_name):
        """Return SQL extracting the ``lookup_type`` component of a date column."""
        # http://dev.mysql.com/doc/mysql/en/date-and-time-functions.html
        if lookup_type == 'week_day':
            # DAYOFWEEK() returns an integer, 1-7, Sunday=1.
            # Note: WEEKDAY() returns 0-6, Monday=0.
            return "DAYOFWEEK(%s)" % field_name
        else:
            return "EXTRACT(%s FROM %s)" % (lookup_type.upper(), field_name)

    def date_trunc_sql(self, lookup_type, field_name):
        """Return SQL truncating a date column down to ``lookup_type`` precision."""
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            # Unknown lookup type: fall back to the raw column.
            sql = field_name
        else:
            # Keep the real date components up to the requested precision and
            # pad the remainder with their zero defaults.
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql

    def _convert_field_to_tz(self, field_name, tzname):
        """Wrap ``field_name`` in CONVERT_TZ() when time-zone support is enabled.

        Returns a (sql, params) pair; ``params`` carries the target tz name.
        """
        if settings.USE_TZ:
            field_name = "CONVERT_TZ(%s, 'UTC', %%s)" % field_name
            params = [tzname]
        else:
            params = []
        return field_name, params

    def datetime_cast_date_sql(self, field_name, tzname):
        """Return (sql, params) casting a datetime column to a date."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = "DATE(%s)" % field_name
        return sql, params

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) extracting a component from a tz-converted datetime."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        sql = self.date_extract_sql(lookup_type, field_name)
        return sql, params

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        """Return (sql, params) truncating a tz-converted datetime column."""
        field_name, params = self._convert_field_to_tz(field_name, tzname)
        fields = ['year', 'month', 'day', 'hour', 'minute', 'second']
        format = ('%%Y-', '%%m', '-%%d', ' %%H:', '%%i', ':%%s')  # Use double percents to escape.
        format_def = ('0000-', '01', '-01', ' 00:', '00', ':00')
        try:
            i = fields.index(lookup_type) + 1
        except ValueError:
            sql = field_name
        else:
            format_str = ''.join([f for f in format[:i]] + [f for f in format_def[i:]])
            sql = "CAST(DATE_FORMAT(%s, '%s') AS DATETIME)" % (field_name, format_str)
        return sql, params

    def date_interval_sql(self, timedelta):
        """Return (sql, params) expressing ``timedelta`` as a MySQL INTERVAL."""
        return "INTERVAL '%d 0:0:%d:%d' DAY_MICROSECOND" % (
            timedelta.days, timedelta.seconds, timedelta.microseconds), []

    def format_for_duration_arithmetic(self, sql):
        """Wrap a microseconds expression for use in duration arithmetic."""
        if self.connection.features.supports_microsecond_precision:
            return 'INTERVAL %s MICROSECOND' % sql
        else:
            # Older MySQL: round down to whole seconds.
            return 'INTERVAL FLOOR(%s / 1000000) SECOND' % sql

    def drop_foreignkey_sql(self):
        # Clause used when dropping a foreign-key constraint.
        return "DROP FOREIGN KEY"

    def force_no_ordering(self):
        """
        "ORDER BY NULL" prevents MySQL from implicitly ordering by grouped
        columns. If no ordering would otherwise be applied, we don't want any
        implicit sorting going on.
        """
        return [(None, ("NULL", [], False))]

    def fulltext_search_sql(self, field_name):
        """Return a MATCH ... AGAINST boolean full-text search fragment."""
        return 'MATCH (%s) AGAINST (%%s IN BOOLEAN MODE)' % field_name

    def last_executed_query(self, cursor, sql, params):
        """Return the last query exactly as it was sent to the server."""
        # With MySQLdb, cursor objects have an (undocumented) "_last_executed"
        # attribute where the exact query sent to the database is saved.
        # See MySQLdb/cursors.py in the source distribution.
        return force_text(getattr(cursor, '_last_executed', None), errors='replace')

    def no_limit_value(self):
        # 2**64 - 1, as recommended by the MySQL documentation
        return 18446744073709551615

    def quote_name(self, name):
        """Backtick-quote ``name`` unless it is already quoted."""
        if name.startswith("`") and name.endswith("`"):
            return name  # Quoting once is enough.
        return "`%s`" % name

    def random_function_sql(self):
        return 'RAND()'

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        """Return statements emptying ``tables`` and resetting their sequences."""
        # NB: The generated SQL below is specific to MySQL
        # 'TRUNCATE x;', 'TRUNCATE y;', 'TRUNCATE z;'... style SQL statements
        # to clear all tables of all data
        if tables:
            # Disable FK checks so TRUNCATE order doesn't matter.
            sql = ['SET FOREIGN_KEY_CHECKS = 0;']
            for table in tables:
                sql.append('%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    style.SQL_FIELD(self.quote_name(table)),
                ))
            sql.append('SET FOREIGN_KEY_CHECKS = 1;')
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []

    def validate_autopk_value(self, value):
        # MySQLism: zero in AUTO_INCREMENT field does not work. Refs #17653.
        if value == 0:
            raise ValueError('The database backend does not accept 0 as a '
                             'value for AutoField.')
        return value

    def adapt_datetimefield_value(self, value):
        """Convert a datetime to the naive text form MySQL expects."""
        if value is None:
            return None

        # MySQL doesn't support tz-aware datetimes
        if timezone.is_aware(value):
            if settings.USE_TZ:
                value = timezone.make_naive(value, self.connection.timezone)
            else:
                raise ValueError("MySQL backend does not support timezone-aware datetimes when USE_TZ is False.")

        # Strip sub-second precision when the server can't store it.
        if not self.connection.features.supports_microsecond_precision:
            value = value.replace(microsecond=0)

        return six.text_type(value)

    def adapt_timefield_value(self, value):
        """Convert a time to text; tz-aware times are rejected outright."""
        if value is None:
            return None

        # MySQL doesn't support tz-aware times
        if timezone.is_aware(value):
            raise ValueError("MySQL backend does not support timezone-aware times.")

        return six.text_type(value)

    def max_name_length(self):
        # MySQL's identifier length limit.
        return 64

    def bulk_insert_sql(self, fields, placeholder_rows):
        """Build the ``VALUES (...), (...)`` clause for a bulk INSERT."""
        placeholder_rows_sql = (", ".join(row) for row in placeholder_rows)
        values_sql = ", ".join("(%s)" % sql for sql in placeholder_rows_sql)
        return "VALUES " + values_sql

    def combine_expression(self, connector, sub_expressions):
        """
        MySQL requires special cases for ^ operators in query expressions
        """
        if connector == '^':
            return 'POW(%s)' % ','.join(sub_expressions)
        return super(DatabaseOperations, self).combine_expression(connector, sub_expressions)

    def get_db_converters(self, expression):
        """Append MySQL-specific result converters based on the field type."""
        converters = super(DatabaseOperations, self).get_db_converters(expression)
        internal_type = expression.output_field.get_internal_type()
        if internal_type == 'TextField':
            converters.append(self.convert_textfield_value)
        elif internal_type in ['BooleanField', 'NullBooleanField']:
            converters.append(self.convert_booleanfield_value)
        elif internal_type == 'DateTimeField':
            converters.append(self.convert_datetimefield_value)
        elif internal_type == 'UUIDField':
            converters.append(self.convert_uuidfield_value)
        return converters

    def convert_textfield_value(self, value, expression, connection, context):
        """Decode text column results to unicode."""
        if value is not None:
            value = force_text(value)
        return value

    def convert_booleanfield_value(self, value, expression, connection, context):
        # MySQL returns 0/1 for boolean columns; map back to bool.
        if value in (0, 1):
            value = bool(value)
        return value

    def convert_datetimefield_value(self, value, expression, connection, context):
        """Re-attach the connection's time zone when USE_TZ is enabled."""
        if value is not None:
            if settings.USE_TZ:
                value = timezone.make_aware(value, self.connection.timezone)
        return value

    def convert_uuidfield_value(self, value, expression, connection, context):
        """Parse the stored string back into a ``uuid.UUID``."""
        if value is not None:
            value = uuid.UUID(value)
return value | unknown | codeparrot/codeparrot-clean | ||
extern crate autocfg;

fn main() {
    // Normally, cargo will set `OUT_DIR` for build scripts.
    // This example instead probes into a fixed "target" directory.
    let ac = autocfg::AutoCfg::with_dir("target").unwrap();
    // 1 << i for i in 3..8 yields 8, 16, 32, 64, 128 — i.e. probe
    // whether the compiler supports i8, i16, i32, i64 and i128.
    for i in 3..8 {
        ac.emit_has_type(&format!("i{}", 1 << i));
    }
} | rust | github | https://github.com/nodejs/node | deps/crates/vendor/autocfg/examples/integers.rs |
#include "locking-selftest-rlock.h"
#include "locking-selftest-softirq.h" | c | github | https://github.com/torvalds/linux | lib/locking-selftest-rlock-softirq.h |
from __future__ import absolute_import
from __future__ import with_statement
import sys
import socket
from nose import SkipTest
from celery.exceptions import ImproperlyConfigured
from celery import states
from celery.utils import uuid
from celery.backends import redis
from celery.backends.redis import RedisBackend
from celery.tests.utils import Case, mask_modules
# Template for the one-time notice explaining why redis tests are skipped.
_no_redis_msg = "* Redis %s. Will not execute related tests."
_no_redis_msg_emitted = False

try:
    from redis.exceptions import ConnectionError
except ImportError:
    # redis-py not installed: provide a stand-in so except clauses still work.
    class ConnectionError(socket.error):  # noqa
        pass
class SomeClass(object):
    """Trivial value holder.

    Used by the tests to verify that arbitrary (non-JSON) objects
    round-trip through the result backend's serialization.
    """

    def __init__(self, data):
        """Remember *data* as an instance attribute."""
        self.data = data
def get_redis_or_SkipTest():
    """Return a ready ``RedisBackend`` or raise ``SkipTest``.

    Emits a one-time stderr notice explaining why related tests are
    skipped (library missing, server not running, or misconfigured).
    """

    def emit_no_redis_msg(reason):
        # Print the notice at most once per test run.
        global _no_redis_msg_emitted
        if not _no_redis_msg_emitted:
            sys.stderr.write("\n" + _no_redis_msg % reason + "\n")
            _no_redis_msg_emitted = True

    if redis.redis is None:
        emit_no_redis_msg("not installed")
        raise SkipTest("redis library not installed")

    try:
        tb = RedisBackend(redis_db="celery_unittest")
        try:
            # Evaluate lazy connection
            tb.client.info()
        except ConnectionError, exc:
            emit_no_redis_msg("not running")
            raise SkipTest("can't connect to redis: %s" % (exc, ))
        return tb
    except ImproperlyConfigured, exc:
        if "need to install" in str(exc):
            return emit_no_redis_msg("not installed")
        return emit_no_redis_msg("not configured")
class TestRedisBackend(Case):
    """Integration tests against a live redis server (skipped otherwise)."""

    def test_mark_as_done(self):
        """Status/result transition PENDING/None -> SUCCESS/value."""
        tb = get_redis_or_SkipTest()

        tid = uuid()

        self.assertEqual(tb.get_status(tid), states.PENDING)
        self.assertIsNone(tb.get_result(tid))

        tb.mark_as_done(tid, 42)
        self.assertEqual(tb.get_status(tid), states.SUCCESS)
        self.assertEqual(tb.get_result(tid), 42)

    def test_is_pickled(self):
        """Arbitrary objects round-trip through the backend's serializer."""
        tb = get_redis_or_SkipTest()

        tid2 = uuid()
        result = {"foo": "baz", "bar": SomeClass(12345)}
        tb.mark_as_done(tid2, result)
        # is serialized properly.
        rindb = tb.get_result(tid2)
        self.assertEqual(rindb.get("foo"), "baz")
        self.assertEqual(rindb.get("bar").data, 12345)

    def test_mark_as_failure(self):
        """Failures store the exception instance itself as the result."""
        tb = get_redis_or_SkipTest()

        tid3 = uuid()
        try:
            raise KeyError("foo")
        except KeyError, exception:
            pass
        tb.mark_as_failure(tid3, exception)
        self.assertEqual(tb.get_status(tid3), states.FAILURE)
        self.assertIsInstance(tb.get_result(tid3), KeyError)
class TestRedisBackendNoRedis(Case):
    """Behaviour of the backend module when redis-py is absent."""

    def test_redis_None_if_redis_not_installed(self):
        """Re-importing the backend with redis masked leaves ``redis`` None."""
        prev = sys.modules.pop("celery.backends.redis")
        try:
            with mask_modules("redis"):
                from celery.backends.redis import redis
                self.assertIsNone(redis)
        finally:
            # Restore the real module for subsequent tests.
            sys.modules["celery.backends.redis"] = prev

    def test_constructor_raises_if_redis_not_installed(self):
        """Constructing the backend without a client class must fail loudly."""
        from celery.backends import redis
        prev = redis.RedisBackend.redis
        redis.RedisBackend.redis = None
        try:
            with self.assertRaises(ImproperlyConfigured):
                redis.RedisBackend()
        finally:
redis.RedisBackend.redis = prev | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/python
# Copyright 2013 Google Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: gce_pd
version_added: "1.4"
short_description: utilize GCE persistent disk resources
description:
- This module can create and destroy unformatted GCE persistent disks
U(https://developers.google.com/compute/docs/disks#persistentdisks).
It also supports attaching and detaching disks from running instances.
Full install/configuration instructions for the gce* modules can
be found in the comments of ansible/test/gce_tests.py.
options:
detach_only:
description:
- do not destroy the disk, merely detach it from an instance
required: false
default: "no"
choices: ["yes", "no"]
aliases: []
instance_name:
description:
- instance name if you wish to attach or detach the disk
required: false
default: null
aliases: []
mode:
description:
- GCE mount mode of disk, READ_ONLY (default) or READ_WRITE
required: false
default: "READ_ONLY"
choices: ["READ_WRITE", "READ_ONLY"]
aliases: []
name:
description:
- name of the disk
required: true
default: null
aliases: []
size_gb:
description:
- whole integer size of disk (in GB) to create, default is 10 GB
required: false
default: 10
aliases: []
image:
description:
- the source image to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
snapshot:
description:
- the source snapshot to use for the disk
required: false
default: null
aliases: []
version_added: "1.7"
state:
description:
- desired state of the persistent disk
required: false
default: "present"
choices: ["active", "present", "absent", "deleted"]
aliases: []
zone:
description:
- zone in which to create the disk
required: false
default: "us-central1-b"
aliases: []
service_account_email:
version_added: "1.6"
description:
- service account email
required: false
default: null
aliases: []
pem_file:
version_added: "1.6"
description:
- path to the pem file associated with the service account email
required: false
default: null
aliases: []
project_id:
version_added: "1.6"
description:
- your GCE project ID
required: false
default: null
aliases: []
disk_type:
version_added: "1.9"
description:
- type of disk provisioned
required: false
default: "pd-standard"
choices: ["pd-standard", "pd-ssd"]
aliases: []
requirements: [ "libcloud" ]
author: Eric Johnson <erjohnso@google.com>
'''
EXAMPLES = '''
# Simple attachment action to an existing instance
- local_action:
module: gce_pd
instance_name: notlocalhost
size_gb: 5
name: pd
'''
import sys

# libcloud with GCE support is a hard requirement. On failure, report in
# the key=value format old-style Ansible modules use, then exit non-zero.
try:
    from libcloud.compute.types import Provider
    from libcloud.compute.providers import get_driver
    from libcloud.common.google import GoogleBaseError, QuotaExceededError, \
        ResourceExistsError, ResourceNotFoundError, ResourceInUseError
    _ = Provider.GCE  # sanity check: this libcloud build knows about GCE
except ImportError:
    print("failed=True " + \
        "msg='libcloud with GCE support is required for this module.'")
    sys.exit(1)
def main():
    """Entry point: reconcile a GCE persistent disk with the requested state.

    Depending on ``state`` this creates, attaches, detaches and/or destroys
    a disk, then prints a JSON result dict and exits (old-style Ansible
    module protocol: output via stdout, never return).
    """
    module = AnsibleModule(
        argument_spec = dict(
            detach_only = dict(type='bool'),
            instance_name = dict(),
            mode = dict(default='READ_ONLY', choices=['READ_WRITE', 'READ_ONLY']),
            name = dict(required=True),
            size_gb = dict(default=10),
            disk_type = dict(default='pd-standard'),
            image = dict(),
            snapshot = dict(),
            state = dict(default='present'),
            zone = dict(default='us-central1-b'),
            service_account_email = dict(),
            pem_file = dict(),
            project_id = dict(),
        )
    )

    gce = gce_connect(module)

    detach_only = module.params.get('detach_only')
    instance_name = module.params.get('instance_name')
    mode = module.params.get('mode')
    name = module.params.get('name')
    size_gb = module.params.get('size_gb')
    disk_type = module.params.get('disk_type')
    image = module.params.get('image')
    snapshot = module.params.get('snapshot')
    state = module.params.get('state')
    zone = module.params.get('zone')

    # Detaching requires knowing which instance to detach from.
    if detach_only and not instance_name:
        module.fail_json(
            msg='Must specify an instance name when detaching a disk',
            changed=False)

    disk = inst = None
    changed = is_attached = False

    json_output = { 'name': name, 'zone': zone, 'state': state, 'disk_type': disk_type }
    if detach_only:
        json_output['detach_only'] = True
        json_output['detached_from_instance'] = instance_name

    if instance_name:
        # user wants to attach/detach from an existing instance
        try:
            inst = gce.ex_get_node(instance_name, zone)
            # is the disk attached?
            for d in inst.extra['disks']:
                if d['deviceName'] == name:
                    is_attached = True
                    json_output['attached_mode'] = d['mode']
                    json_output['attached_to_instance'] = inst.name
        except:
            # Instance lookup is best-effort; a missing instance is only
            # fatal later, when the user actually asks for an attachment.
            pass

    # find disk if it already exists
    try:
        disk = gce.ex_get_volume(name)
        json_output['size_gb'] = int(disk.size)
    except ResourceNotFoundError:
        pass
    except Exception, e:
        module.fail_json(msg=unexpected_error_msg(e), changed=False)

    # user wants a disk to exist. If "instance_name" is supplied the user
    # also wants it attached
    if state in ['active', 'present']:

        if not size_gb:
            module.fail_json(msg="Must supply a size_gb", changed=False)
        try:
            # Accept "10", 10, 10.4 etc.; round to a whole number of GB.
            size_gb = int(round(float(size_gb)))
            if size_gb < 1:
                raise Exception
        except:
            module.fail_json(msg="Must supply a size_gb larger than 1 GB",
                changed=False)

        if instance_name and inst is None:
            module.fail_json(msg='Instance %s does not exist in zone %s' % (
                instance_name, zone), changed=False)

        if not disk:
            # image and snapshot are mutually exclusive disk sources.
            if image is not None and snapshot is not None:
                module.fail_json(
                    msg='Cannot give both image (%s) and snapshot (%s)' % (
                        image, snapshot), changed=False)
            lc_image = None
            lc_snapshot = None
            if image is not None:
                lc_image = gce.ex_get_image(image)
            elif snapshot is not None:
                lc_snapshot = gce.ex_get_snapshot(snapshot)
            try:
                disk = gce.create_volume(
                    size_gb, name, location=zone, image=lc_image,
                    snapshot=lc_snapshot, ex_disk_type=disk_type)
            except ResourceExistsError:
                # Already created (e.g. a concurrent run): treat as success.
                pass
            except QuotaExceededError:
                module.fail_json(msg='Requested disk size exceeds quota',
                    changed=False)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['size_gb'] = size_gb
            if image is not None:
                json_output['image'] = image
            if snapshot is not None:
                json_output['snapshot'] = snapshot
            changed = True
        if inst and not is_attached:
            try:
                gce.attach_volume(inst, disk, device=name, ex_mode=mode)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            json_output['attached_to_instance'] = inst.name
            json_output['attached_mode'] = mode
            changed = True

    # user wants to delete a disk (or perhaps just detach it).
    if state in ['absent', 'deleted'] and disk:
        if inst and is_attached:
            try:
                gce.detach_volume(disk, ex_node=inst)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True
        if not detach_only:
            try:
                gce.destroy_volume(disk)
            except ResourceInUseError, e:
                module.fail_json(msg=str(e.value), changed=False)
            except Exception, e:
                module.fail_json(msg=unexpected_error_msg(e), changed=False)
            changed = True

    json_output['changed'] = changed
    print json.dumps(json_output)
    sys.exit(0)
# import module snippets
from ansible.module_utils.basic import *
from ansible.module_utils.gce import *
main() | unknown | codeparrot/codeparrot-clean | ||
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for layers.feature_column."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class FeatureColumnTest(tf.test.TestCase):
  """Tests construction and parsing-config generation for feature columns."""

  def testImmutability(self):
    """Feature columns reject attribute assignment after construction."""
    a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
                                                         hash_bucket_size=100)
    with self.assertRaises(AttributeError):
      a.column_name = "bbb"

  def testSparseColumn(self):
    a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
                                                         hash_bucket_size=100)
    self.assertEqual(a.name, "aaa")

  def testEmbeddingColumn(self):
    """Embedding column keeps its source sparse column and its own combiner."""
    a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
                                                         hash_bucket_size=100,
                                                         combiner="sum")
    b = tf.contrib.layers.embedding_column(a, dimension=4, combiner="mean")
    self.assertEqual(b.sparse_id_column.name, "aaa")
    self.assertEqual(b.dimension, 4)
    self.assertEqual(b.combiner, "mean")

  def testRealValuedColumn(self):
    """default_value is normalized to a tuple matching dimension and dtype."""
    a = tf.contrib.layers.real_valued_column("aaa")
    self.assertEqual(a.name, "aaa")
    self.assertEqual(a.dimension, 1)
    b = tf.contrib.layers.real_valued_column("bbb", 10)
    self.assertEqual(b.dimension, 10)
    self.assertTrue(b.default_value is None)

    # default_value is an integer.
    c1 = tf.contrib.layers.real_valued_column("c1", default_value=2)
    self.assertListEqual(list(c1.default_value), [2.])
    c2 = tf.contrib.layers.real_valued_column("c2",
                                              default_value=2,
                                              dtype=tf.int32)
    self.assertListEqual(list(c2.default_value), [2])
    c3 = tf.contrib.layers.real_valued_column("c3",
                                              dimension=4,
                                              default_value=2)
    self.assertListEqual(list(c3.default_value), [2, 2, 2, 2])
    c4 = tf.contrib.layers.real_valued_column("c4",
                                              dimension=4,
                                              default_value=2,
                                              dtype=tf.int32)
    self.assertListEqual(list(c4.default_value), [2, 2, 2, 2])

    # default_value is a float.
    d1 = tf.contrib.layers.real_valued_column("d1", default_value=2.)
    self.assertListEqual(list(d1.default_value), [2.])
    d2 = tf.contrib.layers.real_valued_column("d2",
                                              dimension=4,
                                              default_value=2.)
    self.assertListEqual(list(d2.default_value), [2., 2., 2., 2.])
    # A float default is incompatible with an integer dtype.
    with self.assertRaises(TypeError):
      tf.contrib.layers.real_valued_column("d3",
                                           default_value=2.,
                                           dtype=tf.int32)

    # default_value is neither integer nor float.
    with self.assertRaises(TypeError):
      tf.contrib.layers.real_valued_column("e1", default_value="string")
    with self.assertRaises(TypeError):
      tf.contrib.layers.real_valued_column("e1",
                                           dimension=3,
                                           default_value=[1, 3., "string"])

    # default_value is a list of integers.
    f1 = tf.contrib.layers.real_valued_column("f1", default_value=[2])
    self.assertListEqual(list(f1.default_value), [2])
    f2 = tf.contrib.layers.real_valued_column("f2",
                                              dimension=3,
                                              default_value=[2, 2, 2])
    self.assertListEqual(list(f2.default_value), [2., 2., 2.])
    f3 = tf.contrib.layers.real_valued_column("f3",
                                              dimension=3,
                                              default_value=[2, 2, 2],
                                              dtype=tf.int32)
    self.assertListEqual(list(f3.default_value), [2, 2, 2])

    # default_value is a list of floats.
    g1 = tf.contrib.layers.real_valued_column("g1", default_value=[2.])
    self.assertListEqual(list(g1.default_value), [2.])
    g2 = tf.contrib.layers.real_valued_column("g2",
                                              dimension=3,
                                              default_value=[2., 2, 2])
    self.assertListEqual(list(g2.default_value), [2., 2., 2.])
    with self.assertRaises(TypeError):
      tf.contrib.layers.real_valued_column("g3",
                                           default_value=[2.],
                                           dtype=tf.int32)
    # List length must match the declared dimension.
    with self.assertRaises(ValueError):
      tf.contrib.layers.real_valued_column("g4",
                                           dimension=3,
                                           default_value=[2.])

  def testBucketizedColumnNameEndsWithUnderscoreBucketized(self):
    a = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("aaa"), [0, 4])
    self.assertEqual(a.name, "aaa_BUCKETIZED")

  def testBucketizedColumnRequiresRealValuedColumn(self):
    with self.assertRaises(TypeError):
      tf.contrib.layers.bucketized_column("bbb", [0])

  def testBucketizedColumnRequiresSortedBuckets(self):
    with self.assertRaises(ValueError):
      tf.contrib.layers.bucketized_column(
          tf.contrib.layers.real_valued_column("ccc"), [5, 0, 4])

  def testBucketizedColumnWithSameBucketBoundaries(self):
    """Duplicate boundary values are de-duplicated."""
    a_bucketized = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("a"), [1., 2., 2., 3., 3.])
    self.assertEqual(a_bucketized.name, "a_BUCKETIZED")
    self.assertTupleEqual(a_bucketized.boundaries, (1., 2., 3.))

  def testCrossedColumnNameCreatesSortedNames(self):
    a = tf.contrib.layers.sparse_column_with_hash_bucket("aaa",
                                                         hash_bucket_size=100)
    b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
                                                         hash_bucket_size=100)
    bucket = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column("cost"), [0, 4])
    crossed = tf.contrib.layers.crossed_column(
        set([b, bucket, a]), hash_bucket_size=10000)

    self.assertEqual("aaa_X_bbb_X_cost_BUCKETIZED", crossed.name,
                     "name should be generated by sorted column names")
    self.assertEqual("aaa", crossed.columns[0].name)
    self.assertEqual("bbb", crossed.columns[1].name)
    self.assertEqual("cost_BUCKETIZED", crossed.columns[2].name)

  def testCrossedColumnNotSupportRealValuedColumn(self):
    b = tf.contrib.layers.sparse_column_with_hash_bucket("bbb",
                                                         hash_bucket_size=100)
    with self.assertRaises(TypeError):
      tf.contrib.layers.crossed_column(
          set([b, tf.contrib.layers.real_valued_column("real")]),
          hash_bucket_size=10000)

  def testRealValuedColumnDtypes(self):
    """config exposes a FixedLenFeature carrying the requested dtype."""
    rvc = tf.contrib.layers.real_valued_column("rvc")
    self.assertDictEqual(
        {"rvc": tf.FixedLenFeature(
            [1], dtype=tf.float32)},
        rvc.config)

    rvc = tf.contrib.layers.real_valued_column("rvc", dtype=tf.int32)
    self.assertDictEqual(
        {"rvc": tf.FixedLenFeature(
            [1], dtype=tf.int32)},
        rvc.config)

    # Non-numeric dtypes are rejected for real-valued columns.
    with self.assertRaises(ValueError):
      tf.contrib.layers.real_valued_column("rvc", dtype=tf.string)

  def testSparseColumnDtypes(self):
    sc = tf.contrib.layers.sparse_column_with_integerized_feature("sc", 10)
    self.assertDictEqual({"sc": tf.VarLenFeature(dtype=tf.int64)}, sc.config)

    sc = tf.contrib.layers.sparse_column_with_integerized_feature(
        "sc", 10, dtype=tf.int32)
    self.assertDictEqual({"sc": tf.VarLenFeature(dtype=tf.int32)}, sc.config)

    # Integerized sparse columns must use an integer dtype.
    with self.assertRaises(ValueError):
      tf.contrib.layers.sparse_column_with_integerized_feature("sc",
                                                               10,
                                                               dtype=tf.float32)

  def testCreateFeatureSpec(self):
    """create_feature_spec_for_parsing covers every base column exactly once."""
    sparse_col = tf.contrib.layers.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    embedding_col = tf.contrib.layers.embedding_column(
        tf.contrib.layers.sparse_column_with_hash_bucket(
            "sparse_column_for_embedding",
            hash_bucket_size=10),
        dimension=4)
    real_valued_col1 = tf.contrib.layers.real_valued_column(
        "real_valued_column1")
    real_valued_col2 = tf.contrib.layers.real_valued_column(
        "real_valued_column2", 5)
    bucketized_col1 = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column(
            "real_valued_column_for_bucketization1"), [0, 4])
    bucketized_col2 = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column(
            "real_valued_column_for_bucketization2", 4), [0, 4])
    a = tf.contrib.layers.sparse_column_with_hash_bucket("cross_aaa",
                                                         hash_bucket_size=100)
    b = tf.contrib.layers.sparse_column_with_hash_bucket("cross_bbb",
                                                         hash_bucket_size=100)
    cross_col = tf.contrib.layers.crossed_column(
        set([a, b]), hash_bucket_size=10000)
    feature_columns = set([sparse_col, embedding_col,
                           real_valued_col1, real_valued_col2,
                           bucketized_col1, bucketized_col2,
                           cross_col])
    config = tf.contrib.layers.create_feature_spec_for_parsing(feature_columns)
    self.assertDictEqual({
        "sparse_column": tf.VarLenFeature(tf.string),
        "sparse_column_for_embedding": tf.VarLenFeature(tf.string),
        "real_valued_column1": tf.FixedLenFeature([1], dtype=tf.float32),
        "real_valued_column2": tf.FixedLenFeature([5], dtype=tf.float32),
        "real_valued_column_for_bucketization1":
            tf.FixedLenFeature([1], dtype=tf.float32),
        "real_valued_column_for_bucketization2":
            tf.FixedLenFeature([4], dtype=tf.float32),
        "cross_aaa": tf.VarLenFeature(tf.string),
        "cross_bbb": tf.VarLenFeature(tf.string)}, config)

  def testCreateFeatureSpec_RealValuedColumnWithDefaultValue(self):
    """Column default values propagate into the FixedLenFeature defaults."""
    real_valued_col1 = tf.contrib.layers.real_valued_column(
        "real_valued_column1", default_value=2)
    real_valued_col2 = tf.contrib.layers.real_valued_column(
        "real_valued_column2", 5, default_value=4)
    real_valued_col3 = tf.contrib.layers.real_valued_column(
        "real_valued_column3", default_value=[8])
    real_valued_col4 = tf.contrib.layers.real_valued_column(
        "real_valued_column4", 3,
        default_value=[1, 0, 6])
    feature_columns = [real_valued_col1, real_valued_col2,
                       real_valued_col3, real_valued_col4]
    config = tf.contrib.layers.create_feature_spec_for_parsing(feature_columns)
    self.assertEqual(4, len(config))
    self.assertDictEqual({
        "real_valued_column1":
            tf.FixedLenFeature([1], dtype=tf.float32, default_value=[2.]),
        "real_valued_column2":
            tf.FixedLenFeature([5], dtype=tf.float32,
                               default_value=[4., 4., 4., 4., 4.]),
        "real_valued_column3":
            tf.FixedLenFeature([1], dtype=tf.float32, default_value=[8.]),
        "real_valued_column4":
            tf.FixedLenFeature([3], dtype=tf.float32,
                               default_value=[1., 0., 6.])}, config)

  def testMakePlaceHolderTensorsForBaseFeatures(self):
    """Placeholders mirror each base feature's dtype and shape."""
    sparse_col = tf.contrib.layers.sparse_column_with_hash_bucket(
        "sparse_column", hash_bucket_size=100)
    real_valued_col = tf.contrib.layers.real_valued_column("real_valued_column",
                                                           5)
    bucketized_col = tf.contrib.layers.bucketized_column(
        tf.contrib.layers.real_valued_column(
            "real_valued_column_for_bucketization"), [0, 4])
    feature_columns = set([sparse_col, real_valued_col, bucketized_col])
    placeholders = (
        tf.contrib.layers.make_place_holder_tensors_for_base_features(
            feature_columns))

    self.assertEqual(3, len(placeholders))
    self.assertTrue(isinstance(placeholders["sparse_column"],
                               tf.SparseTensor))
    placeholder = placeholders["real_valued_column"]
    self.assertTrue(placeholder.name.startswith(u"Placeholder"))
    self.assertEqual(tf.float32, placeholder.dtype)
    self.assertEqual([None, 5], placeholder.get_shape().as_list())
    placeholder = placeholders["real_valued_column_for_bucketization"]
    self.assertTrue(placeholder.name.startswith(u"Placeholder"))
    self.assertEqual(tf.float32, placeholder.dtype)
    self.assertEqual([None, 1], placeholder.get_shape().as_list())
if __name__ == "__main__":
tf.test.main() | unknown | codeparrot/codeparrot-clean | ||
// Code generated by go-swagger; DO NOT EDIT.
package plugin
// This file was generated by the swagger tool.
// Editing this file might prove futile when you re-run the swagger generate command
// Env env
//
// swagger:model Env
type Env struct {
// description
// Required: true
Description string `json:"Description"`
// name
// Required: true
Name string `json:"Name"`
// settable
// Required: true
Settable []string `json:"Settable"`
// value
// Required: true
Value *string `json:"Value"`
} | go | github | https://github.com/moby/moby | api/types/plugin/env.go |
use std::{
fmt,
marker::PhantomData,
net,
rc::Rc,
task::{Context, Poll},
};
use actix_codec::{AsyncRead, AsyncWrite, Framed};
use actix_rt::net::TcpStream;
use actix_service::{
fn_service, IntoServiceFactory, Service, ServiceFactory, ServiceFactoryExt as _,
};
use actix_utils::future::ready;
use futures_core::future::LocalBoxFuture;
use tracing::error;
use super::{codec::Codec, dispatcher::Dispatcher, ExpectHandler, UpgradeHandler};
use crate::{
body::{BoxBody, MessageBody},
config::ServiceConfig,
error::DispatchError,
service::HttpServiceHandler,
ConnectCallback, OnConnectData, Request, Response,
};
/// `ServiceFactory` implementation for HTTP1 transport
pub struct H1Service<T, S, B, X = ExpectHandler, U = UpgradeHandler> {
    // Factory producing the request-handling service.
    srv: S,
    // Connection/keep-alive configuration passed through to the dispatcher.
    cfg: ServiceConfig,
    // Factory handling `Expect: 100-continue` requests.
    expect: X,
    // Optional factory that takes over the framed connection on upgrade.
    upgrade: Option<U>,
    // Callback run for each new connection to seed request-local data.
    on_connect_ext: Option<Rc<ConnectCallback<T>>>,
    // Marks the body type `B` as logically owned; not stored at runtime.
    _phantom: PhantomData<B>,
}
impl<T, S, B> H1Service<T, S, B>
where
    S: ServiceFactory<Request, Config = ()>,
    S::Error: Into<Response<BoxBody>>,
    S::InitError: fmt::Debug,
    S::Response: Into<Response<B>>,
    B: MessageBody,
{
    /// Create new `HttpService` instance with config.
    pub(crate) fn with_config<F: IntoServiceFactory<S, Request>>(
        cfg: ServiceConfig,
        service: F,
    ) -> Self {
        H1Service {
            cfg,
            srv: service.into_factory(),
            // Defaults: built-in `Expect: 100-continue` handler, no upgrade
            // support, and no on-connect callback until configured.
            expect: ExpectHandler,
            upgrade: None,
            on_connect_ext: None,
            _phantom: PhantomData,
        }
    }
}
impl<S, B, X, U> H1Service<TcpStream, S, B, X, U>
where
    S: ServiceFactory<Request, Config = ()>,
    S::Future: 'static,
    S::Error: Into<Response<BoxBody>>,
    S::InitError: fmt::Debug,
    S::Response: Into<Response<B>>,
    B: MessageBody,
    X: ServiceFactory<Request, Config = (), Response = Request>,
    X::Future: 'static,
    X::Error: Into<Response<BoxBody>>,
    X::InitError: fmt::Debug,
    U: ServiceFactory<(Request, Framed<TcpStream, Codec>), Config = (), Response = ()>,
    U::Future: 'static,
    U::Error: fmt::Display + Into<Response<BoxBody>>,
    U::InitError: fmt::Debug,
{
    /// Create simple tcp stream service
    pub fn tcp(
        self,
    ) -> impl ServiceFactory<TcpStream, Config = (), Response = (), Error = DispatchError, InitError = ()>
    {
        // Capture the peer address up front; it travels with the stream and
        // becomes part of the request metadata handled by this service.
        fn_service(|io: TcpStream| {
            let peer_addr = io.peer_addr().ok();
            ready(Ok((io, peer_addr)))
        })
        .and_then(self)
    }
}
#[cfg(feature = "openssl")]
mod openssl {
    use actix_tls::accept::{
        openssl::{
            reexports::{Error as SslError, SslAcceptor},
            Acceptor, TlsStream,
        },
        TlsError,
    };

    use super::*;

    impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
    where
        S: ServiceFactory<Request, Config = ()>,
        S::Future: 'static,
        S::Error: Into<Response<BoxBody>>,
        S::InitError: fmt::Debug,
        S::Response: Into<Response<B>>,
        B: MessageBody,
        X: ServiceFactory<Request, Config = (), Response = Request>,
        X::Future: 'static,
        X::Error: Into<Response<BoxBody>>,
        X::InitError: fmt::Debug,
        U: ServiceFactory<
            (Request, Framed<TlsStream<TcpStream>, Codec>),
            Config = (),
            Response = (),
        >,
        U::Future: 'static,
        U::Error: fmt::Display + Into<Response<BoxBody>>,
        U::InitError: fmt::Debug,
    {
        /// Create OpenSSL based service.
        pub fn openssl(
            self,
            acceptor: SslAcceptor,
        ) -> impl ServiceFactory<
            TcpStream,
            Config = (),
            Response = (),
            Error = TlsError<SslError, DispatchError>,
            InitError = (),
        > {
            // TLS handshake first, then hand the decrypted stream to HTTP/1.
            Acceptor::new(acceptor)
                .map_init_err(|_| {
                    unreachable!("TLS acceptor service factory does not error on init")
                })
                .map_err(TlsError::into_service_error)
                // Read the peer address from the raw stream beneath TLS.
                .map(|io: TlsStream<TcpStream>| {
                    let peer_addr = io.get_ref().peer_addr().ok();
                    (io, peer_addr)
                })
                // Wrap HTTP dispatch errors in the combined TLS error type.
                .and_then(self.map_err(TlsError::Service))
        }
    }
}
// Rustls v0.20 TLS termination in front of the HTTP/1 service.
#[cfg(feature = "rustls-0_20")]
mod rustls_0_20 {
    use std::io;
    use actix_service::ServiceFactoryExt as _;
    use actix_tls::accept::{
        rustls_0_20::{reexports::ServerConfig, Acceptor, TlsStream},
        TlsError,
    };
    use super::*;
    impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
    where
        S: ServiceFactory<Request, Config = ()>,
        S::Future: 'static,
        S::Error: Into<Response<BoxBody>>,
        S::InitError: fmt::Debug,
        S::Response: Into<Response<B>>,
        B: MessageBody,
        X: ServiceFactory<Request, Config = (), Response = Request>,
        X::Future: 'static,
        X::Error: Into<Response<BoxBody>>,
        X::InitError: fmt::Debug,
        U: ServiceFactory<
            (Request, Framed<TlsStream<TcpStream>, Codec>),
            Config = (),
            Response = (),
        >,
        U::Future: 'static,
        U::Error: fmt::Display + Into<Response<BoxBody>>,
        U::InitError: fmt::Debug,
    {
        /// Create Rustls v0.20 based service.
        pub fn rustls(
            self,
            config: ServerConfig,
        ) -> impl ServiceFactory<
            TcpStream,
            Config = (),
            Response = (),
            Error = TlsError<io::Error, DispatchError>,
            InitError = (),
        > {
            Acceptor::new(config)
                .map_init_err(|_| {
                    unreachable!("TLS acceptor service factory does not error on init")
                })
                .map_err(TlsError::into_service_error)
                .map(|io: TlsStream<TcpStream>| {
                    // `.0`: the underlying TCP stream is the first element of
                    // the pair wrapped by rustls' TlsStream.
                    let peer_addr = io.get_ref().0.peer_addr().ok();
                    (io, peer_addr)
                })
                .and_then(self.map_err(TlsError::Service))
        }
    }
}
// Rustls v0.21 TLS termination in front of the HTTP/1 service.
#[cfg(feature = "rustls-0_21")]
mod rustls_0_21 {
    use std::io;
    use actix_service::ServiceFactoryExt as _;
    use actix_tls::accept::{
        rustls_0_21::{reexports::ServerConfig, Acceptor, TlsStream},
        TlsError,
    };
    use super::*;
    impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
    where
        S: ServiceFactory<Request, Config = ()>,
        S::Future: 'static,
        S::Error: Into<Response<BoxBody>>,
        S::InitError: fmt::Debug,
        S::Response: Into<Response<B>>,
        B: MessageBody,
        X: ServiceFactory<Request, Config = (), Response = Request>,
        X::Future: 'static,
        X::Error: Into<Response<BoxBody>>,
        X::InitError: fmt::Debug,
        U: ServiceFactory<
            (Request, Framed<TlsStream<TcpStream>, Codec>),
            Config = (),
            Response = (),
        >,
        U::Future: 'static,
        U::Error: fmt::Display + Into<Response<BoxBody>>,
        U::InitError: fmt::Debug,
    {
        /// Create Rustls v0.21 based service.
        pub fn rustls_021(
            self,
            config: ServerConfig,
        ) -> impl ServiceFactory<
            TcpStream,
            Config = (),
            Response = (),
            Error = TlsError<io::Error, DispatchError>,
            InitError = (),
        > {
            Acceptor::new(config)
                .map_init_err(|_| {
                    unreachable!("TLS acceptor service factory does not error on init")
                })
                .map_err(TlsError::into_service_error)
                .map(|io: TlsStream<TcpStream>| {
                    // `.0`: underlying TCP stream of the wrapped pair.
                    let peer_addr = io.get_ref().0.peer_addr().ok();
                    (io, peer_addr)
                })
                .and_then(self.map_err(TlsError::Service))
        }
    }
}
// Rustls v0.22 TLS termination in front of the HTTP/1 service.
#[cfg(feature = "rustls-0_22")]
mod rustls_0_22 {
    use std::io;
    use actix_service::ServiceFactoryExt as _;
    use actix_tls::accept::{
        rustls_0_22::{reexports::ServerConfig, Acceptor, TlsStream},
        TlsError,
    };
    use super::*;
    impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
    where
        S: ServiceFactory<Request, Config = ()>,
        S::Future: 'static,
        S::Error: Into<Response<BoxBody>>,
        S::InitError: fmt::Debug,
        S::Response: Into<Response<B>>,
        B: MessageBody,
        X: ServiceFactory<Request, Config = (), Response = Request>,
        X::Future: 'static,
        X::Error: Into<Response<BoxBody>>,
        X::InitError: fmt::Debug,
        U: ServiceFactory<
            (Request, Framed<TlsStream<TcpStream>, Codec>),
            Config = (),
            Response = (),
        >,
        U::Future: 'static,
        U::Error: fmt::Display + Into<Response<BoxBody>>,
        U::InitError: fmt::Debug,
    {
        /// Create Rustls v0.22 based service.
        pub fn rustls_0_22(
            self,
            config: ServerConfig,
        ) -> impl ServiceFactory<
            TcpStream,
            Config = (),
            Response = (),
            Error = TlsError<io::Error, DispatchError>,
            InitError = (),
        > {
            Acceptor::new(config)
                .map_init_err(|_| {
                    unreachable!("TLS acceptor service factory does not error on init")
                })
                .map_err(TlsError::into_service_error)
                .map(|io: TlsStream<TcpStream>| {
                    // `.0`: underlying TCP stream of the wrapped pair.
                    let peer_addr = io.get_ref().0.peer_addr().ok();
                    (io, peer_addr)
                })
                .and_then(self.map_err(TlsError::Service))
        }
    }
}
// Rustls v0.23 TLS termination in front of the HTTP/1 service.
#[cfg(feature = "rustls-0_23")]
mod rustls_0_23 {
    use std::io;
    use actix_service::ServiceFactoryExt as _;
    use actix_tls::accept::{
        rustls_0_23::{reexports::ServerConfig, Acceptor, TlsStream},
        TlsError,
    };
    use super::*;
    impl<S, B, X, U> H1Service<TlsStream<TcpStream>, S, B, X, U>
    where
        S: ServiceFactory<Request, Config = ()>,
        S::Future: 'static,
        S::Error: Into<Response<BoxBody>>,
        S::InitError: fmt::Debug,
        S::Response: Into<Response<B>>,
        B: MessageBody,
        X: ServiceFactory<Request, Config = (), Response = Request>,
        X::Future: 'static,
        X::Error: Into<Response<BoxBody>>,
        X::InitError: fmt::Debug,
        U: ServiceFactory<
            (Request, Framed<TlsStream<TcpStream>, Codec>),
            Config = (),
            Response = (),
        >,
        U::Future: 'static,
        U::Error: fmt::Display + Into<Response<BoxBody>>,
        U::InitError: fmt::Debug,
    {
        /// Create Rustls v0.23 based service.
        pub fn rustls_0_23(
            self,
            config: ServerConfig,
        ) -> impl ServiceFactory<
            TcpStream,
            Config = (),
            Response = (),
            Error = TlsError<io::Error, DispatchError>,
            InitError = (),
        > {
            Acceptor::new(config)
                .map_init_err(|_| {
                    unreachable!("TLS acceptor service factory does not error on init")
                })
                .map_err(TlsError::into_service_error)
                .map(|io: TlsStream<TcpStream>| {
                    // `.0`: underlying TCP stream of the wrapped pair.
                    let peer_addr = io.get_ref().0.peer_addr().ok();
                    (io, peer_addr)
                })
                .and_then(self.map_err(TlsError::Service))
        }
    }
}
// Builder-style configuration: each method consumes `self` and returns an
// `H1Service` with one type parameter replaced.
impl<T, S, B, X, U> H1Service<T, S, B, X, U>
where
    S: ServiceFactory<Request, Config = ()>,
    S::Error: Into<Response<BoxBody>>,
    S::Response: Into<Response<B>>,
    S::InitError: fmt::Debug,
    B: MessageBody,
{
    /// Replace the expect (100-continue) service, swapping type param `X` for `X1`.
    pub fn expect<X1>(self, expect: X1) -> H1Service<T, S, B, X1, U>
    where
        X1: ServiceFactory<Request, Response = Request>,
        X1::Error: Into<Response<BoxBody>>,
        X1::InitError: fmt::Debug,
    {
        H1Service {
            expect,
            cfg: self.cfg,
            srv: self.srv,
            upgrade: self.upgrade,
            on_connect_ext: self.on_connect_ext,
            _phantom: PhantomData,
        }
    }
    /// Replace the optional upgrade service, swapping type param `U` for `U1`.
    pub fn upgrade<U1>(self, upgrade: Option<U1>) -> H1Service<T, S, B, X, U1>
    where
        U1: ServiceFactory<(Request, Framed<T, Codec>), Response = ()>,
        U1::Error: fmt::Display,
        U1::InitError: fmt::Debug,
    {
        H1Service {
            upgrade,
            cfg: self.cfg,
            srv: self.srv,
            expect: self.expect,
            on_connect_ext: self.on_connect_ext,
            _phantom: PhantomData,
        }
    }
    /// Set on connect callback.
    pub(crate) fn on_connect_ext(mut self, f: Option<Rc<ConnectCallback<T>>>) -> Self {
        self.on_connect_ext = f;
        self
    }
}
impl<T, S, B, X, U> ServiceFactory<(T, Option<net::SocketAddr>)> for H1Service<T, S, B, X, U>
where
T: AsyncRead + AsyncWrite + Unpin + 'static,
S: ServiceFactory<Request, Config = ()>,
S::Future: 'static,
S::Error: Into<Response<BoxBody>>,
S::Response: Into<Response<B>>,
S::InitError: fmt::Debug,
B: MessageBody,
X: ServiceFactory<Request, Config = (), Response = Request>,
X::Future: 'static,
X::Error: Into<Response<BoxBody>>,
X::InitError: fmt::Debug,
U: ServiceFactory<(Request, Framed<T, Codec>), Config = (), Response = ()>,
U::Future: 'static,
U::Error: fmt::Display + Into<Response<BoxBody>>,
U::InitError: fmt::Debug,
{
type Response = ();
type Error = DispatchError;
type Config = ();
type Service = H1ServiceHandler<T, S::Service, B, X::Service, U::Service>;
type InitError = ();
type Future = LocalBoxFuture<'static, Result<Self::Service, Self::InitError>>;
fn new_service(&self, _: ()) -> Self::Future {
let service = self.srv.new_service(());
let expect = self.expect.new_service(());
let upgrade = self.upgrade.as_ref().map(|s| s.new_service(()));
let on_connect_ext = self.on_connect_ext.clone();
let cfg = self.cfg.clone();
Box::pin(async move {
let expect = expect.await.map_err(|err| {
tracing::error!("Initialization of HTTP expect service error: {err:?}");
})?;
let upgrade = match upgrade {
Some(upgrade) => {
let upgrade = upgrade.await.map_err(|err| {
tracing::error!("Initialization of HTTP upgrade service error: {err:?}");
})?;
Some(upgrade)
}
None => None,
};
let service = service
.await
.map_err(|err| error!("Initialization of HTTP service error: {err:?}"))?;
Ok(H1ServiceHandler::new(
cfg,
service,
expect,
upgrade,
on_connect_ext,
))
})
}
}
/// `Service` implementation for HTTP/1 transport.
///
/// An alias of the shared [`HttpServiceHandler`]; the HTTP/1-specific
/// dispatching comes from the `Service` impl on that type, not from a
/// distinct handler struct.
pub type H1ServiceHandler<T, S, B, X, U> = HttpServiceHandler<T, S, B, X, U>;
// Per-connection entry point: readiness delegates to the inner service flow;
// `call` hands the IO off to the HTTP/1 `Dispatcher`.
// (Closing brace of this impl lives on the next line of the file.)
impl<T, S, B, X, U> Service<(T, Option<net::SocketAddr>)> for HttpServiceHandler<T, S, B, X, U>
where
    T: AsyncRead + AsyncWrite + Unpin,
    S: Service<Request>,
    S::Error: Into<Response<BoxBody>>,
    S::Response: Into<Response<B>>,
    B: MessageBody,
    X: Service<Request, Response = Request>,
    X::Error: Into<Response<BoxBody>>,
    U: Service<(Request, Framed<T, Codec>), Response = ()>,
    U::Error: fmt::Display + Into<Response<BoxBody>>,
{
    type Response = ();
    type Error = DispatchError;
    type Future = Dispatcher<T, S, B, X, U>;

    fn poll_ready(&self, cx: &mut Context<'_>) -> Poll<Result<(), Self::Error>> {
        // NOTE(review): positional `{:?}` here vs inline `{err:?}` elsewhere
        // in this file — harmless, but worth unifying.
        self._poll_ready(cx).map_err(|err| {
            error!("HTTP/1 service readiness error: {:?}", err);
            DispatchError::Service(err)
        })
    }

    fn call(&self, (io, addr): (T, Option<net::SocketAddr>)) -> Self::Future {
        // Capture on-connect extension data from the raw IO before the
        // dispatcher takes ownership of it.
        let conn_data = OnConnectData::from_io(&io, self.on_connect_ext.as_deref());
        Dispatcher::new(io, Rc::clone(&self.flow), self.cfg.clone(), addr, conn_data)
    }
} | rust | github | https://github.com/actix/actix-web | actix-http/src/h1/service.rs |
from pyasn1 import error
class TagMap:
def __init__(self, posMap={}, negMap={}, defType=None):
self.__posMap = posMap.copy()
self.__negMap = negMap.copy()
self.__defType = defType
def __contains__(self, tagSet):
return tagSet in self.__posMap or \
self.__defType is not None and tagSet not in self.__negMap
def __getitem__(self, tagSet):
if tagSet in self.__posMap:
return self.__posMap[tagSet]
elif tagSet in self.__negMap:
raise error.PyAsn1Error('Key in negative map')
elif self.__defType is not None:
return self.__defType
else:
raise KeyError()
def __repr__(self):
s = '%r/%r' % (self.__posMap, self.__negMap)
if self.__defType is not None:
s = s + '/%r' % (self.__defType,)
return s
def clone(self, parentType, tagMap, uniq=False):
if self.__defType is not None and tagMap.getDef() is not None:
raise error.PyAsn1Error('Duplicate default value at %s' % self)
if tagMap.getDef() is not None:
defType = tagMap.getDef()
else:
defType = self.__defType
posMap = self.__posMap.copy()
for k in tagMap.getPosMap():
if uniq and k in posMap:
raise error.PyAsn1Error('Duplicate positive key %s' % k)
posMap[k] = parentType
negMap = self.__negMap.copy()
negMap.update(tagMap.getNegMap())
return self.__class__(
posMap, negMap, defType,
)
def getPosMap(self): return self.__posMap.copy()
def getNegMap(self): return self.__negMap.copy()
def getDef(self): return self.__defType | unknown | codeparrot/codeparrot-clean | ||
# Copyright (C) 2010 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import logging
import time
import urllib2
_log = logging.getLogger(__name__)
class NetworkTimeout(Exception):
    """Signals that retry backoff exceeded the transaction's time budget."""

    def __str__(self):
        return "NetworkTimeout"
class NetworkTransaction(object):
    # Retries a callable with exponentially growing backoff until it succeeds
    # or the cumulative sleep would exceed timeout_seconds.
    # NOTE(review): this is Python 2 code (`except ..., e:` syntax in run()).

    def __init__(self, initial_backoff_seconds=10, grown_factor=1.5, timeout_seconds=(10 * 60), convert_404_to_None=False):
        self._initial_backoff_seconds = initial_backoff_seconds
        self._grown_factor = grown_factor
        self._timeout_seconds = timeout_seconds
        self._convert_404_to_None = convert_404_to_None

    def run(self, request):
        # request: zero-argument callable performing the network operation.
        # Returns its result; returns None on HTTP 404 when convert_404_to_None
        # was set. Raises NetworkTimeout once the sleep budget is exhausted.
        self._total_sleep = 0
        self._backoff_seconds = self._initial_backoff_seconds
        while True:
            try:
                return request()
            except urllib2.HTTPError, e:
                if self._convert_404_to_None and e.code == 404:
                    return None
                # Give up *before* sleeping if the next wait would bust the budget.
                self._check_for_timeout()
                _log.warn("Received HTTP status %s loading \"%s\". Retrying in %s seconds..." % (e.code, e.filename, self._backoff_seconds))
                self._sleep()

    def _check_for_timeout(self):
        if self._total_sleep + self._backoff_seconds > self._timeout_seconds:
            raise NetworkTimeout()

    def _sleep(self):
        # Sleep, account for it, then grow the next backoff geometrically
        # (the *= grown_factor line follows on the next file line).
        time.sleep(self._backoff_seconds)
        self._total_sleep += self._backoff_seconds
self._backoff_seconds *= self._grown_factor | unknown | codeparrot/codeparrot-clean | ||
# This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
# This script fits several forms of penalized regression
from __future__ import print_function
import numpy as np
from sklearn.cross_validation import KFold
from sklearn.linear_model import LinearRegression, ElasticNet, Lasso, Ridge
from sklearn.metrics import r2_score
from sklearn.datasets import load_boston
# Load the Boston housing data.
# NOTE(review): load_boston was removed from scikit-learn 1.2, and
# sklearn.cross_validation is likewise long gone — this script requires an
# old scikit-learn release.
boston = load_boston()
x = boston.data
y = boston.target

# Compare plain OLS against several penalized regressors, reporting both
# training-set R^2 (optimistic) and cross-validated R^2.
for name, met in [
    ('linear regression', LinearRegression()),
    ('lasso()', Lasso()),
    ('elastic-net(.5)', ElasticNet(alpha=0.5)),
    ('lasso(.5)', Lasso(alpha=0.5)),
    ('ridge(.5)', Ridge(alpha=0.5)),
]:
    # Fit on the whole data:
    met.fit(x, y)
    # Predict on the whole data:
    p = met.predict(x)
    r2_train = r2_score(y, p)
    # Now, we use 10 fold cross-validation to estimate generalization error
    # NOTE(review): the comment above says 10-fold, but n_folds=5 and the
    # printout says 5-fold — the comment looks stale.
    kf = KFold(len(x), n_folds=5)
    p = np.zeros_like(y)
    for train, test in kf:
        met.fit(x[train], y[train])
        p[test] = met.predict(x[test])
    r2_cv = r2_score(y, p)
    print('Method: {}'.format(name))
    print('R2 on training: {}'.format(r2_train))
    print('R2 on 5-fold CV: {}'.format(r2_cv))
    print()
print() | unknown | codeparrot/codeparrot-clean | ||
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
############################################################################## | unknown | codeparrot/codeparrot-clean | ||
# Registries populated at import time: every seed class below appends itself
# to List_of_plants. List_of_produce is never filled in this file —
# presumably populated elsewhere; verify before removing.
List_of_plants = []
List_of_produce = []
# --- Ambrosia family: vulgaris -> deus -> gaia mutation chain (gaia can
# --- mutate back to deus). Data-record classes; attributes are game stats.
class ambrosia():
    name = 'ambrosia'
    plantname = 'Ambrosia Vulgaris'
    Description = "These seeds grow into common ambrosia, a plant grown by and from medicine."
    icon_state = 'seed-ambrosiavulgaris'
    lifespan = 60
    endurance = 25
    production = 6
    plant_yield = 6
    potency = 5
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'ambrosia-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["ambrosia_deus"]
    reagents_add = {'C2/aiuri': 0.1,'C2/libital': 0.1 ,'space_drugs': 0.15,'vitamin': 0.04,'nutriment': 0.05,'toxin': 0.1}
    species = 'ambrosiavulgaris'
List_of_plants.append(ambrosia)
class ambrosia_deus():
    name = 'ambrosia_deus'
    plantname = 'Ambrosia Deus'
    Description = "These seeds grow into ambrosia deus. Could it be the food of the gods..?"
    icon_state = 'seed-ambrosiadeus'
    lifespan = 60
    endurance = 25
    production = 6
    plant_yield = 6
    potency = 5
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'ambrosia-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["ambrosia_gaia"]
    reagents_add = {'omnizine': 0.15,'synaptizine': 0.15,'space_drugs': 0.1,'vitamin': 0.04,'nutriment': 0.05}
    species = 'ambrosiadeus'
List_of_plants.append(ambrosia_deus)
class ambrosia_gaia():
    name = 'ambrosia_gaia'
    plantname = 'Ambrosia Gaia'
    Description = "These seeds grow into ambrosia gaia, filled with infinite potential."
    icon_state = 'seed-ambrosia_gaia'
    lifespan = 60
    endurance = 25
    production = 6
    plant_yield = 6
    potency = 5
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'ambrosia-dead'
    # NOTE(review): empty genes here (siblings carry Perennial_Growth) — confirm intentional.
    genes = []
    mutates_into = ["ambrosia_deus"]
    reagents_add = {'earthsblood': 0.05,'nutriment': 0.06,'vitamin': 0.05}
    species = 'ambrosia_gaia'
List_of_plants.append(ambrosia_gaia)
# --- Apple family: apple -> golden apple mutation. ---
class apple():
    name = 'apple'
    plantname = 'Apple Tree'
    Description = "These seeds grow into apple trees."
    icon_state = 'seed-apple'
    lifespan = 55
    endurance = 35
    production = 6
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'apple-grow'
    dead_Sprite = 'apple-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["apple_gold"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'apple'
List_of_plants.append(apple)
class apple_gold():
    name = 'apple_gold'
    plantname = 'Golden Apple Tree'
    Description = "These seeds grow into golden apple trees. Good thing there are no firebirds in space."
    icon_state = 'seed-goldapple'
    lifespan = 55
    endurance = 35
    production = 10
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'apple-grow'
    dead_Sprite = 'apple-dead'
    genes = ["Perennial_Growth"]
    # Terminal node: empty-string entry means no further mutation target.
    mutates_into = [""]
    reagents_add = {'gold': 0.2,'vitamin': 0.04,'nutriment': 0.1}
    species = 'goldapple'
List_of_plants.append(apple_gold)
# --- Banana family: banana -> mimana / bluespace banana mutations. ---
class banana():
    name = 'banana'
    plantname = 'Banana Tree'
    Description = "They're seeds that grow into banana trees. When grown, keep away from clown."
    icon_state = 'seed-banana'
    lifespan = 50
    endurance = 30
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    dead_Sprite = 'banana-dead'
    genes = ["Slippery Skin", "Perennial_Growth"]
    mutates_into = ["banana_mime","banana_bluespace"]
    reagents_add = {'banana': 0.1,'potassium': 0.1,'vitamin': 0.04,'nutriment': 0.02}
    species = 'banana'
List_of_plants.append(banana)
class banana_mime():
    name = 'banana_mime'
    plantname = 'Mimana Tree'
    Description = "They're seeds that grow into mimana trees. When grown, keep away from mime."
    icon_state = 'seed-mimana'
    lifespan = 50
    endurance = 30
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    dead_Sprite = 'banana-dead'
    genes = ["Slippery Skin", "Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'nothing': 0.1,'toxin/mutetoxin': 0.1,'nutriment': 0.02}
    species = 'mimana'
List_of_plants.append(banana_mime)
class banana_bluespace():
    name = 'banana_bluespace'
    plantname = 'Bluespace Banana Tree'
    Description = "They're seeds that grow into bluespace banana trees. When grown, keep away from bluespace clown."
    icon_state = 'seed-banana-blue'
    lifespan = 50
    endurance = 30
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    # NOTE(review): only this banana variant defines Grown_Sprite — confirm siblings don't need one.
    Grown_Sprite = 'banana-grow'
    dead_Sprite = 'banana-dead'
    genes = ["Slippery Skin", "Bluespace Activity", "Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'bluespace': 0.2,'banana': 0.1,'vitamin': 0.04,'nutriment': 0.02}
    species = 'bluespacebanana'
List_of_plants.append(banana_bluespace)
# --- Soybean family: soybean -> koibean mutation. ---
class soya():
    name = 'soya'
    plantname = 'Soybean Plants'
    Description = "These seeds grow into soybean plants."
    icon_state = 'seed-soybean'
    lifespan = 25
    endurance = 15
    production = 4
    plant_yield = 3
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'soybean-grow'
    dead_Sprite = 'soybean-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["soya_koi"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.05,'cooking_oil': 0.03}
    species = 'soybean'
List_of_plants.append(soya)
class soya_koi():
    name = 'soya_koi'
    plantname = 'Koibean Plants'
    Description = "These seeds grow into koibean plants."
    icon_state = 'seed-koibean'
    lifespan = 25
    endurance = 15
    production = 4
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'soybean-grow'
    dead_Sprite = 'soybean-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'toxin/carpotoxin': 0.1,'vitamin': 0.04,'nutriment': 0.05}
    species = 'koibean'
List_of_plants.append(soya_koi)
# --- Berry family: berry -> glow-berry / poison-berry; poison -> death berry. ---
class berry():
    name = 'berry'
    plantname = 'Berry Bush'
    Description = "These seeds grow into berry bushes."
    icon_state = 'seed-berry'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'berry-grow'
    dead_Sprite = 'berry-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["berry_glow","berry_poison"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'berry'
List_of_plants.append(berry)
class berry_poison():
    name = 'berry_poison'
    plantname = 'Poison-Berry Bush'
    Description = "These seeds grow into poison-berry bushes."
    icon_state = 'seed-poisonberry'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'berry-grow'
    dead_Sprite = 'berry-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["berry_death"]
    reagents_add = {'toxin/cyanide': 0.15,'toxin/staminatoxin': 0.2,'vitamin': 0.04,'nutriment': 0.1}
    species = 'poisonberry'
List_of_plants.append(berry_poison)
class berry_death():
    name = 'berry_death'
    plantname = 'Death Berry Bush'
    Description = "These seeds grow into death berries."
    icon_state = 'seed-deathberry'
    lifespan = 30
    endurance = 15
    production = 5
    plant_yield = 2
    potency = 50
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'berry-grow'
    dead_Sprite = 'berry-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'toxin/coniine': 0.08,'toxin/staminatoxin': 0.1,'vitamin': 0.04,'nutriment': 0.1}
    species = 'deathberry'
List_of_plants.append(berry_death)
class berry_glow():
    name = 'berry_glow'
    plantname = 'Glow-Berry Bush'
    Description = "These seeds grow into glow-berry bushes."
    icon_state = 'seed-glowberry'
    lifespan = 30
    endurance = 25
    production = 5
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'berry-grow'
    dead_Sprite = 'berry-dead'
    genes = ["Bioluminescence/white", "Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'uranium': 0.25,'iodine': 0.2,'vitamin': 0.04,'nutriment': 0.1}
    species = 'glowberry'
List_of_plants.append(berry_glow)
# --- Cherry family: cherry -> blue cherry / cherry bulb mutations. ---
class cherry():
    name = 'cherry'
    plantname = 'Cherry Tree'
    Description = "Careful not to crack a tooth on one... That'd be the pits."
    icon_state = 'seed-cherry'
    lifespan = 35
    endurance = 35
    production = 5
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'cherry-grow'
    dead_Sprite = 'cherry-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["cherry_blue","cherry_bulb"]
    reagents_add = {'nutriment': 0.07,'sugar': 0.07}
    species = 'cherry'
List_of_plants.append(cherry)
class cherry_blue():
    name = 'cherry_blue'
    plantname = 'Blue Cherry Tree'
    Description = "The blue kind of cherries."
    icon_state = 'seed-bluecherry'
    lifespan = 35
    endurance = 35
    production = 5
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'cherry-grow'
    dead_Sprite = 'cherry-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.07,'sugar': 0.07}
    species = 'bluecherry'
List_of_plants.append(cherry_blue)
class cherry_bulb():
    name = 'cherry_bulb'
    plantname = 'Cherry Bulb Tree'
    Description = "The glowy kind of cherries."
    icon_state = 'seed-cherrybulb'
    lifespan = 35
    endurance = 35
    production = 5
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'cherry-grow'
    dead_Sprite = 'cherry-dead'
    genes = ["Perennial_Growth", "Bioluminescence/pink"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.07,'sugar': 0.07}
    species = 'cherrybulb'
List_of_plants.append(cherry_bulb)
# --- Grape family: grape -> green grape mutation. ---
class grape():
    name = 'grape'
    plantname = 'Grape Vine'
    Description = "These seeds grow into grape vines."
    icon_state = 'seed-grapes'
    lifespan = 50
    endurance = 25
    production = 5
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'grape-grow'
    dead_Sprite = 'grape-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["grape_green"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1,'sugar': 0.1}
    species = 'grape'
List_of_plants.append(grape)
class grape_green():
    name = 'grape_green'
    plantname = 'Green-Grape Vine'
    Description = "These seeds grow into green-grape vines."
    icon_state = 'seed-greengrapes'
    lifespan = 50
    endurance = 25
    production = 5
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'grape-grow'
    dead_Sprite = 'grape-dead'
    genes = ["Perennial_Growth"]
    # NOTE(review): no mutates_into here, unlike every sibling class — confirm intentional.
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1,'sugar': 0.1,'C2/aiuri': 0.2}
    species = 'greengrape'
List_of_plants.append(grape_green)
# --- Cannabis family: base -> rainbow; death/white/omega variants appear to
# --- be reachable elsewhere (no mutates_into points at them here — verify).
class cannabis():
    name = 'cannabis'
    plantname = 'Cannabis Plant'
    Description = "Taxable."
    icon_state = 'seed-cannabis'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'goon/icons/obj/hydroponics'
    Grown_Sprite = 'cannabis-grow'
    dead_Sprite = 'cannabis-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["cannabis_rainbow"]
    reagents_add = {'space_drugs': 0.15,'toxin/lipolicide': 0.35}
    species = 'cannabis'
List_of_plants.append(cannabis)
class cannabis_rainbow():
    name = 'cannabis_rainbow'
    plantname = 'Rainbow Weed'
    Description = "These seeds grow into rainbow weed. Groovy... and also highly addictive."
    icon_state = 'seed-megacannabis'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'goon/icons/obj/hydroponics'
    Grown_Sprite = 'cannabis-grow'
    dead_Sprite = 'cannabis-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'colorful_reagent': 0.05,'psicodine': 0.03,'happiness': 0.1,'toxin/mindbreaker': 0.1,'toxin/lipolicide': 0.15}
    species = 'megacannabis'
List_of_plants.append(cannabis_rainbow)
class cannabis_death():
    name = 'cannabis_death'
    plantname = 'Deathweed'
    Description = "These seeds grow into deathweed. Not groovy."
    icon_state = 'seed-blackcannabis'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'goon/icons/obj/hydroponics'
    Grown_Sprite = 'cannabis-grow'
    dead_Sprite = 'cannabis-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'toxin/cyanide': 0.35,'space_drugs': 0.15,'toxin/lipolicide': 0.15}
    species = 'blackcannabis'
List_of_plants.append(cannabis_death)
class cannabis_white():
    name = 'cannabis_white'
    plantname = 'Lifeweed'
    Description = "I will give unto him that is munchies of the fountain of the cravings of life, freely."
    icon_state = 'seed-whitecannabis'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'goon/icons/obj/hydroponics'
    Grown_Sprite = 'cannabis-grow'
    dead_Sprite = 'cannabis-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'omnizine': 0.35,'space_drugs': 0.15,'toxin/lipolicide': 0.15}
    species = 'whitecannabis'
List_of_plants.append(cannabis_white)
class cannabis_ultimate():
    name = 'cannabis_ultimate'
    plantname = 'Omega Weed'
    Description = "These seeds grow into omega weed."
    icon_state = 'seed-ocannabis'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'goon/icons/obj/hydroponics'
    Grown_Sprite = 'cannabis-grow'
    dead_Sprite = 'cannabis-dead'
    genes = ["Perennial_Growth", "Bioluminescence/green"]
    mutates_into = [""]
    reagents_add = {'space_drugs': 0.3}
    species = 'ocannabis'
List_of_plants.append(cannabis_ultimate)
# --- Wheat family: wheat -> oat / meatwheat; rice stands alone.
# NOTE(review): unlike the other families, these classes define no `genes`
# and no growing_icon/Grown_Sprite — confirm intentional.
class wheat():
    name = 'wheat'
    plantname = 'Wheat Stalks'
    Description = "These may, or may not, grow into wheat."
    icon_state = 'seed-wheat'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'wheat-dead'
    mutates_into = ["wheat_oat","wheat_meat"]
    reagents_add = {'nutriment': 0.04}
    species = 'wheat'
List_of_plants.append(wheat)
class wheat_oat():
    name = 'wheat_oat'
    plantname = 'Oat Stalks'
    Description = "These may, or may not, grow into oat."
    icon_state = 'seed-oat'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'wheat-dead'
    mutates_into = [""]
    reagents_add = {'nutriment': 0.04}
    species = 'oat'
List_of_plants.append(wheat_oat)
class wheat_rice():
    name = 'wheat_rice'
    plantname = 'Rice Stalks'
    Description = "These may, or may not, grow into rice."
    icon_state = 'seed-rice'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'wheat-dead'
    mutates_into = [""]
    reagents_add = {'nutriment': 0.04}
    species = 'rice'
List_of_plants.append(wheat_rice)
class wheat_meat():
    name = 'wheat_meat'
    plantname = 'Meatwheat'
    Description = "If you ever wanted to drive a vegetarian to insanity, here's how."
    icon_state = 'seed-meatwheat'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'wheat-dead'
    mutates_into = [""]
    reagents_add = {'nutriment': 0.04}
    species = 'meatwheat'
List_of_plants.append(wheat_meat)
# --- Chili family: chili -> chilly (ice) / ghost chili mutations. ---
class chili():
    name = 'chili'
    plantname = 'Chili Plants'
    Description = "These seeds grow into chili plants. HOT! HOT! HOT!"
    icon_state = 'seed-chili'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 4
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'chili-grow'
    dead_Sprite = 'chili-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["chili_ice","chili_ghost"]
    reagents_add = {'capsaicin': 0.25,'vitamin': 0.04,'nutriment': 0.04}
    species = 'chili'
List_of_plants.append(chili)
class chili_ice():
    name = 'chili_ice'
    plantname = 'Chilly Pepper Plants'
    Description = "These seeds grow into chilly pepper plants."
    icon_state = 'seed-icepepper'
    lifespan = 25
    endurance = 15
    production = 4
    plant_yield = 4
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'chili-grow'
    dead_Sprite = 'chili-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'frostoil': 0.25,'vitamin': 0.02,'nutriment': 0.02}
    species = 'chiliice'
List_of_plants.append(chili_ice)
class chili_ghost():
    name = 'chili_ghost'
    plantname = 'Ghost Chili Plants'
    Description = "These seeds grow into a chili said to be the hottest in the galaxy."
    icon_state = 'seed-chilighost'
    lifespan = 20
    endurance = 10
    production = 10
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'chili-grow'
    dead_Sprite = 'chili-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'condensedcapsaicin': 0.3,'capsaicin': 0.55,'nutriment': 0.04}
    species = 'chilighost'
List_of_plants.append(chili_ghost)
# Lime: citrus tree; can mutate into orange.
class lime():
    name = 'lime'
    plantname = 'Lime Tree'
    Description = "These are very sour seeds."
    icon_state = 'seed-lime'
    lifespan = 55
    endurance = 50
    production = 6
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    genes = ["Perennial_Growth"]
    mutates_into = ["orange"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.05}
    species = 'lime'
List_of_plants.append(lime)
# Orange: citrus tree; can mutate back into lime or into orange_3d.
class orange():
    name = 'orange'
    plantname = 'Orange Tree'
    Description = "Sour seeds."
    icon_state = 'seed-orange'
    lifespan = 60
    endurance = 50
    production = 6
    plant_yield = 5
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'lime-grow'
    dead_Sprite = 'lime-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["lime","orange_3d"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.05}
    species = 'orange'
List_of_plants.append(orange)
# Lemon: citrus tree; can mutate into firelemon.
class lemon():
    name = 'lemon'
    plantname = 'Lemon Tree'
    Description = "These are sour seeds."
    icon_state = 'seed-lemon'
    lifespan = 55
    endurance = 45
    production = 6
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'lime-grow'
    dead_Sprite = 'lime-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["firelemon"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.05}
    species = 'lemon'
List_of_plants.append(lemon)
# Combustible lemon: mutation of lemon; no mutates_into entry and only a
# nutriment reagent (matches the pattern of other terminal records).
class firelemon():
    # Fixed: the original had a trailing space ('firelemon ').  Every other
    # record in this table keeps `name` byte-equal to the class identifier,
    # and a stray space would break any name-based lookup against this list.
    name = 'firelemon'
    plantname = 'Combustible Lemon Tree'
    Description = "When life gives you lemons, don't make lemonade. Make life take the lemons back! Get mad! I don't want your damn lemons!"
    icon_state = 'seed-firelemon'
    lifespan = 55
    endurance = 45
    production = 6
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'lime-grow'
    dead_Sprite = 'lime-dead'
    genes = ["Perennial_Growth"]
    reagents_add = {'nutriment': 0.05}
    species = 'firelemon'
List_of_plants.append(firelemon)
# Extradimensional orange: mutation of orange; adds haloperidol.
# NOTE(review): species is 'orange', same as the plain orange record — looks
# intentional (shared sprite set) but confirm against consumers of `species`.
class orange_3d():
    name = 'orange_3d'
    plantname = 'Extradimensional Orange Tree'
    Description = "Polygonal seeds."
    icon_state = 'seed-orange'
    lifespan = 60
    endurance = 50
    production = 6
    plant_yield = 5
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'lime-grow'
    dead_Sprite = 'lime-dead'
    genes = ["Perennial_Growth"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.05,'haloperidol': 0.15}
    species = 'orange'
List_of_plants.append(orange_3d)
# Cocoa pod: base tree; can mutate into vanillapod or bungotree.
class cocoapod():
    name = 'cocoapod'
    plantname = 'Cocao Tree'
    Description = "These seeds grow into cacao trees. They look fattening."
    icon_state = 'seed-cocoapod'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'cocoapod-grow'
    dead_Sprite = 'cocoapod-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["cocoapod_vanillapod","cocoapod_bungotree"]
    reagents_add = {'coco': 0.25,'nutriment': 0.1}
    species = 'cocoapod'
List_of_plants.append(cocoapod)
# Vanilla pod: cocoapod mutation; terminal.
class cocoapod_vanillapod():
    name = 'cocoapod_vanillapod'
    plantname = 'Vanilla Tree'
    Description = "These seeds grow into vanilla trees. They look fattening."
    icon_state = 'seed-vanillapod'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'cocoapod-grow'
    dead_Sprite = 'cocoapod-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'vanilla': 0.25,'nutriment': 0.1}
    species = 'vanillapod'
List_of_plants.append(cocoapod_vanillapod)
# Bungo tree: cocoapod mutation; terminal; yields enzyme.
class cocoapod_bungotree():
    name = 'cocoapod_bungotree'
    plantname = 'Bungo Tree'
    Description = "These seeds grow into bungo trees. They appear to be heavy and almost perfectly spherical."
    icon_state = 'seed-bungotree'
    lifespan = 30
    endurance = 15
    production = 7
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'bungotree-grow'
    dead_Sprite = 'bungotree-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'enzyme': 0.1,'nutriment': 0.1}
    species = 'bungotree'
List_of_plants.append(cocoapod_bungotree)
# Corn: base crop; can mutate into corn_snapcorn.
class corn():
    name = 'corn'
    plantname = 'Corn Stalks'
    Description = "I don't mean to sound corny..."
    icon_state = 'seed-corn'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'corn-grow'
    dead_Sprite = 'corn-dead'
    mutates_into = ["corn_snapcorn"]
    reagents_add = {'cornoil': 0.2,'vitamin': 0.04,'nutriment': 0.1}
    species = 'corn'
List_of_plants.append(corn)
# Snapcorn: corn mutation; terminal; same reagents as corn.
class corn_snapcorn():
    name = 'corn_snapcorn'
    plantname = 'Snapcorn Stalks'
    Description = "Oh snap!"
    icon_state = 'seed-snapcorn'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'corn-grow'
    dead_Sprite = 'corn-dead'
    mutates_into = [""]
    reagents_add = {'cornoil': 0.2,'vitamin': 0.04,'nutriment': 0.1}
    species = 'snapcorn'
List_of_plants.append(corn_snapcorn)
# Cotton: fiber crop; no reagents_add; can mutate into cotton_durathread.
class cotton():
    name = 'cotton'
    plantname = 'Cotton'
    Description = "A pack of seeds that'll grow into a cotton plant. Assistants make good free labor if neccesary."
    icon_state = 'seed-cotton'
    lifespan = 35
    endurance = 25
    production = 1
    plant_yield = 2
    potency = 50
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing'
    dead_Sprite = 'cotton-dead'
    mutates_into = ["cotton_durathread"]
    species = 'cotton'
List_of_plants.append(cotton)
# Durathread: cotton mutation; fiber crop with no reagents_add.
class cotton_durathread():
    name = 'cotton_durathread'
    plantname = 'Durathread'
    Description = "A pack of seeds that'll grow into an extremely durable thread that could easily rival plasteel if woven properly."
    icon_state = 'seed-durathread'
    lifespan = 80
    endurance = 50
    production = 1
    plant_yield = 2
    potency = 50
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing'
    dead_Sprite = 'cotton-dead'
    species = 'durathread'
List_of_plants.append(cotton_durathread)
# Eggplant: base crop; can mutate into eggplant_eggy.
class eggplant():
    name = 'eggplant'
    plantname = 'Eggplants'
    Description = "These seeds grow to produce berries that look nothing like eggs."
    icon_state = 'seed-eggplant'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 2
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'eggplant-grow'
    dead_Sprite = 'eggplant-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["eggplant_eggy"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'eggplant'
List_of_plants.append(eggplant)
# Egg-plant: eggplant mutation; terminal; nutriment only.
class eggplant_eggy():
    name = 'eggplant_eggy'
    plantname = 'Egg-Plants'
    Description = "These seeds grow to produce berries that look a lot like eggs."
    icon_state = 'seed-eggy'
    lifespan = 75
    endurance = 15
    production = 12
    plant_yield = 2
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'eggplant-grow'
    dead_Sprite = 'eggplant-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.1}
    species = 'eggy'
List_of_plants.append(eggplant_eggy)
# Poppy: base flower; can mutate into poppy_geranium or poppy_lily.
class poppy():
    name = 'poppy'
    plantname = 'Poppy Plants'
    Description = "These seeds grow into poppies."
    icon_state = 'seed-poppy'
    lifespan = 25
    endurance = 10
    production = 6
    plant_yield = 6
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'poppy-grow'
    dead_Sprite = 'poppy-dead'
    mutates_into = ["poppy_geranium","poppy_lily"]
    reagents_add = {'C2/libital': 0.2,'nutriment': 0.05}
    species = 'poppy'
List_of_plants.append(poppy)
# Lily: poppy mutation; can further mutate into poppy_lily_trumpet.
class poppy_lily():
    name = 'poppy_lily'
    plantname = 'Lily Plants'
    Description = "These seeds grow into lilies."
    icon_state = 'seed-lily'
    lifespan = 25
    endurance = 10
    production = 6
    plant_yield = 6
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'poppy-grow'
    dead_Sprite = 'poppy-dead'
    mutates_into = ["poppy_lily_trumpet"]
    reagents_add = {'C2/libital': 0.2,'nutriment': 0.05}
    species = 'lily'
List_of_plants.append(poppy_lily)
# Spaceman's trumpet: lily mutation; terminal; carries the polypyr gene.
class poppy_lily_trumpet():
    name = 'poppy_lily_trumpet'
    plantname = 'Spaceman\'s Trumpet Plant'
    Description = "A plant sculped by extensive genetic engineering. The spaceman's trumpet is said to bear no resemblance to its wild ancestors. Inside NT AgriSci circles it is better known as NTPW-0372."
    icon_state = 'seed-trumpet'
    lifespan = 80
    endurance = 10
    production = 5
    plant_yield = 4
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'spacemanstrumpet-grow'
    dead_Sprite = 'spacemanstrumpet-dead'
    genes = ["polypyr"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.05}
    species = 'spacemanstrumpet'
List_of_plants.append(poppy_lily_trumpet)
# Geranium: poppy mutation; terminal.
class poppy_geranium():
    name = 'poppy_geranium'
    plantname = 'Geranium Plants'
    Description = "These seeds grow into geranium."
    icon_state = 'seed-geranium'
    lifespan = 25
    endurance = 10
    production = 6
    plant_yield = 6
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'poppy-grow'
    dead_Sprite = 'poppy-dead'
    mutates_into = [""]
    reagents_add = {'C2/libital': 0.2,'nutriment': 0.05}
    species = 'geranium'
List_of_plants.append(poppy_geranium)
# Harebell: standalone weed-adapted flower; no mutations listed.
class harebell():
    name = 'harebell'
    plantname = 'Harebells'
    Description = "These seeds grow into pretty little flowers."
    icon_state = 'seed-harebell'
    lifespan = 100
    endurance = 20
    production = 1
    plant_yield = 2
    potency = 30
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    genes = ["Weed Adaptation"]
    reagents_add = {'nutriment': 0.04}
    species = 'harebell'
List_of_plants.append(harebell)
# Sunflower: base flower; can mutate into moonflower or novaflower.
class sunflower():
    name = 'sunflower'
    plantname = 'Sunflowers'
    Description = "These seeds grow into sunflowers."
    icon_state = 'seed-sunflower'
    lifespan = 25
    endurance = 20
    production = 2
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'sunflower-grow'
    dead_Sprite = 'sunflower-dead'
    mutates_into = ["sunflower_moonflower","sunflower_novaflower"]
    reagents_add = {'cornoil': 0.08,'nutriment': 0.04}
    species = 'sunflower'
List_of_plants.append(sunflower)
# Moonflower: sunflower mutation; terminal; glows purple, yields moonshine.
class sunflower_moonflower():
    name = 'sunflower_moonflower'
    plantname = 'Moonflowers'
    Description = "These seeds grow into moonflowers."
    icon_state = 'seed-moonflower'
    lifespan = 25
    endurance = 20
    production = 2
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'moonflower-grow'
    dead_Sprite = 'sunflower-dead'
    genes = ["Bioluminescence/purple"]
    mutates_into = [""]
    reagents_add = {'ethanol/moonshine': 0.2,'vitamin': 0.02,'nutriment': 0.02}
    species = 'moonflower'
List_of_plants.append(sunflower_moonflower)
# Novaflower: sunflower mutation; terminal; capsaicin-bearing, zero nutriment.
class sunflower_novaflower():
    name = 'sunflower_novaflower'
    plantname = 'Novaflowers'
    Description = "These seeds grow into novaflowers."
    icon_state = 'seed-novaflower'
    lifespan = 25
    endurance = 20
    production = 2
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    Grown_Sprite = 'novaflower-grow'
    dead_Sprite = 'sunflower-dead'
    mutates_into = [""]
    reagents_add = {'condensedcapsaicin': 0.25,'capsaicin': 0.3,'nutriment': 0}
    species = 'novaflower'
List_of_plants.append(sunflower_novaflower)
# Garlic: standalone vegetable; no mutations listed.
class garlic():
    name = 'garlic'
    plantname = 'Garlic Sprouts'
    Description = "A packet of extremely pungent seeds."
    icon_state = 'seed-garlic'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 6
    potency = 25
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    reagents_add = {'garlic': 0.15,'nutriment': 0.1}
    species = 'garlic'
List_of_plants.append(garlic)
# Grass: base ground cover; can mutate into grass_carpet or grass_fairy.
class grass():
    name = 'grass'
    plantname = 'Grass'
    Description = "These seeds grow into grass. Yummy!"
    icon_state = 'seed-grass'
    lifespan = 40
    endurance = 40
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    Grown_Sprite = 'grass-grow'
    dead_Sprite = 'grass-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["grass_carpet","grass_fairy"]
    reagents_add = {'nutriment': 0.02,'hydrogen': 0.05}
    species = 'grass'
List_of_plants.append(grass)
# Fairygrass: grass mutation; blue-glowing, adds space_drugs.
class grass_fairy():
    name = 'grass_fairy'
    plantname = 'Fairygrass'
    Description = "These seeds grow into a more mystical grass."
    icon_state = 'seed-fairygrass'
    lifespan = 40
    endurance = 40
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    Grown_Sprite = 'fairygrass-grow'
    dead_Sprite = 'fairygrass-dead'
    genes = ["Perennial_Growth", "Bioluminescence/blue"]
    reagents_add = {'nutriment': 0.02,'hydrogen': 0.05,'space_drugs': 0.15}
    species = 'fairygrass'
List_of_plants.append(grass_fairy)
# Carpet: grass mutation; terminal.
class grass_carpet():
    name = 'grass_carpet'
    plantname = 'Carpet'
    Description = "These seeds grow into stylish carpet samples."
    icon_state = 'seed-carpet'
    lifespan = 40
    endurance = 40
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    Grown_Sprite = 'grass-grow'
    dead_Sprite = 'grass-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.02,'hydrogen': 0.05}
    species = 'carpet'
List_of_plants.append(grass_carpet)
# Kudzu: fast-growing weed; no mutations listed.
class kudzu():
    name = 'kudzu'
    plantname = 'Kudzu'
    Description = "These seeds grow into a weed that grows incredibly fast."
    icon_state = 'seed-kudzu'
    lifespan = 20
    endurance = 10
    production = 6
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    genes = ["Perennial_Growth", "Weed Adaptation"]
    reagents_add = {'C2/multiver': 0.04,'nutriment': 0.02}
    species = 'kudzu'
List_of_plants.append(kudzu)
# Watermelon: base fruit; can mutate into watermelon_holy.
class watermelon():
    name = 'watermelon'
    plantname = 'Watermelon Vines'
    Description = "These seeds grow into watermelon plants."
    icon_state = 'seed-watermelon'
    lifespan = 50
    endurance = 40
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    dead_Sprite = 'watermelon-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["watermelon_holy"]
    reagents_add = {'water': 0.2,'vitamin': 0.04,'nutriment': 0.2}
    species = 'watermelon'
List_of_plants.append(watermelon)
# Holy melon: watermelon mutation; terminal; yields holy water, glows yellow.
class watermelon_holy():
    name = 'watermelon_holy'
    plantname = 'Holy Melon Vines'
    Description = "These seeds grow into holymelon plants."
    icon_state = 'seed-holymelon'
    lifespan = 50
    endurance = 40
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    dead_Sprite = 'watermelon-dead'
    genes = ["Bioluminescence/yellow"]
    mutates_into = [""]
    reagents_add = {'water/holywater': 0.2,'vitamin': 0.04,'nutriment': 0.1}
    species = 'holymelon'
List_of_plants.append(watermelon_holy)
# Starthistle: weed; no reagents_add; mutates into corpse flower or galaxythistle.
class starthistle():
    name = 'starthistle'
    plantname = 'Starthistle'
    Description = "A robust species of weed that often springs up in-between the cracks of spaceship parking lots."
    icon_state = 'seed-starthistle'
    lifespan = 70
    endurance = 50
    production = 1
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    genes = ["Weed Adaptation"]
    mutates_into = ["starthistle_corpse_flower","galaxythistle"]
    species = 'starthistle'
List_of_plants.append(starthistle)
# Corpse flower: starthistle mutation; terminal; empty gene list, no reagents.
class starthistle_corpse_flower():
    name = 'starthistle_corpse_flower'
    plantname = 'Corpse flower'
    Description = "A species of plant that emits a horrible odor. The odor stops being produced in difficult atmospheric conditions."
    icon_state = 'seed-corpse-flower'
    lifespan = 70
    endurance = 50
    production = 2
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    genes = []
    mutates_into = [""]
    species = 'corpse-flower'
List_of_plants.append(starthistle_corpse_flower)
# Galaxythistle: starthistle mutation; terminal; yields silibinin.
class galaxythistle():
    name = 'galaxythistle'
    plantname = 'Galaxythistle'
    Description = "An impressive species of weed that is thought to have evolved from the simple milk thistle. Contains flavolignans that can help repair a damaged liver."
    icon_state = 'seed-galaxythistle'
    lifespan = 70
    endurance = 40
    production = 2
    plant_yield = 2
    potency = 25
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    genes = ["Weed Adaptation", "invasive"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.05,'silibinin': 0.1}
    species = 'galaxythistle'
List_of_plants.append(galaxythistle)
# Cabbage: vegetable; can mutate into replicapod (defined later in this file).
class cabbage():
    name = 'cabbage'
    plantname = 'Cabbages'
    Description = "These seeds grow into cabbages."
    icon_state = 'seed-cabbage'
    lifespan = 50
    endurance = 25
    production = 5
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    genes = ["Perennial_Growth"]
    mutates_into = ["replicapod"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'cabbage'
List_of_plants.append(cabbage)
# Sugarcane: sugar crop; mutates into bamboo.
# NOTE(review): no 'bamboo' record is visible in this chunk — confirm it is
# defined elsewhere in the file.
class sugarcane():
    name = 'sugarcane'
    plantname = 'Sugarcane'
    Description = "These seeds grow into sugarcane."
    icon_state = 'seed-sugarcane'
    lifespan = 60
    endurance = 50
    production = 6
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    genes = ["Perennial_Growth"]
    mutates_into = ["bamboo"]
    reagents_add = {'sugar': 0.25}
    species = 'sugarcane'
List_of_plants.append(sugarcane)
# Gatfruit: novelty tree; reagents are gunpowder components.
class gatfruit():
    name = 'gatfruit'
    plantname = 'Gatfruit Tree'
    Description = "These seeds grow into .357 revolvers."
    icon_state = 'seed-gatfruit'
    lifespan = 20
    endurance = 20
    production = 10
    plant_yield = 2
    potency = 60
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    genes = ["Perennial_Growth"]
    reagents_add = {'sulfur': 0.1,'carbon': 0.1,'nitrogen': 0.07,'potassium': 0.05}
    species = 'gatfruit'
List_of_plants.append(gatfruit)
# Cherry bomb: explosive fruit; terminal; high gunpowder fraction.
class cherry_bomb():
    name = 'cherry_bomb'
    plantname = 'Cherry Bomb Tree'
    Description = "They give you vibes of dread and frustration."
    icon_state = 'seed-cherry_bomb'
    lifespan = 35
    endurance = 35
    production = 5
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'cherry-grow'
    dead_Sprite = 'cherry-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'nutriment': 0.1,'sugar': 0.1,'gunpowder': 0.7}
    species = 'cherry_bomb'
List_of_plants.append(cherry_bomb)
# Reishi: medicinal mushroom (mycelium icon prefix); no mutations listed.
class reishi():
    name = 'reishi'
    plantname = 'Reishi'
    Description = "This mycelium grows into something medicinal and relaxing."
    icon_state = 'mycelium-reishi'
    lifespan = 35
    endurance = 35
    production = 5
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    reagents_add = {'morphine': 0.35,'C2/multiver': 0.35,'nutriment': 0}
    species = 'reishi'
List_of_plants.append(reishi)
# Fly amanita: toxic mushroom; can mutate into angel.
class amanita():
    name = 'amanita'
    plantname = 'Fly Amanitas'
    Description = "This mycelium grows into something horrible."
    icon_state = 'mycelium-amanita'
    lifespan = 50
    endurance = 35
    production = 5
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    mutates_into = ["angel"]
    reagents_add = {'mushroomhallucinogen': 0.04,'toxin/amatoxin': 0.35,'nutriment': 0,'growthserum': 0.1}
    species = 'amanita'
List_of_plants.append(amanita)
# Destroying angel: amanita mutation; adds amanitin toxin.
class angel():
    name = 'angel'
    plantname = 'Destroying Angels'
    Description = "This mycelium grows into something devastating."
    icon_state = 'mycelium-angel'
    lifespan = 50
    endurance = 35
    production = 5
    plant_yield = 2
    potency = 35
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    reagents_add = {'mushroomhallucinogen': 0.04,'toxin/amatoxin': 0.1,'nutriment': 0,'toxin/amanitin': 0.2}
    species = 'angel'
List_of_plants.append(angel)
# Liberty cap: hallucinogenic mushroom; no mutations listed.
class liberty():
    name = 'liberty'
    plantname = 'Liberty-Caps'
    Description = "This mycelium grows into liberty-cap mushrooms."
    icon_state = 'mycelium-liberty'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 5
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    reagents_add = {'mushroomhallucinogen': 0.25,'nutriment': 0.02}
    species = 'liberty'
List_of_plants.append(liberty)
# Plump helmet: edible mushroom; can mutate into walking mushrooms.
class plump():
    name = 'plump'
    plantname = 'Plump-Helmet Mushrooms'
    Description = "This mycelium grows into helmets... maybe."
    icon_state = 'mycelium-plump'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    mutates_into = ["plump_walkingmushroom"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'plump'
List_of_plants.append(plump)
# Walking mushroom: plump mutation; terminal; single-yield.
class plump_walkingmushroom():
    name = 'plump_walkingmushroom'
    plantname = 'Walking Mushrooms'
    Description = "This mycelium will grow into huge stuff!"
    icon_state = 'mycelium-walkingmushroom'
    lifespan = 30
    endurance = 30
    production = 1
    plant_yield = 1
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    mutates_into = [""]
    reagents_add = {'vitamin': 0.05,'nutriment': 0.15}
    species = 'walkingmushroom'
List_of_plants.append(plump_walkingmushroom)
# Chanterelle: edible mushroom; can mutate into jupiter cups.
class chanter():
    name = 'chanter'
    plantname = 'Chanterelle Mushrooms'
    Description = "This mycelium grows into chanterelle mushrooms."
    icon_state = 'mycelium-chanter'
    lifespan = 35
    endurance = 20
    production = 1
    plant_yield = 5
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality"]
    mutates_into = ["chanter_jupitercup"]
    reagents_add = {'nutriment': 0.1}
    species = 'chanter'
List_of_plants.append(chanter)
# Jupiter cup: chanterelle mutation; electric + carnivorous gene set.
class chanter_jupitercup():
    name = 'chanter_jupitercup'
    plantname = 'Jupiter Cups'
    Description = "This mycelium grows into jupiter cups. Zeus would be envious at the power at your fingertips."
    icon_state = 'mycelium-jupitercup'
    lifespan = 40
    endurance = 8
    production = 4
    plant_yield = 4
    potency = 15
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Fungal Vitality", "liquidelectricity", "Carnivory"]
    reagents_add = {'nutriment': 0.1}
    species = 'jupitercup'
List_of_plants.append(chanter_jupitercup)
# Glowshroom: luminescent mushroom; can mutate into glowcap or shadowshroom.
class glowshroom():
    name = 'glowshroom'
    plantname = 'Glowshrooms'
    Description = "This mycelium -glows- into mushrooms!"
    icon_state = 'mycelium-glowshroom'
    lifespan = 100
    endurance = 30
    production = 1
    plant_yield = 3
    potency = 30
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Bioluminescence", "Fungal Vitality"]
    mutates_into = ["glowshroom_glowcap","glowshroom_shadowshroom"]
    reagents_add = {'uranium/radium': 0.1,'phosphorus': 0.1,'nutriment': 0.04}
    species = 'glowshroom'
List_of_plants.append(glowshroom)
# Glowcap: glowshroom mutation; terminal; yields teslium.
class glowshroom_glowcap():
    name = 'glowshroom_glowcap'
    plantname = 'Glowcaps'
    Description = "This mycelium -powers- into mushrooms!"
    icon_state = 'mycelium-glowcap'
    lifespan = 100
    endurance = 30
    production = 1
    plant_yield = 3
    potency = 30
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    genes = ["Red Electrical Glow", "Electrical Activity", "Fungal Vitality"]
    mutates_into = [""]
    reagents_add = {'teslium': 0.1,'nutriment': 0.04}
    species = 'glowcap'
List_of_plants.append(glowshroom_glowcap)
# Shadowshroom: glowshroom mutation; terminal; radium-heavy.
class glowshroom_shadowshroom():
    name = 'glowshroom_shadowshroom'
    plantname = 'Shadowshrooms'
    Description = "This mycelium will grow into something shadowy."
    icon_state = 'mycelium-shadowshroom'
    lifespan = 100
    endurance = 30
    production = 1
    plant_yield = 3
    potency = 30
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_mushrooms'
    Grown_Sprite = 'shadowshroom-grow'
    dead_Sprite = 'shadowshroom-dead'
    genes = ["Shadow Emission", "Fungal Vitality"]
    mutates_into = [""]
    reagents_add = {'uranium/radium': 0.2,'nutriment': 0.04}
    species = 'shadowshroom'
List_of_plants.append(glowshroom_shadowshroom)
# Nettle: acid-bearing weed; can mutate into death nettles.
class nettle():
    name = 'nettle'
    plantname = 'Nettles'
    Description = "These seeds grow into nettles."
    icon_state = 'seed-nettle'
    lifespan = 30
    endurance = 40
    production = 6
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    genes = ["Perennial_Growth", "Weed Adaptation"]
    mutates_into = ["nettle_death"]
    reagents_add = {'toxin/acid': 0.5}
    species = 'nettle'
List_of_plants.append(nettle)
# Death nettle: nettle mutation; terminal; adds fluacid and prickle gene.
class nettle_death():
    name = 'nettle_death'
    plantname = 'Death Nettles'
    Description = "These seeds grow into death-nettles."
    icon_state = 'seed-deathnettle'
    lifespan = 30
    endurance = 25
    production = 6
    plant_yield = 2
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    genes = ["Perennial_Growth", "Weed Adaptation", "Hypodermic Prickles"]
    mutates_into = [""]
    reagents_add = {'toxin/acid/fluacid': 0.5,'toxin/acid': 0.5}
    species = 'deathnettle'
List_of_plants.append(nettle_death)
# Onion: vegetable; can mutate into red onions.
class onion():
    name = 'onion'
    plantname = 'Onion Sprouts'
    Description = "These seeds grow into onions."
    icon_state = 'seed-onion'
    lifespan = 20
    endurance = 25
    production = 4
    plant_yield = 6
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    mutates_into = ["onion_red"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'onion'
List_of_plants.append(onion)
# Red onion: onion mutation; adds tearjuice.
class onion_red():
    name = 'onion_red'
    plantname = 'Red Onion Sprouts'
    Description = "For growing exceptionally potent onions."
    icon_state = 'seed-onionred'
    lifespan = 20
    endurance = 25
    production = 4
    plant_yield = 6
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1,'tearjuice': 0.05}
    species = 'onion_red'
List_of_plants.append(onion_red)
# Pineapple: fruit; mutates into apple.
# NOTE(review): no 'apple' record is visible in this chunk — confirm it is
# defined elsewhere in the file.
class pineapple():
    name = 'pineapple'
    plantname = 'Pineapple Plant'
    Description = "Oooooooooooooh!"
    icon_state = 'seed-pineapple'
    lifespan = 40
    endurance = 30
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    genes = ["Perennial_Growth"]
    mutates_into = ["apple"]
    reagents_add = {'vitamin': 0.02,'nutriment': 0.2,'water': 0.04}
    species = 'pineapple'
List_of_plants.append(pineapple)
# Potato: battery crop (Capacitive Cell Production); mutates into sweet potato.
class potato():
    name = 'potato'
    plantname = 'Potato Plants'
    Description = "Boil 'em! Mash 'em! Stick 'em in a stew!"
    icon_state = 'seed-potato'
    lifespan = 30
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'potato-grow'
    dead_Sprite = 'potato-dead'
    genes = ["Capacitive Cell Production"]
    mutates_into = ["potato_sweet"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
    species = 'potato'
List_of_plants.append(potato)
# Sweet potato: potato mutation; terminal; adds sugar.
class potato_sweet():
    name = 'potato_sweet'
    plantname = 'Sweet Potato Plants'
    Description = "These seeds grow into sweet potato plants."
    icon_state = 'seed-sweetpotato'
    lifespan = 30
    endurance = 15
    production = 1
    plant_yield = 4
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    Grown_Sprite = 'potato-grow'
    dead_Sprite = 'potato-dead'
    genes = ["Capacitive Cell Production"]
    mutates_into = [""]
    reagents_add = {'vitamin': 0.1,'sugar': 0.1,'nutriment': 0.1}
    species = 'sweetpotato'
List_of_plants.append(potato_sweet)
# Pumpkin: gourd; can mutate into blumpkin.
class pumpkin():
    name = 'pumpkin'
    plantname = 'Pumpkin Vines'
    Description = "These seeds grow into pumpkin vines."
    icon_state = 'seed-pumpkin'
    lifespan = 50
    endurance = 40
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'pumpkin-grow'
    dead_Sprite = 'pumpkin-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["pumpkin_blumpkin"]
    reagents_add = {'vitamin': 0.04,'nutriment': 0.2}
    species = 'pumpkin'
List_of_plants.append(pumpkin)
# Blumpkin: pumpkin mutation; terminal; yields ammonia and chlorine.
class pumpkin_blumpkin():
    name = 'pumpkin_blumpkin'
    plantname = 'Blumpkin Vines'
    Description = "These seeds grow into blumpkin vines."
    icon_state = 'seed-blumpkin'
    lifespan = 50
    endurance = 40
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_fruits'
    Grown_Sprite = 'pumpkin-grow'
    dead_Sprite = 'pumpkin-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'ammonia': 0.2,'chlorine': 0.1,'nutriment': 0.2}
    species = 'blumpkin'
List_of_plants.append(pumpkin_blumpkin)
# Rainbow bunch: decorative flowers; no mutations listed.
class rainbow_bunch():
    name = 'rainbow_bunch'
    plantname = 'Rainbow Flowers'
    Description = "A pack of seeds that'll grow into a beautiful bush of various colored flowers."
    icon_state = 'seed-rainbowbunch'
    lifespan = 25
    endurance = 10
    production = 3
    plant_yield = 5
    potency = 20
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_flowers'
    dead_Sprite = 'rainbowbunch-dead'
    genes = ["Perennial_Growth"]
    reagents_add = {'nutriment': 0.05}
    species = 'rainbowbunch'
List_of_plants.append(rainbow_bunch)
# Strange seeds: mystery record with placeholder species.
# NOTE(review): the class name shadows the stdlib `random` module at module
# level — confirm nothing later in this file imports/uses `random` directly.
class random():
    name = 'random'
    plantname = 'strange plant'
    Description = "Mysterious seeds as strange as their name implies. Spooky."
    icon_state = 'seed-x'
    lifespan = 25
    endurance = 15
    production = 6
    plant_yield = 3
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    Grown_Sprite = 'xpod-grow'
    dead_Sprite = 'xpod-dead'
    species = '?????'
List_of_plants.append(random)
# Replica pod: mutation target of cabbage; minimal record (no reagents/sprites).
class replicapod():
    name = 'replicapod'
    plantname = 'Replica Pod'
    Description = "These seeds grow into replica pods. They say these are used to harvest humans."
    icon_state = 'seed-replicapod'
    lifespan = 50
    endurance = 8
    production = 1
    plant_yield = 1
    potency = 30
    weed_growth_rate = 1
    weed_resistance = 5
    species = 'replicapod'
List_of_plants.append(replicapod)
# Carrot: vegetable; yields oculine; can mutate into parsnip.
class carrot():
    name = 'carrot'
    plantname = 'Carrots'
    Description = "These seeds grow into carrots."
    icon_state = 'seed-carrot'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    mutates_into = ["carrot_parsnip"]
    reagents_add = {'oculine': 0.25,'vitamin': 0.04,'nutriment': 0.05}
    species = 'carrot'
List_of_plants.append(carrot)
# Parsnip: carrot mutation; terminal.
class carrot_parsnip():
    name = 'carrot_parsnip'
    plantname = 'Parsnip'
    Description = "These seeds grow into parsnips."
    icon_state = 'seed-parsnip'
    lifespan = 25
    endurance = 15
    production = 1
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    dead_Sprite = 'carrot-dead'
    mutates_into = [""]
    reagents_add = {'vitamin': 0.05,'nutriment': 0.05}
    species = 'parsnip'
List_of_plants.append(carrot_parsnip)
# White beet: sugar-bearing root; can mutate into red beet.
class whitebeet():
    name = 'whitebeet'
    plantname = 'White-Beet Plants'
    Description = "These seeds grow into sugary beet producing plants."
    icon_state = 'seed-whitebeet'
    lifespan = 60
    endurance = 50
    production = 6
    plant_yield = 6
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    dead_Sprite = 'whitebeet-dead'
    mutates_into = ["redbeet"]
    reagents_add = {'vitamin': 0.04,'sugar': 0.2,'nutriment': 0.05}
    species = 'whitebeet'
List_of_plants.append(whitebeet)
# Red beet: whitebeet mutation; carries Densified Chemicals gene.
class redbeet():
    name = 'redbeet'
    plantname = 'Red-Beet Plants'
    Description = "These seeds grow into red beet producing plants."
    icon_state = 'seed-redbeet'
    lifespan = 60
    endurance = 50
    production = 6
    plant_yield = 6
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    growing_icon = 'growing_vegetables'
    dead_Sprite = 'whitebeet-dead'
    genes = ["Densified Chemicals"]
    reagents_add = {'vitamin': 0.05,'nutriment': 0.05}
    species = 'redbeet'
List_of_plants.append(redbeet)
# Tea aspera: base tea plant; no reagents_add; can mutate into tea_astra.
class tea():
    name = 'tea'
    plantname = 'Tea Aspera Plant'
    Description = "These seeds grow into tea plants."
    icon_state = 'seed-teaaspera'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'tea-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["tea_astra"]
    species = 'teaaspera'
List_of_plants.append(tea)
# Tea astra: tea mutation; terminal; adds synaptizine.
class tea_astra():
    name = 'tea_astra'
    plantname = 'Tea Astra Plant'
    Description = "These seeds grow into tea plants."
    icon_state = 'seed-teaastra'
    lifespan = 20
    endurance = 15
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'tea-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'synaptizine': 0.1,'vitamin': 0.04,'toxin/teapowder': 0.1}
    species = 'teaastra'
List_of_plants.append(tea_astra)
# Coffee arabica: base coffee bush; can mutate into robusta.
class coffee():
    name = 'coffee'
    plantname = 'Coffee Arabica Bush'
    Description = "These seeds grow into coffee arabica bushes."
    icon_state = 'seed-coffeea'
    lifespan = 30
    endurance = 20
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'coffee-dead'
    genes = ["Perennial_Growth"]
    mutates_into = ["coffee_robusta"]
    reagents_add = {'vitamin': 0.04,'toxin/coffeepowder': 0.1}
    species = 'coffeea'
List_of_plants.append(coffee)
# Coffee robusta: coffee mutation; terminal; adds ephedrine.
class coffee_robusta():
    name = 'coffee_robusta'
    plantname = 'Coffee Robusta Bush'
    Description = "These seeds grow into coffee robusta bushes."
    icon_state = 'seed-coffeer'
    lifespan = 30
    endurance = 20
    production = 5
    plant_yield = 5
    potency = 10
    weed_growth_rate = 1
    weed_resistance = 5
    dead_Sprite = 'coffee-dead'
    genes = ["Perennial_Growth"]
    mutates_into = [""]
    reagents_add = {'ephedrine': 0.1,'vitamin': 0.04,'toxin/coffeepowder': 0.1}
    species = 'coffeer'
List_of_plants.append(coffee_robusta)
class tobacco():
name = 'tobacco'
plantname = 'Tobacco Plant'
Description = "These seeds grow into tobacco plants."
icon_state = 'seed-tobacco'
lifespan = 20
endurance = 15
production = 5
plant_yield = 10
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'tobacco-dead'
mutates_into = ["tobacco_space"]
reagents_add = {'nicotine': 0.03,'nutriment': 0.03}
species = 'tobacco'
List_of_plants.append(tobacco)
class tobacco_space():
name = 'tobacco_space'
plantname = 'Space Tobacco Plant'
Description = "These seeds grow into space tobacco plants."
icon_state = 'seed-stobacco'
lifespan = 20
endurance = 15
production = 5
plant_yield = 10
potency = 10
weed_growth_rate = 1
weed_resistance = 5
dead_Sprite = 'tobacco-dead'
mutates_into = [""]
reagents_add = {'salbutamol': 0.05,'nicotine': 0.08,'nutriment': 0.03}
species = 'stobacco'
List_of_plants.append(tobacco_space)
class tomato():
name = 'tomato'
plantname = 'Tomato Plants'
Description = "These seeds grow into tomato plants."
icon_state = 'seed-tomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'tomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Liquid Contents", "Perennial_Growth"]
mutates_into = ["tomato_blue","tomato_blood","tomato_killer"]
reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
species = 'tomato'
List_of_plants.append(tomato)
class tomato_blood():
name = 'tomato_blood'
plantname = 'Blood-Tomato Plants'
Description = "These seeds grow into blood-tomato plants."
icon_state = 'seed-bloodtomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 3
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'tomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Liquid Contents", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'blood': 0.2,'vitamin': 0.04,'nutriment': 0.1}
species = 'bloodtomato'
List_of_plants.append(tomato_blood)
class tomato_blue():
name = 'tomato_blue'
plantname = 'Blue-Tomato Plants'
Description = "These seeds grow into blue-tomato plants."
icon_state = 'seed-bluetomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'bluetomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Slippery Skin", "Perennial_Growth"]
mutates_into = ["tomato_blue_bluespace"]
reagents_add = {'lube': 0.2,'vitamin': 0.04,'nutriment': 0.1}
species = 'bluetomato'
List_of_plants.append(tomato_blue)
class tomato_blue_bluespace():
name = 'tomato_blue_bluespace'
plantname = 'Bluespace Tomato Plants'
Description = "These seeds grow into bluespace tomato plants."
icon_state = 'seed-bluespacetomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'tomato-grow'
dead_Sprite = 'tomato-dead'
genes = ["Liquid Contents", "Slippery Skin", "Bluespace Activity", "Perennial_Growth"]
mutates_into = [""]
reagents_add = {'lube': 0.2,'bluespace': 0.2,'vitamin': 0.04,'nutriment': 0.1}
species = 'bluespacetomato'
List_of_plants.append(tomato_blue_bluespace)
class tomato_killer():
name = 'tomato_killer'
plantname = 'Killer-Tomato Plants'
Description = "These seeds grow into killer-tomato plants."
icon_state = 'seed-killertomato'
lifespan = 25
endurance = 15
production = 6
plant_yield = 2
potency = 10
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_fruits'
Grown_Sprite = 'killertomato-grow'
dead_Sprite = 'killertomato-dead'
genes = ["Liquid Contents"]
mutates_into = [""]
reagents_add = {'vitamin': 0.04,'nutriment': 0.1}
species = 'killertomato'
List_of_plants.append(tomato_killer)
class tower():
name = 'tower'
plantname = 'Tower Caps'
Description = "This mycelium grows into tower-cap mushrooms."
icon_state = 'mycelium-tower'
lifespan = 80
endurance = 50
production = 1
plant_yield = 5
potency = 50
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
dead_Sprite = 'towercap-dead'
genes = ["Fungal Vitality"]
mutates_into = ["tower_steel"]
species = 'towercap'
List_of_plants.append(tower)
class tower_steel():
name = 'tower_steel'
plantname = 'Steel Caps'
Description = "This mycelium grows into steel logs."
icon_state = 'mycelium-steelcap'
lifespan = 80
endurance = 50
production = 1
plant_yield = 5
potency = 50
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing_mushrooms'
dead_Sprite = 'towercap-dead'
genes = ["Fungal Vitality"]
mutates_into = [""]
species = 'steelcap'
List_of_plants.append(tower_steel)
class bamboo():
name = 'bamboo'
plantname = 'Bamboo'
Description = "A plant known for its flexible and resistant logs."
icon_state = 'seed-bamboo'
lifespan = 80
endurance = 70
production = 2
plant_yield = 5
potency = 50
weed_growth_rate = 1
weed_resistance = 5
growing_icon = 'growing'
dead_Sprite = 'bamboo-dead'
genes = ["Perennial_Growth"]
species = 'bamboo'
List_of_plants.append(bamboo)
# ---------------------------------------------------------------------------
# Produce (grown item) definitions.
#
# Each class below is a minimal marker type carrying only its internal `name`
# id, and is registered in List_of_produce (defined earlier in this file).
# ---------------------------------------------------------------------------
class ambrosia():
    name = 'ambrosia'
List_of_produce.append(ambrosia)

class ambrosia_vulgaris():
    name = 'ambrosia_vulgaris'
List_of_produce.append(ambrosia_vulgaris)

class ambrosia_deus():
    name = 'ambrosia_deus'
List_of_produce.append(ambrosia_deus)

class apple():
    name = 'apple'
List_of_produce.append(apple)

class banana():
    name = 'banana'
List_of_produce.append(banana)

class banana_mime():
    name = 'banana_mime'
List_of_produce.append(banana_mime)

class banana_bluespace():
    name = 'banana_bluespace'
List_of_produce.append(banana_bluespace)

class soybeans():
    name = 'soybeans'
List_of_produce.append(soybeans)

class berries():
    name = 'berries'
List_of_produce.append(berries)

class berries_poison():
    name = 'berries_poison'
List_of_produce.append(berries_poison)

class berries_death():
    name = 'berries_death'
List_of_produce.append(berries_death)

class berries_glow():
    name = 'berries_glow'
List_of_produce.append(berries_glow)

class cherries():
    name = 'cherries'
List_of_produce.append(cherries)

class bluecherries():
    name = 'bluecherries'
List_of_produce.append(bluecherries)

class cherrybulbs():
    name = 'cherrybulbs'
List_of_produce.append(cherrybulbs)

class grapes():
    name = 'grapes'
List_of_produce.append(grapes)

class cannabis():
    name = 'cannabis'
List_of_produce.append(cannabis)

class cannabis_rainbow():
    name = 'cannabis_rainbow'
List_of_produce.append(cannabis_rainbow)

class cannabis_death():
    name = 'cannabis_death'
List_of_produce.append(cannabis_death)

class cannabis_white():
    name = 'cannabis_white'
List_of_produce.append(cannabis_white)

class wheat():
    name = 'wheat'
List_of_produce.append(wheat)

class oat():
    name = 'oat'
List_of_produce.append(oat)

class rice():
    name = 'rice'
List_of_produce.append(rice)

class meatwheat():
    name = 'meatwheat'
List_of_produce.append(meatwheat)

class chili():
    name = 'chili'
List_of_produce.append(chili)

class icepepper():
    name = 'icepepper'
List_of_produce.append(icepepper)

class ghost_chili():
    name = 'ghost_chili'
List_of_produce.append(ghost_chili)

class citrus():
    name = 'citrus'
List_of_produce.append(citrus)

class citrus_lime():
    name = 'citrus_lime'
List_of_produce.append(citrus_lime)

class citrus_orange():
    name = 'citrus_orange'
List_of_produce.append(citrus_orange)

class citrus_lemon():
    name = 'citrus_lemon'
List_of_produce.append(citrus_lemon)

class firelemon():
    name = 'firelemon'
List_of_produce.append(firelemon)

class citrus_orange_3d():
    name = 'citrus_orange_3d'
List_of_produce.append(citrus_orange_3d)

class cocoapod():
    name = 'cocoapod'
List_of_produce.append(cocoapod)

class vanillapod():
    name = 'vanillapod'
List_of_produce.append(vanillapod)

class bungofruit():
    name = 'bungofruit'
List_of_produce.append(bungofruit)

class bungopit():
    name = 'bungopit'
List_of_produce.append(bungopit)

class corn():
    name = 'corn'
List_of_produce.append(corn)

class eggplant():
    name = 'eggplant'
List_of_produce.append(eggplant)

class poppy():
    name = 'poppy'
List_of_produce.append(poppy)

class poppy_lily():
    name = 'poppy_lily'
List_of_produce.append(poppy_lily)

class trumpet():
    name = 'trumpet'
List_of_produce.append(trumpet)

class poppy_geranium():
    name = 'poppy_geranium'
List_of_produce.append(poppy_geranium)

class harebell():
    name = 'harebell'
List_of_produce.append(harebell)

class moonflower():
    name = 'moonflower'
List_of_produce.append(moonflower)

class grass():
    name = 'grass'
List_of_produce.append(grass)

class grass_fairy():
    name = 'grass_fairy'
List_of_produce.append(grass_fairy)

class watermelon():
    name = 'watermelon'
List_of_produce.append(watermelon)

class holymelon():
    name = 'holymelon'
List_of_produce.append(holymelon)

class galaxythistle():
    name = 'galaxythistle'
List_of_produce.append(galaxythistle)

class cabbage():
    name = 'cabbage'
List_of_produce.append(cabbage)

class sugarcane():
    name = 'sugarcane'
List_of_produce.append(sugarcane)

class shell_gatfruit():
    name = 'shell_gatfruit'
List_of_produce.append(shell_gatfruit)

class cherry_bomb():
    name = 'cherry_bomb'
List_of_produce.append(cherry_bomb)

class mushroom():
    name = 'mushroom'
List_of_produce.append(mushroom)

class mushroom_reishi():
    name = 'mushroom_reishi'
List_of_produce.append(mushroom_reishi)

class mushroom_amanita():
    name = 'mushroom_amanita'
List_of_produce.append(mushroom_amanita)

class mushroom_angel():
    name = 'mushroom_angel'
List_of_produce.append(mushroom_angel)

class mushroom_libertycap():
    name = 'mushroom_libertycap'
List_of_produce.append(mushroom_libertycap)

class mushroom_plumphelmet():
    name = 'mushroom_plumphelmet'
List_of_produce.append(mushroom_plumphelmet)

class mushroom_walkingmushroom():
    name = 'mushroom_walkingmushroom'
List_of_produce.append(mushroom_walkingmushroom)

class mushroom_chanterelle():
    name = 'mushroom_chanterelle'
List_of_produce.append(mushroom_chanterelle)

class mushroom_jupitercup():
    name = 'mushroom_jupitercup'
List_of_produce.append(mushroom_jupitercup)

class mushroom_glowshroom():
    name = 'mushroom_glowshroom'
List_of_produce.append(mushroom_glowshroom)

class mushroom_glowshroom_glowcap():
    name = 'mushroom_glowshroom_glowcap'
List_of_produce.append(mushroom_glowshroom_glowcap)

class mushroom_glowshroom_shadowshroom():
    name = 'mushroom_glowshroom_shadowshroom'
List_of_produce.append(mushroom_glowshroom_shadowshroom)
class nettle():
    """Produce marker for the nettle plant."""
    # Bug fix: the id was 'nettle ' with a stray trailing space, unlike every
    # sibling produce class whose name matches its class name exactly; any
    # lookup keyed on the name would silently miss this entry.
    name = 'nettle'
List_of_produce.append(nettle)

# NOTE(review): several classes below (random, carrot, whitebeet, tea,
# tea_astra, coffee, tobacco, tomato, tomato_blood, tomato_blue,
# tomato_blue_bluespace, tomato_killer) rebind module-level names already
# used by the seed classes defined earlier in this file.  The earlier class
# objects remain reachable through List_of_plants, but the module attribute
# afterwards refers to the produce variant -- confirm this shadowing is
# intentional.  `random` would also shadow the stdlib module name if that
# module were ever imported here.
class nettle_basic():
    name = 'nettle_basic'
List_of_produce.append(nettle_basic)

class nettle_death():
    name = 'nettle_death'
List_of_produce.append(nettle_death)

class onion():
    name = 'onion'
List_of_produce.append(onion)

class onion_red():
    name = 'onion_red'
List_of_produce.append(onion_red)

class _obj_item_reagent_containers_food_snacks_onion_slice():
    name = '_obj_item_reagent_containers_food_snacks_onion_slice'
List_of_produce.append(_obj_item_reagent_containers_food_snacks_onion_slice)

class potato():
    name = 'potato'
List_of_produce.append(potato)

class potato_wedges():
    name = 'potato_wedges'
List_of_produce.append(potato_wedges)

class pumpkin():
    name = 'pumpkin'
List_of_produce.append(pumpkin)

class rainbow_flower():
    name = 'rainbow_flower'
List_of_produce.append(rainbow_flower)

class random():
    name = 'random'
List_of_produce.append(random)

class carrot():
    name = 'carrot'
List_of_produce.append(carrot)

class parsnip():
    name = 'parsnip'
List_of_produce.append(parsnip)

class whitebeet():
    name = 'whitebeet'
List_of_produce.append(whitebeet)

class tea():
    name = 'tea'
List_of_produce.append(tea)

class tea_astra():
    name = 'tea_astra'
List_of_produce.append(tea_astra)

class coffee():
    name = 'coffee'
List_of_produce.append(coffee)

class tobacco():
    name = 'tobacco'
List_of_produce.append(tobacco)

class tomato():
    name = 'tomato'
List_of_produce.append(tomato)

class tomato_blood():
    name = 'tomato_blood'
List_of_produce.append(tomato_blood)

class tomato_blue():
    name = 'tomato_blue'
List_of_produce.append(tomato_blue)

class tomato_blue_bluespace():
    name = 'tomato_blue_bluespace'
List_of_produce.append(tomato_blue_bluespace)

class tomato_killer():
    name = 'tomato_killer'
List_of_produce.append(tomato_killer)
#pragma once
#include <c10/core/Allocator.h>
#include <c10/util/Exception.h>
#include <c10/util/Registry.h>
#include <ATen/detail/AcceleratorHooksInterface.h>
// NB: Class must live in `at` due to limitations of Registry.h.
namespace at {
// Forward-declares at::cuda::NVRTC
namespace cuda {
struct NVRTC;
} // namespace cuda
// Help text appended to every "CUDA library not loaded" error below.  The
// two variants differ only in the linker flag and inspection tool named,
// matching the MSVC vs. POSIX toolchains.
#ifdef _MSC_VER
constexpr const char* CUDA_HELP =
  "PyTorch splits its backend into two shared libraries: a CPU library "
  "and a CUDA library; this error has occurred because you are trying "
  "to use some CUDA functionality, but the CUDA library has not been "
  "loaded by the dynamic linker for some reason.  The CUDA library MUST "
  "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
  "One common culprit is a lack of -INCLUDE:?warp_size@cuda@at@@YAHXZ "
  "in your link arguments; many dynamic linkers will delete dynamic library "
  "dependencies if you don't depend on any of their symbols.  You can check "
  "if this has occurred by using link on your binary to see if there is a "
  "dependency on *_cuda.dll library.";
#else
constexpr const char* CUDA_HELP =
  "PyTorch splits its backend into two shared libraries: a CPU library "
  "and a CUDA library; this error has occurred because you are trying "
  "to use some CUDA functionality, but the CUDA library has not been "
  "loaded by the dynamic linker for some reason.  The CUDA library MUST "
  "be loaded, EVEN IF you don't directly use any symbols from the CUDA library! "
  "One common culprit is a lack of -Wl,--no-as-needed in your link arguments; many "
  "dynamic linkers will delete dynamic library dependencies if you don't "
  "depend on any of their symbols.  You can check if this has occurred by "
  "using ldd on your binary to see if there is a dependency on *_cuda.so "
  "library.";
#endif
// The CUDAHooksInterface is an omnibus interface for any CUDA functionality
// which we may want to call into from CPU code (and thus must be dynamically
// dispatched, to allow for separate compilation of CUDA code). How do I
// decide if a function should live in this class? There are two tests:
//
// 1. Does the *implementation* of this function require linking against
// CUDA libraries?
//
// 2. Is this function *called* from non-CUDA ATen code?
//
// (2) should filter out many ostensible use-cases, since many times a CUDA
// function provided by ATen is only really ever used by actual CUDA code.
//
// TODO: Consider putting the stub definitions in another class, so that one
// never forgets to implement each virtual function in the real implementation
// in CUDAHooks. This probably doesn't buy us much though.
// Stub implementation of the CUDA hooks.  Every virtual below either returns
// a conservative default (false / -1 / 0) or fails with TORCH_CHECK plus the
// CUDA_HELP text; the real implementations are registered by the ATen_cuda
// library through CUDAHooksRegistry (see below).
struct TORCH_API CUDAHooksInterface : AcceleratorHooksInterface {
  // This should never actually be implemented, but it is used to
  // squelch -Werror=non-virtual-dtor
  ~CUDAHooksInterface() override = default;

  // Initialize THCState and, transitively, the CUDA state
  void init() const override {
    TORCH_CHECK(false, "Cannot initialize CUDA without ATen_cuda library. ", CUDA_HELP);
  }

  // Generator access: both fail in the stub; only the real CUDA backend can
  // hand out CUDA RNG generators.
  const Generator& getDefaultGenerator(
      [[maybe_unused]] DeviceIndex device_index = -1) const override {
    TORCH_CHECK(
        false,
        "Cannot get default CUDA generator without ATen_cuda library. ",
        CUDA_HELP);
  }

  Generator getNewGenerator(
      [[maybe_unused]] DeviceIndex device_index = -1) const override {
    TORCH_CHECK(
        false,
        "Cannot get CUDA generator without ATen_cuda library. ",
        CUDA_HELP);
  }

  Device getDeviceFromPtr(void* /*data*/) const override {
    TORCH_CHECK(false, "Cannot get device of pointer on CUDA without ATen_cuda library. ", CUDA_HELP);
  }

  // Without CUDA loaded no pointer can be CUDA-pinned, so this is a safe
  // default rather than an error.
  bool isPinnedPtr(const void* /*data*/) const override {
    return false;
  }

  // Capability probes: all report "not available" in the stub.
  virtual bool hasCUDA() const {
    return false;
  }

  virtual bool hasCUDART() const {
    return false;
  }

  virtual bool hasMAGMA() const {
    return false;
  }

  virtual bool hasCuDNN() const {
    return false;
  }

  virtual bool hasCuSOLVER() const {
    return false;
  }

  virtual bool hasCuBLASLt() const {
    return false;
  }

  virtual bool hasROCM() const {
    return false;
  }

  virtual bool hasCKSDPA() const {
    return false;
  }

  virtual bool hasCKGEMM() const {
    return false;
  }

  virtual const at::cuda::NVRTC& nvrtc() const {
    TORCH_CHECK(false, "NVRTC requires CUDA. ", CUDA_HELP);
  }

  bool hasPrimaryContext(DeviceIndex device_index) const override {
    TORCH_CHECK(false, "Cannot call hasPrimaryContext(", device_index, ") without ATen_cuda library. ", CUDA_HELP);
  }

  // -1 conventionally signals "no current device" here.
  virtual DeviceIndex current_device() const {
    return -1;
  }

  Allocator* getPinnedMemoryAllocator() const override {
    TORCH_CHECK(false, "Pinned memory requires CUDA. ", CUDA_HELP);
  }

  virtual Allocator* getCUDADeviceAllocator() const {
    TORCH_CHECK(false, "CUDADeviceAllocator requires CUDA. ", CUDA_HELP);
  }

  // Build-configuration probes.
  virtual bool compiledWithCuDNN() const {
    return false;
  }

  virtual bool compiledWithMIOpen() const {
    return false;
  }

  virtual bool supportsDilatedConvolutionWithCuDNN() const {
    return false;
  }

  virtual bool supportsDepthwiseConvolutionWithCuDNN() const {
    return false;
  }

  virtual bool supportsBFloat16ConvolutionWithCuDNNv8() const {
    return false;
  }

  virtual bool supportsBFloat16RNNWithCuDNN() const {
    return false;
  }

  // Version queries: all fail in the stub, since no backing library exists.
  virtual long versionCuDNN() const {
    TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual long versionRuntimeCuDNN() const {
    TORCH_CHECK(false, "Cannot query cuDNN version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual long versionCuDNNFrontend() const {
    TORCH_CHECK(false, "Cannot query cuDNN Frontend version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual long versionMIOpen() const {
    TORCH_CHECK(false, "Cannot query MIOpen version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual long versionHipBLASLt() const {
    TORCH_CHECK(false, "Cannot query HipBLASLt version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual long versionCUDART() const {
    TORCH_CHECK(false, "Cannot query CUDART version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual std::string showConfig() const {
    TORCH_CHECK(false, "Cannot query detailed CUDA version without ATen_cuda library. ", CUDA_HELP);
  }

  virtual double batchnormMinEpsilonCuDNN() const {
    TORCH_CHECK(false,
        "Cannot query batchnormMinEpsilonCuDNN() without ATen_cuda library. ", CUDA_HELP);
  }

  // cuFFT plan-cache management (see torch.backends.cuda.cufft_plan_cache).
  virtual int64_t cuFFTGetPlanCacheMaxSize(DeviceIndex /*device_index*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  virtual void cuFFTSetPlanCacheMaxSize(DeviceIndex /*device_index*/, int64_t /*max_size*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  virtual int64_t cuFFTGetPlanCacheSize(DeviceIndex /*device_index*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  virtual void cuFFTClearPlanCache(DeviceIndex /*device_index*/) const {
    TORCH_CHECK(false, "Cannot access cuFFT plan cache without ATen_cuda library. ", CUDA_HELP);
  }

  // Zero GPUs when the CUDA library is absent; safe default, not an error.
  virtual int getNumGPUs() const {
    return 0;
  }

#ifdef USE_ROCM
  virtual bool isGPUArch(const std::vector<std::string>& /*archs*/, DeviceIndex = -1 /*device_index*/) const {
    TORCH_CHECK(false, "Cannot check GPU arch without ATen_cuda library. ", CUDA_HELP);
  }

  virtual const std::vector<std::string>& getHipblasltPreferredArchs() const {
    // `empty` is unreachable after the TORCH_CHECK; it only satisfies the
    // by-reference return type for compilers that require a return path.
    static const std::vector<std::string> empty;
    TORCH_CHECK(false, "Cannot get hipBLASLt preferred archs without ATen_cuda library. ", CUDA_HELP);
    return empty;
  }

  virtual const std::vector<std::string>& getHipblasltSupportedArchs() const {
    static const std::vector<std::string> empty;
    TORCH_CHECK(false, "Cannot get hipBLASLt supported archs without ATen_cuda library. ", CUDA_HELP);
    return empty;
  }
#endif

  virtual void deviceSynchronize(DeviceIndex /*device_index*/) const {
    TORCH_CHECK(false, "Cannot synchronize CUDA device without ATen_cuda library. ", CUDA_HELP);
  }
};
// NB: dummy argument to suppress "ISO C++11 requires at least one argument
// for the "..." in a variadic macro"
struct TORCH_API CUDAHooksArgs {};

// Registry through which the real CUDA hooks implementation (linked in the
// ATen_cuda library) registers itself at load time.
TORCH_DECLARE_REGISTRY(CUDAHooksRegistry, CUDAHooksInterface, CUDAHooksArgs);
#define REGISTER_CUDA_HOOKS(clsname) \
  C10_REGISTER_CLASS(CUDAHooksRegistry, clsname, clsname)

namespace detail {
// Accessor for the process-wide hooks object; presumably falls back to the
// stub CUDAHooksInterface above when no CUDA library is loaded -- confirm in
// the corresponding .cpp.
TORCH_API const CUDAHooksInterface& getCUDAHooks();
} // namespace detail
} // namespace at | c | github | https://github.com/pytorch/pytorch | aten/src/ATen/detail/CUDAHooksInterface.h |
"""
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent7000A import *
class agilentDSO7032A(agilent7000A):
    "Agilent InfiniiVision DSO7032A IVI oscilloscope driver"

    def __init__(self, *args, **kwargs):
        # Record the instrument id before the base-class __init__ runs, but
        # only if a more specific subclass has not already set one.
        self.__dict__.setdefault('_instrument_id', 'DSO7032A')

        super(agilentDSO7032A, self).__init__(*args, **kwargs)

        # Model-specific capabilities: two analog channels, no digital
        # channels, bandwidth 350e6 (presumably Hz -- matches the base
        # driver's convention; confirm there).
        self._analog_channel_count = 2
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 350e6

        # Re-run channel setup now that the channel counts are final.
        self._init_channels()
import os
import shutil
import tempfile
from importlib import import_module

from django import conf
from django.contrib import admin
from django.test import TestCase, override_settings
from django.test.utils import extend_sys_path
from django.utils._os import npath, upath
from django.utils.autoreload import gen_filenames
LOCALE_PATH = os.path.join(os.path.dirname(__file__), 'locale')
class TestFilenameGenerator(TestCase):
    """Tests for ``django.utils.autoreload.gen_filenames()``."""

    def setUp(self):
        # gen_filenames() memoizes modules/filenames at module level; empty
        # the caches so every test observes a fresh scan.
        from django.utils import autoreload
        autoreload._cached_modules = set()
        autoreload._cached_filenames = []

    def test_django_locales(self):
        """
        Test that gen_filenames() also yields the built-in django locale files.
        """
        filenames = list(gen_filenames())
        # Consistency fix: use upath() on __file__ as the other tests do, so
        # the comparison also holds where __file__ is a bytestring.
        self.assertIn(os.path.join(os.path.dirname(upath(conf.__file__)), 'locale',
                                   'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)

    @override_settings(LOCALE_PATHS=[LOCALE_PATH])
    def test_locale_paths_setting(self):
        """
        Test that gen_filenames also yields from LOCALE_PATHS locales.
        """
        filenames = list(gen_filenames())
        self.assertIn(os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
                      filenames)

    @override_settings(INSTALLED_APPS=[])
    def test_project_root_locale(self):
        """
        Test that gen_filenames also yields from the current directory (project
        root).
        """
        old_cwd = os.getcwd()
        os.chdir(os.path.dirname(__file__))
        try:
            filenames = list(gen_filenames())
            self.assertIn(
                os.path.join(LOCALE_PATH, 'nl', 'LC_MESSAGES', 'django.mo'),
                filenames)
        finally:
            # Always restore the working directory, even when the assertion
            # fails, so later tests are unaffected.
            os.chdir(old_cwd)

    @override_settings(INSTALLED_APPS=['django.contrib.admin'])
    def test_app_locales(self):
        """
        Test that gen_filenames also yields from locale dirs in installed apps.
        """
        filenames = list(gen_filenames())
        self.assertIn(
            os.path.join(os.path.dirname(upath(admin.__file__)), 'locale', 'nl', 'LC_MESSAGES', 'django.mo'),
            filenames
        )

    @override_settings(USE_I18N=False)
    def test_no_i18n(self):
        """
        If i18n machinery is disabled, there is no need for watching the
        locale files.
        """
        filenames = list(gen_filenames())
        self.assertNotIn(
            os.path.join(os.path.dirname(upath(conf.__file__)), 'locale', 'nl', 'LC_MESSAGES', 'django.mo'),
            filenames
        )

    def test_only_new_files(self):
        """
        When calling a second time gen_filenames with only_new = True, only
        files from newly loaded modules should be given.
        """
        list(gen_filenames())
        # fractions is imported here for the first time in this process, so
        # its source file is the single "new" entry.
        from fractions import Fraction  # NOQA
        filenames2 = list(gen_filenames(only_new=True))
        self.assertEqual(len(filenames2), 1)
        self.assertTrue(filenames2[0].endswith('fractions.py'))
        # Source files, never compiled ones, should be reported.
        self.assertFalse(any(f.endswith('.pyc') for f in gen_filenames()))

    def test_deleted_removed(self):
        """
        A file from a loaded module is no longer reported once it is deleted.
        """
        dirname = tempfile.mkdtemp()
        # Bug fix: the temporary directory used to leak; remove it (with any
        # leftover contents) once the test finishes, pass or fail.
        self.addCleanup(shutil.rmtree, dirname, True)
        filename = os.path.join(dirname, 'test_deleted_removed_module.py')
        with open(filename, 'w'):
            pass
        with extend_sys_path(dirname):
            import_module('test_deleted_removed_module')
        self.assertIn(npath(filename), gen_filenames())
        os.unlink(filename)
        # Bug fix: compare the same native-path form asserted present above,
        # so the negative assertion is meaningful on every platform.
        self.assertNotIn(npath(filename), gen_filenames())
#
# Part of p5: A Python package based on Processing
# Copyright (C) 2017-2019 Abhik Pal
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
from numpy.linalg import inv
from dataclasses import dataclass
from sys import stderr
import numpy as np
import math
from p5.pmath import matrix
import builtins
from vispy import gloo
from vispy.gloo import Texture2D, Program
from contextlib import contextmanager
from p5.core.constants import Z_EPSILON
from p5.core.geometry import Geometry
from ..Vispy2DRenderer.shape import PShape
from p5.pmath.matrix import translation_matrix
from ..Vispy2DRenderer.openglrenderer import OpenGLRenderer, get_render_primitives, to_3x3, Style, COLOR_WHITE
from .shaders3d import src_default, src_fbuffer, src_normal, src_phong
from p5.core.material import BasicMaterial, NormalMaterial, BlinnPhongMaterial
class GlslList:
    """Fixed-capacity array of uniform entries destined for a GLSL shader.

    Entries live row-wise in a zero-initialised numpy array of shape
    ``(max_size, obj_size)``; ``size`` tracks how many rows are occupied.
    """

    def __init__(self, max_size, obj_size, dtype):
        """Create an empty list.

        max_size: maximum number of entries the list can hold
        obj_size: length of a single entry
        dtype: numpy dtype of the backing storage
        """
        self.max_size = max_size
        self.size = 0
        self.data = np.zeros((max_size, obj_size), dtype=dtype)

    def add(self, obj):
        """Append one entry; past capacity, warn on stderr and drop it."""
        if self.size < self.max_size:
            self.data[self.size] = obj
            self.size += 1
        else:
            message = "Too many instances of {} are added. Max size {}.".format(
                type(obj), self.max_size)
            print(message, file=stderr)

    def clear(self):
        """Zero the storage and mark the list empty."""
        self.size = 0
        self.data = np.zeros_like(self.data)
@dataclass
class Style3D(Style):
    # Default Blinn-Phong lighting coefficients and material.
    # NOTE(review): none of these attributes carries a type annotation, so
    # @dataclass does NOT turn them into fields -- they remain plain class
    # attributes shared by every instance (including the mutable numpy
    # arrays and the BasicMaterial).  Confirm this sharing is intentional.
    ambient = np.array([0.2] * 3)
    diffuse = np.array([0.6] * 3)
    specular = np.array([0.8] * 3)
    shininess = 8
    material = BasicMaterial(COLOR_WHITE)
class Renderer3D(OpenGLRenderer):
def __init__(self):
    """Set up the 3D shader programs, camera state and empty light lists."""
    super().__init__(src_fbuffer, src_default)
    self.style = Style3D()
    self.normal_prog = Program(src_normal.vert, src_normal.frag)
    self.phong_prog = Program(src_phong.vert, src_phong.frag)

    # Camera state.
    self.lookat_matrix = np.identity(4)
    self.camera_pos = np.zeros(3)

    # Lights: one fixed-capacity GlslList per shader uniform array.
    self.MAX_LIGHTS_PER_CATEGORY = 8
    capacity = self.MAX_LIGHTS_PER_CATEGORY
    for attr in (
            'ambient_light_color',
            'directional_light_dir',
            'directional_light_color',
            'directional_light_specular',
            'point_light_color',
            'point_light_pos',
            'point_light_specular'):
        setattr(self, attr, GlslList(capacity, 3, np.float32))
    for attr in ('const_falloff', 'linear_falloff', 'quadratic_falloff'):
        setattr(self, attr, GlslList(capacity, 1, np.float32))

    self.curr_constant_falloff = 0.0
    self.curr_linear_falloff = 0.0
    self.curr_quadratic_falloff = 0.0
    self.light_specular = np.array([0.0] * 3)
def initialize_renderer(self):
    """Initialize the base GL renderer, then size the 3D view state."""
    super().initialize_renderer()
    self.reset_view()
def reset_view(self):
    """Recompute viewport, projection matrix and framebuffer attachments
    from the current sketch size (builtins.width/height)."""
    # Physical viewport in device pixels.
    self.viewport = (
        0,
        0,
        int(builtins.width * builtins.pixel_x_density),
        int(builtins.height * builtins.pixel_y_density),
    )
    # Framebuffer-texture viewport in logical pixels.
    self.texture_viewport = (
        0,
        0,
        builtins.width,
        builtins.height,
    )
    gloo.set_viewport(*self.viewport)  # pylint: disable=no-member

    # cz: camera distance at which the sketch exactly fills a 60-degree
    # vertical field of view (height/2 subtends 30 degrees).
    cz = (builtins.height / 2) / math.tan(math.radians(30))
    self.projection_matrix = matrix.perspective_matrix(
        math.radians(60),
        builtins.width / builtins.height,
        0.1 * cz,
        10 * cz
    )
    self.transform_matrix = np.identity(4)
    self._update_shader_transforms()

    # Double-buffered color textures plus a depth buffer for the offscreen
    # framebuffer.
    self.fbuffer_tex_front = Texture2D(
        (builtins.height, builtins.width, 3))
    self.fbuffer_tex_back = Texture2D((builtins.height, builtins.width, 3))
    self.fbuffer.depth_buffer = gloo.RenderBuffer(
        (builtins.height, builtins.width))

    # Clear both color attachments once so the first frame starts blank.
    for buf in [self.fbuffer_tex_front, self.fbuffer_tex_back]:
        self.fbuffer.color_buffer = buf
        with self.fbuffer:
            self.clear()
def clear(self, color=True, depth=True):
    """Clear the renderer background.

    color: also clear the color buffer (default True)
    depth: also clear the depth buffer (default True)
    """
    gloo.set_state(clear_color=self.style.background_color)  # pylint: disable=no-member
    gloo.clear(color=color, depth=depth)  # pylint: disable=no-member
def clear_lights(self):
    """Empty every per-frame light list (colors, positions, falloffs)."""
    for light_list in (
            self.ambient_light_color,
            self.directional_light_color,
            self.directional_light_dir,
            self.directional_light_specular,
            self.point_light_color,
            self.point_light_pos,
            self.point_light_specular,
            self.const_falloff,
            self.linear_falloff,
            self.quadratic_falloff):
        light_list.clear()
def _comm_toggles(self, state=True):
    """Toggle blending and depth testing together.

    When enabling, also install the standard alpha-blend function and a
    'lequal' depth comparison.
    """
    gloo.set_state(blend=state)  # pylint: disable=no-member
    gloo.set_state(depth_test=state)  # pylint: disable=no-member
    if not state:
        return
    gloo.set_state(blend_func=('src_alpha', 'one_minus_src_alpha'))  # pylint: disable=no-member
    gloo.set_state(depth_func='lequal')  # pylint: disable=no-member
    def _update_shader_transforms(self):
        """Push the current projection and camera matrices to all shaders.

        Matrices are transposed and flattened before upload — presumably to
        match the GL column-major convention; confirm against the shader
        sources.
        """
        # Default shader
        self.default_prog['projection'] = self.projection_matrix.T.flatten()
        self.default_prog['perspective_matrix'] = self.lookat_matrix.T.flatten()
        # Normal shader
        self.normal_prog['projection'] = self.projection_matrix.T.flatten()
        self.normal_prog['perspective'] = self.lookat_matrix.T.flatten()
        # This is a no-op, meaning that the normals stay in world space, which
        # matches the behavior in p5.js
        normal_transform = np.identity(3)
        # I think the transformation below takes the vertices to camera space, but
        # the results are funky, so it's probably incorrect? - ziyaointl, 2020/07/20
        # normal_transform = np.linalg.inv(self.projection_matrix[:3, :3] @ self.lookat_matrix[:3, :3])
        self.normal_prog['normal_transform'] = normal_transform.flatten()
        # Blinn-Phong Shader
        self.phong_prog['projection'] = self.projection_matrix.T.flatten()
        self.phong_prog['perspective'] = self.lookat_matrix.T.flatten()
    @contextmanager
    def draw_loop(self):
        """The main draw loop context manager.

        Replays the previous frame (front texture) into the back
        framebuffer, lets user code draw on top of it inside the ``with``
        body, flushes the queued geometry, then presents the result on the
        default framebuffer and swaps the front/back textures.
        """
        self.transform_matrix = np.identity(4)
        self._update_shader_transforms()
        self.fbuffer.color_buffer = self.fbuffer_tex_back
        with self.fbuffer:
            gloo.set_viewport(*self.texture_viewport)  # pylint: disable=no-member
            self._comm_toggles()
            # Replay last frame so drawing accumulates across frames.
            self.fbuffer_prog['texture'] = self.fbuffer_tex_front
            self.fbuffer_prog.draw('triangle_strip')
            # Keep the replayed colors but restart depth from scratch.
            self.clear(color=False, depth=True)
            self.clear_lights()
            yield
            self.flush_geometry()
            self.transform_matrix = np.identity(4)
        # Present the freshly drawn back texture on screen.
        gloo.set_viewport(*self.viewport)  # pylint: disable=no-member
        self._comm_toggles(False)
        self.clear()
        self.fbuffer_prog['texture'] = self.fbuffer_tex_back
        self.fbuffer_prog.draw('triangle_strip')
        self.fbuffer_tex_front, self.fbuffer_tex_back = self.fbuffer_tex_back, self.fbuffer_tex_front
def _add_to_draw_queue_simple(self, stype, vertices, idx, color):
"""Adds shape of stype to draw queue
"""
self.draw_queue.append((stype, (vertices, idx, color, None, None)))
    def tnormals(self, shape):
        """Obtain a list of vertex normals in world coordinates

        Returns None for BasicMaterial shapes (the basic shader ignores
        normals).  Otherwise right-multiplies the row-vector normals by the
        inverse of the combined model matrix, which is equivalent to the
        standard inverse-transpose normal transform.
        """
        if isinstance(shape.material,
                      BasicMaterial):  # Basic shader doesn't need this
            return None
        return shape.vertex_normals @ np.linalg.inv(
            to_3x3(self.transform_matrix) @ to_3x3(shape.matrix))
    def render(self, shape):
        """Enqueue a Geometry or PShape for drawing.

        Vertices are transformed to world space on the CPU before being
        appended to the draw queue; GPU work happens later in
        flush_geometry().
        """
        if isinstance(shape, Geometry):
            n = len(shape.vertices)
            # Perform model transform
            # TODO: Investigate moving model transform from CPU to the GPU
            tverts = self._transform_vertices(
                np.hstack([shape.vertices, np.ones((n, 1))]),
                shape.matrix,
                self.transform_matrix)
            tnormals = self.tnormals(shape)
            edges = shape.edges
            faces = shape.faces
            self.add_to_draw_queue(
                'poly',
                tverts,
                edges,
                faces,
                self.style.fill_color,
                self.style.stroke_color,
                tnormals,
                self.style.material)
        elif isinstance(shape, PShape):
            # PShapes carry their own fill/stroke; None means disabled.
            fill = shape.fill.normalized if shape.fill else None
            stroke = shape.stroke.normalized if shape.stroke else None
            obj_list = get_render_primitives(shape)
            for obj in obj_list:
                stype, vertices, idx = obj
                # Transform vertices
                vertices = self._transform_vertices(
                    np.hstack([vertices, np.ones((len(vertices), 1))]),
                    shape._matrix,
                    self.transform_matrix)
                # Add to draw queue; line primitives use the stroke color,
                # everything else uses the fill color.
                self._add_to_draw_queue_simple(
                    stype, vertices, idx, stroke if stype == 'lines' else fill)
def shape(self, vertices, contours, shape_type, *args):
"""Render a PShape"""
self.render(PShape(vertices=vertices, contours=contours, shape_type=shape_type))
def add_to_draw_queue(self, stype, vertices, edges, faces,
fill=None, stroke=None, normals=None, material=None):
"""Add the given vertex data to the draw queue.
:param stype: type of shape to be added. Should be one of {'poly',
'path', 'point'}
:type stype: str
:param vertices: (N, 3) array containing the vertices to be drawn.
:type vertices: np.ndarray
:param edges: (N, 2) array containing edges as tuples of indices
into the vertex array. This can be None when not appropriate
(eg. for points)
:type edges: None | np.ndarray
:param faces: (N, 3) array containing faces as tuples of indices
into the vertex array. For 'point' and 'path' shapes, this can
be None
:type faces: np.ndarray
:param fill: Fill color of the shape as a normalized RGBA tuple.
When set to `None` the shape doesn't get a fill (default: None)
:type fill: None | tuple
:param stroke: Stroke color of the shape as a normalized RGBA
tuple. When set to `None` the shape doesn't get stroke
(default: None)
:type stroke: None | tuple
// TODO: Update documentation
// TODO: Unite style-related attributes for both 2D and 3D under one material class
"""
fill_shape = self.style.fill_enabled and not (fill is None)
stroke_shape = self.style.stroke_enabled and not (stroke is None)
if fill_shape and stype not in ['point', 'path']:
idx = np.array(faces, dtype=np.uint32).ravel()
self.draw_queue.append(
["triangles", (vertices, idx, fill, normals, material)])
if stroke_shape:
if stype == 'point':
idx = np.arange(0, len(vertices), dtype=np.uint32)
self.draw_queue.append(
["points", (vertices, idx, stroke, normals, material)])
else:
idx = np.array(edges, dtype=np.uint32).ravel()
self.draw_queue.append(
["lines", (vertices, idx, stroke, normals, material)])
def render_with_shaders(self, draw_type, draw_obj):
vertices, idx, color, normals, material = draw_obj
"""Like render_default but is aware of shaders other than the basic one"""
# 0. If material does not need normals nor extra info, strip them out
# and use the method from superclass
if material is None or isinstance(material, BasicMaterial) or draw_type in [
'points', 'lines']:
OpenGLRenderer.render_default(self, draw_type, [draw_obj[:3]])
return
# 1. Get the number of vertices
num_vertices = len(vertices)
# 2. Create empty buffers based on the number of vertices.
#
data = np.zeros(num_vertices,
dtype=[('position', np.float32, 3),
('normal', np.float32, 3)])
# 3. Loop through all the shapes in the geometry queue adding
# it's information to the buffer.
#
draw_indices = []
data['position'][0:num_vertices, ] = np.array(vertices)
draw_indices.append(idx)
data['normal'][0:num_vertices, ] = np.array(normals)
self.vertex_buffer.set_data(data)
self.index_buffer.set_data(np.hstack(draw_indices))
if isinstance(material, NormalMaterial):
# 4. Bind the buffer to the shader.
#
self.normal_prog.bind(self.vertex_buffer)
# 5. Draw the shape using the proper shape type and get rid of
# the buffers.
#
self.normal_prog.draw(draw_type, indices=self.index_buffer)
elif isinstance(material, BlinnPhongMaterial):
self.phong_prog.bind(self.vertex_buffer)
self.phong_prog['u_cam_pos'] = self.camera_pos
# Material attributes
self.phong_prog['u_ambient_color'] = material.ambient
self.phong_prog['u_diffuse_color'] = material.diffuse
self.phong_prog['u_specular_color'] = material.specular
self.phong_prog['u_shininess'] = material.shininess
# Directional lights
self.phong_prog['u_directional_light_count'] = self.directional_light_color.size
self.phong_prog['u_directional_light_dir'] = self.directional_light_dir.data
self.phong_prog['u_directional_light_color'] = self.directional_light_color.data
self.phong_prog['u_directional_light_specular'] = self.directional_light_specular.data
# Ambient lights
self.phong_prog['u_ambient_light_count'] = self.ambient_light_color.size
self.phong_prog['u_ambient_light_color'] = self.ambient_light_color.data
# Point lights
self.phong_prog['u_point_light_count'] = self.point_light_color.size
self.phong_prog['u_point_light_color'] = self.point_light_color.data
self.phong_prog['u_point_light_pos'] = self.point_light_pos.data
self.phong_prog['u_point_light_specular'] = self.point_light_specular.data
# Point light falloffs
self.phong_prog['u_const_falloff'] = self.const_falloff.data
self.phong_prog['u_linear_falloff'] = self.linear_falloff.data
self.phong_prog['u_quadratic_falloff'] = self.quadratic_falloff.data
# Draw
self.phong_prog.draw(draw_type, indices=self.index_buffer)
else:
raise NotImplementedError("Material not implemented")
def flush_geometry(self):
"""Flush all the shape geometry from the draw queue to the GPU.
"""
for index, shape in enumerate(self.draw_queue):
current_shape, current_obj = self.draw_queue[index][0], self.draw_queue[index][1]
# If current_shape is lines, bring it to the front by epsilon
# to resolve z-fighting
if current_shape == 'lines':
# line_transform is used whenever we render lines to break ties in depth
# We transform the points to camera space, move them by
# Z_EPSILON, and them move them back to world space
line_transform = inv(
self.lookat_matrix).dot(
translation_matrix(
0,
0,
Z_EPSILON).dot(
self.lookat_matrix))
vertices = current_obj[0]
current_obj = (np.hstack([vertices, np.ones((vertices.shape[0], 1))]).dot(line_transform.T)[:, :3],
*current_obj[1:])
self.render_with_shaders(current_shape, current_obj)
self.draw_queue = []
def cleanup(self):
super(Renderer3D, self).cleanup()
self.normal_prog.delete()
self.phong_prog.delete()
def add_ambient_light(self, r, g, b):
self.ambient_light_color.add(np.array((r, g, b)))
def add_directional_light(self, r, g, b, x, y, z):
self.directional_light_color.add(np.array((r, g, b)))
self.directional_light_dir.add(np.array((x, y, z)))
self.directional_light_specular.add(self.light_specular)
def add_point_light(self, r, g, b, x, y, z):
self.point_light_color.add(np.array((r, g, b)))
self.point_light_pos.add(np.array((x, y, z)))
self.point_light_specular.add(self.light_specular)
self.const_falloff.add(self.curr_constant_falloff)
self.linear_falloff.add(self.curr_linear_falloff)
self.quadratic_falloff.add(self.curr_quadratic_falloff) | unknown | codeparrot/codeparrot-clean | ||
########################################################################
#
# File Name: HTMLCollection.py
#
#
"""
WWW: http://4suite.com/4DOM e-mail: support@4suite.com
Copyright (c) 2000 Fourthought Inc, USA. All Rights Reserved.
See http://4suite.com/COPYRIGHT for license and copyright information
"""
from pyxml.dom import Node
from pyxml.dom import NoModificationAllowedErr
from pyxml.dom.html import HTML_NAME_ALLOWED
import UserList
class HTMLCollection(UserList.UserList):
    """A DOM HTMLCollection: an ordered list of nodes addressable by
    index or by name.

    Exposes a read-only ``length`` attribute plus the ``item()`` and
    ``namedItem()`` accessors required by the DOM HTML specification.
    """

    def __init__(self, list=None):
        UserList.UserList.__init__(self, list or [])

    ### Attribute Access Methods ###

    def __getattr__(self, name):
        # 'length' is a computed DOM attribute; anything else falls
        # through to the class namespace (AttributeError if absent).
        if name == 'length':
            return self._get_length()
        # Pass-through
        return getattr(HTMLCollection, name)

    def __setattr__(self, name, value):
        if name == 'length':
            # Raises NoModificationAllowedErr: length is read-only.
            self._set_length(value)
        # Pass-through
        self.__dict__[name] = value

    ### Attribute Methods ###

    def _get_length(self):
        return self.__len__()

    def _set_length(self, value):
        raise NoModificationAllowedErr()

    ### Methods ###

    def item(self, index):
        # Out-of-range lookups return None per the DOM specification.
        if index >= self.__len__():
            return None
        else:
            return self[int(index)]

    def namedItem(self, name):
        found_node = None
        for node in self:
            # IDs take precedence over NAMEs
            if node.getAttribute('ID') == name:
                found_node = node
                break
            if not found_node and node.getAttribute('NAME') == name \
               and node.tagName in HTML_NAME_ALLOWED:
                # We found a node with NAME attribute, but we have to wait
                # until all nodes are done (one might have an ID that matches)
                found_node = node
        # NOTE: removed a leftover debug `print` statement here.
        return found_node

    ### Overridden Methods ###

    def __repr__(self):
        st = "<HTMLCollection at %x: [" % (id(self))
        if len(self):
            for i in self[:-1]:
                st = st + repr(i) + ', '
            st = st + repr(self[-1])
        st = st + ']>'
        return st
# -*- coding: utf-8 -*-
"""
pythoncompat
"""
from .packages import chardet
import sys
# -------
# Pythons
# -------

# Syntax sugar.
_ver = sys.version_info

#: Python 2.x?
is_py2 = (_ver[0] == 2)

#: Python 3.x?
is_py3 = (_ver[0] == 3)

#: Python 3.0.x
is_py30 = (is_py3 and _ver[1] == 0)

#: Python 3.1.x
is_py31 = (is_py3 and _ver[1] == 1)

#: Python 3.2.x
is_py32 = (is_py3 and _ver[1] == 2)

#: Python 3.3.x
is_py33 = (is_py3 and _ver[1] == 3)

#: Python 3.4.x
is_py34 = (is_py3 and _ver[1] == 4)

#: Python 2.7.x
is_py27 = (is_py2 and _ver[1] == 7)

#: Python 2.6.x
is_py26 = (is_py2 and _ver[1] == 6)

#: Python 2.5.x
is_py25 = (is_py2 and _ver[1] == 5)

#: Python 2.4.x
is_py24 = (is_py2 and _ver[1] == 4)  # I'm assuming this is not by choice.

# ---------
# Platforms
# ---------

# Syntax sugar.
_ver = sys.version.lower()

is_pypy = ('pypy' in _ver)
is_jython = ('jython' in _ver)
is_ironpython = ('iron' in _ver)

# Assume CPython, if nothing else.
is_cpython = not any((is_pypy, is_jython, is_ironpython))

# Windows-based system.
is_windows = 'win32' in str(sys.platform).lower()

# Standard Linux 2+ system.
is_linux = ('linux' in str(sys.platform).lower())
is_osx = ('darwin' in str(sys.platform).lower())
is_hpux = ('hpux' in str(sys.platform).lower())   # Complete guess.
# sys.platform reports 'sunos5' on Solaris; the previous check for
# 'solar==' could never match.
is_solaris = ('sunos' in str(sys.platform).lower())
# Prefer simplejson (optional C-accelerated package) when available.
try:
    import simplejson as json
except ImportError:
    import json

# ---------
# Specifics
# ---------
# Re-export version-appropriate names so the rest of the package can use a
# single vocabulary on both Python 2 and 3.  Note this deliberately
# shadows the builtins `str`, `bytes` and `basestring` in this module.
# (Stripped dataset residue appended to the last line of this section.)

if is_py2:
    from urllib import quote, unquote, quote_plus, unquote_plus, urlencode, getproxies, proxy_bypass
    from urlparse import urlparse, urlunparse, urljoin, urlsplit, urldefrag
    from urllib2 import parse_http_list
    import cookielib
    from Cookie import Morsel
    from StringIO import StringIO
    from .packages.urllib3.packages.ordered_dict import OrderedDict
    from httplib import IncompleteRead

    builtin_str = str
    bytes = str
    str = unicode
    basestring = basestring
    numeric_types = (int, long, float)

elif is_py3:
    from urllib.parse import urlparse, urlunparse, urljoin, urlsplit, urlencode, quote, unquote, quote_plus, unquote_plus, urldefrag
    from urllib.request import parse_http_list, getproxies, proxy_bypass
    from http import cookiejar as cookielib
    from http.cookies import Morsel
    from io import StringIO
    from collections import OrderedDict
    from http.client import IncompleteRead

    builtin_str = str
    str = str
    bytes = bytes
    basestring = (str, bytes)
    numeric_types = (int, float)
#!/usr/bin/env python
# coding: latin-1
# Imagem Cinemática is a free software intended to be used as a tool for teachers
# and students. It utilizes Computer Vision techniques to extract the trajectory
# of moving objects from video data.
#
# The code contained in this project follows the Google Python Style Guide
# Revision 2.59.
# The specifics can be found at http://google.github.io/styleguide/pyguide.html
#
#
# Copyright (C) 2016 Bruno Abude Cardoso
#
# Imagem Cinemática is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Imagem Cinemática is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""Module responsible for loading the main components of the application.
"""
import sys
import logging
import os
from PyQt4.QtGui import QApplication
from PyQt4.QtCore import Qt
from ic import log as ic_log
LOG = None
def create_logger():
    """Set up the logging utility, using the formatter that best matches the OS.
    """
    global LOG
    handler = logging.StreamHandler()
    root_logger = logging.getLogger()
    # sys.platform is "linux2" on Python 2 but "linux" on Python 3.3+;
    # startswith() matches both, so ANSI colors work on either runtime.
    if sys.platform.startswith("linux"):
        handler.setFormatter(ic_log.ANSIFormatter())
    else:
        handler.setFormatter(ic_log.ColorlessFormatter())
    root_logger.addHandler(handler)
    handler.setLevel(logging.DEBUG)
    root_logger.setLevel(logging.NOTSET)
    # Filter out the annoying PyQt4 logging messages
    handler.addFilter(ic_log.NameFilter("PyQt4"))
    LOG = logging.getLogger(__name__)
def main():
    """Create the application and main window, then run the Qt event loop."""
    # Imported lazily so logging is configured before these packages load.
    from gui import application
    from gui import main_window
    from ic import engine
    from ic import plugin
    from ic import settings
    from ic import messages
    # NOTE(review): `engine` and `plugin` are not referenced below but may
    # register state on import — confirm before removing.

    settings.change("app_path", sys.path[0])
    app = application.Application(sys.argv)
    messages.start_timer()
    mainwindow = main_window.MainWindow()
    desktop = QApplication.desktop().screen()
    mainwindow.show()
    # Snap the window (including its frame decoration) to the top-left.
    mainwindow.move(mainwindow.frameGeometry().left()-mainwindow.geometry().left(), 0)
    mainwindow.resize(desktop.frameGeometry().width(), 150)
    mainwindow.filter_rack_window.move(0, mainwindow.frameGeometry().bottom())
    # Removed an unused local (`bordas`) that computed the frame border
    # width but was never read.
    app.exec_()
if __name__ == "__main__":
    # Configure logging first so GUI startup messages are captured.
    create_logger()
    main()
#!/usr/bin/env python
# Copyright (C) 2006-2013 Music Technology Group - Universitat Pompeu Fabra
#
# This file is part of Essentia
#
# Essentia is free software: you can redistribute it and/or modify it under
# the terms of the GNU Affero General Public License as published by the Free
# Software Foundation (FSF), either version 3 of the License, or (at your
# option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the Affero GNU General Public License
# version 3 along with this program. If not, see http://www.gnu.org/licenses/
from numpy import array
from essentia_test import *
class TestSBic(TestCase):
    """Unit tests for the SBic (Bayesian Information Criterion) segmenter.

    Methods prefixed with 'atest' are intentionally disabled (the test
    runner only collects 'test*' names); inline comments explain why each
    one is parked.
    """

    def atestNotEnoughFrames(self):
        self.assertComputeFails( SBic(), array([[]]) )
        self.assertComputeFails( SBic(), array([[1]]) )
        self.assertComputeFails( SBic(), array([[1], [1], [1]]) )

    def atestOneFeature(self):
        features = array([[0, 1, 2, 3, 4]])
        self.assertEqualVector(SBic(minLength=1)(features), [0, len(features[0])-1])

    # the following test is commented, as it fails due to rounding errors.
    # to solve this problem a quick solution would be to use a threshold higher
    # than 1e-10 (i.e. log10 = diag_cov > 1e-6 ? logd : -6
    def atestConstantFeature(self):
        features = array([ [10]*1000 ])
        self.assertEqualVector(SBic()(features), [0, len(features[0])-1])

    def atestTwoSegments(self):
        # Half 1s and half 0s
        # [ [ 1, ..., 1, 0, ..., 0],
        #   [ 1, ..., 1, 0, ..., 0] ]
        features = array( [ [1]*200 + [0]*200 ] +
                          [ [1]*200 + [0]*200 ])
        segments = SBic()(features)
        self.assertAlmostEqualVector(segments, [0, 199, 399], .2)

    # The following test is commented because for some reason reducing the
    # increment parameters create a lot of false positives (incorrect
    # segmentation points). This is probably due to the fact that the BIC is
    # trying to overfit the given data.
    def atestSmallIncs(self):
        # Half 1s and half 0s
        # [ [ 1, ..., 1, 0, ..., 0],
        #   [ 1, ..., 1, 0, ..., 0] ]
        # This test causes duplicates in the segmentation array, and these
        # duplicates caused a crash due to empty subarrays being created
        # (because from one segment to the next is zero length, because they
        # are the same position (sorry if that didn't make any sense)).
        features = array( [ [1]*200 + [0]*200 ] +
                          [ [1]*200 + [0]*200 ])
        segments = SBic(inc1=4, inc2=2)(features)
        self.assertAlmostEqualVector(segments, [0, 199, 399], .1)

    def atestSmallMinLength(self):
        features = array( [ [1]*200 + [0]*200 ] +
                          [ [1]*200 + [0]*200 ])
        segments = SBic(minLength=1)(features)
        self.assertAlmostEqualVector(segments, [0, 199, 399], .2)

    def atestLargeMinLength(self):
        # Fixed a 10x typo: sampleRate was 441000; the file uses 44100
        # (CD-quality) everywhere else.
        loader = MonoLoader(filename = join(testdata.audio_dir, 'recorded',
                                            'Vivaldi_Sonata_5_II_Allegro.wav'),
                            downmix='left', sampleRate=44100)
        if sys.platform == 'win32' and getattr(loader, 'hasDoubleCompute', False):
            # Parenthesized print works on both Python 2 and 3.
            print('WARNING: skipping this test as Windows seems to do weird things with memory...')
            return
        audio = loader()
        w = Windowing(type='blackmanharris62', size=2048)
        s = Spectrum(size=2048)
        m = MFCC(highFrequencyBound=8000)
        features = []
        for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
            if isSilent(frame):
                continue
            (_,mfcc) = m(s(w(frame)))
            features.append(mfcc)

        # compute transpose of features array
        features_transpose = []
        for i in range(len(features[0])):
            featureVals = []
            for f in features:
                featureVals.append(f[i])
            features_transpose.append(featureVals)
        features_transpose = array(features_transpose)

        nFrames = len(features)
        segments = SBic(minLength=nFrames*2, cpw=1.5, size1=1000,
                        inc1=300, size2=600, inc2=50)(features_transpose)
        # since the minLength is so high, the entire audio signal should be
        # considered as one segment
        expected = [0, nFrames-1]
        self.assertEqualVector(segments, expected)

    def testRegression(self):
        audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded',\
                                           'Vivaldi_Sonata_5_II_Allegro.wav'),
                           downmix='left', sampleRate=44100)()
        w = Windowing(type='blackmanharris62', size=2048)
        s = Spectrum(size=2048)
        m = MFCC(highFrequencyBound=8000)
        features = []
        for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
            (_,mfcc) = m(s(w(frame)))
            features.append(mfcc)

        # compute transpose of features array
        features_transpose = []
        for i in range(len(features[0])):
            featureVals = []
            for f in features:
                featureVals.append(f[i])
            features_transpose.append(featureVals)
        features_transpose = array(features_transpose)

        segments = SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50)(features_transpose)
        expected = [0., 49., 997., 1296., 1845., 2994., 3943., 4196.]
        self.assertEqualVector(segments, expected)

    def atestMinLengthEqualToAudioFrames(self):
        # sampleRate typo fixed (was 441000).
        audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded',\
                                           'britney.wav'),
                           downmix='left', sampleRate=44100)()
        w = Windowing(type='blackmanharris62', size=2048)
        s = Spectrum(size=2048)
        m = MFCC(highFrequencyBound=8000)
        features = []
        for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
            if isSilent(frame):
                continue
            (_,mfcc) = m(s(w(frame)))
            features.append(mfcc)

        # compute transpose of features array
        features_transpose = []
        for i in range(len(features[0])):
            featureVals = []
            for f in features:
                featureVals.append(f[i])
            features_transpose.append(featureVals)
        bands, nFrames = numpy.shape(features_transpose)
        features_transpose = array(features_transpose)

        sbic = SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50, minLength=nFrames)
        segments = sbic(features_transpose)
        expected = [0., nFrames-1]
        self.assertEqualVector(segments, expected)

    def atestMinLengthLargerThanAudioFrames(self):
        # sampleRate typo fixed (was 441000).
        audio = MonoLoader(filename = join(testdata.audio_dir, 'recorded',\
                                           'britney.wav'),
                           downmix='left', sampleRate=44100)()
        w = Windowing(type='blackmanharris62', size=2048)
        s = Spectrum(size=2048)
        m = MFCC(highFrequencyBound=8000)
        features = []
        for frame in FrameGenerator(audio, frameSize=2048, hopSize=1024):
            if isSilent(frame):
                continue
            (_,mfcc) = m(s(w(frame)))
            features.append(mfcc)

        # compute transpose of features array
        features_transpose = []
        for i in range(len(features[0])):
            featureVals = []
            for f in features:
                featureVals.append(f[i])
            features_transpose.append(featureVals)
        bands, nFrames = numpy.shape(features_transpose)
        features_transpose = array(features_transpose)

        sbic = SBic(cpw=1.5, size1=1000, inc1=300, size2=600, inc2=50, minLength=nFrames+2)
        segments = sbic(features_transpose)
        expected = [0., nFrames-1]
        self.assertEqualVector(segments, expected)

    def atestSize2LargerThanSize1(self):
        # Half 1s and half 0s
        # [ [ 1, ..., 1, 0, ..., 0],
        #   [ 1, ..., 1, 0, ..., 0] ]
        from numpy.random import normal
        features = zeros([2, 400])
        for i in range(200):
            features[0][i] = normal()
            features[1][i] = normal()
        segments = SBic(size1=25, size2=50)(features)
        self.assertAlmostEqualVector(segments, [0, 199, 399], .15)
# Stripped dataset residue that had been appended to the last line.
suite = allTests(TestSBic)

if __name__ == '__main__':
    TextTestRunner(verbosity=2).run(suite)
import numpy as np
import pytest
from pandas._libs import lib
import pandas as pd
from pandas import (
Index,
MultiIndex,
)
import pandas._testing as tm
@pytest.mark.parametrize(
    "input_index, input_columns, input_values, "
    "expected_values, expected_columns, expected_index",
    [
        (
            ["lev4"],
            "lev3",
            "values",
            [
                [0.0, np.nan],
                [np.nan, 1.0],
                [2.0, np.nan],
                [np.nan, 3.0],
                [4.0, np.nan],
                [np.nan, 5.0],
                [6.0, np.nan],
                [np.nan, 7.0],
            ],
            Index([1, 2], name="lev3"),
            Index([1, 2, 3, 4, 5, 6, 7, 8], name="lev4"),
        ),
        (
            ["lev4"],
            "lev3",
            lib.no_default,
            [
                [1.0, np.nan, 1.0, np.nan, 0.0, np.nan],
                [np.nan, 1.0, np.nan, 1.0, np.nan, 1.0],
                [1.0, np.nan, 2.0, np.nan, 2.0, np.nan],
                [np.nan, 1.0, np.nan, 2.0, np.nan, 3.0],
                [2.0, np.nan, 1.0, np.nan, 4.0, np.nan],
                [np.nan, 2.0, np.nan, 1.0, np.nan, 5.0],
                [2.0, np.nan, 2.0, np.nan, 6.0, np.nan],
                [np.nan, 2.0, np.nan, 2.0, np.nan, 7.0],
            ],
            MultiIndex.from_tuples(
                [
                    ("lev1", 1),
                    ("lev1", 2),
                    ("lev2", 1),
                    ("lev2", 2),
                    ("values", 1),
                    ("values", 2),
                ],
                names=[None, "lev3"],
            ),
            Index([1, 2, 3, 4, 5, 6, 7, 8], name="lev4"),
        ),
        (
            ["lev1", "lev2"],
            "lev3",
            "values",
            [[0, 1], [2, 3], [4, 5], [6, 7]],
            Index([1, 2], name="lev3"),
            MultiIndex.from_tuples(
                [(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
            ),
        ),
        (
            ["lev1", "lev2"],
            "lev3",
            lib.no_default,
            [[1, 2, 0, 1], [3, 4, 2, 3], [5, 6, 4, 5], [7, 8, 6, 7]],
            MultiIndex.from_tuples(
                [("lev4", 1), ("lev4", 2), ("values", 1), ("values", 2)],
                names=[None, "lev3"],
            ),
            MultiIndex.from_tuples(
                [(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
            ),
        ),
    ],
)
def test_pivot_list_like_index(
    input_index,
    input_columns,
    input_values,
    expected_values,
    expected_columns,
    expected_index,
):
    """DataFrame.pivot accepts a list-like ``index`` argument.

    A ``values`` of ``lib.no_default`` exercises the path where all
    remaining columns become the pivoted values.
    """
    # GH 21425, test when index is given a list
    df = pd.DataFrame(
        {
            "lev1": [1, 1, 1, 1, 2, 2, 2, 2],
            "lev2": [1, 1, 2, 2, 1, 1, 2, 2],
            "lev3": [1, 2, 1, 2, 1, 2, 1, 2],
            "lev4": [1, 2, 3, 4, 5, 6, 7, 8],
            "values": [0, 1, 2, 3, 4, 5, 6, 7],
        }
    )
    result = df.pivot(index=input_index, columns=input_columns, values=input_values)
    expected = pd.DataFrame(
        expected_values, columns=expected_columns, index=expected_index
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "input_index, input_columns, input_values, "
    "expected_values, expected_columns, expected_index",
    [
        (
            "lev4",
            ["lev3"],
            "values",
            [
                [0.0, np.nan],
                [np.nan, 1.0],
                [2.0, np.nan],
                [np.nan, 3.0],
                [4.0, np.nan],
                [np.nan, 5.0],
                [6.0, np.nan],
                [np.nan, 7.0],
            ],
            Index([1, 2], name="lev3"),
            Index([1, 2, 3, 4, 5, 6, 7, 8], name="lev4"),
        ),
        (
            ["lev1", "lev2"],
            ["lev3"],
            "values",
            [[0, 1], [2, 3], [4, 5], [6, 7]],
            Index([1, 2], name="lev3"),
            MultiIndex.from_tuples(
                [(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
            ),
        ),
        (
            ["lev1"],
            ["lev2", "lev3"],
            "values",
            [[0, 1, 2, 3], [4, 5, 6, 7]],
            MultiIndex.from_tuples(
                [(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev2", "lev3"]
            ),
            Index([1, 2], name="lev1"),
        ),
        (
            ["lev1", "lev2"],
            ["lev3", "lev4"],
            "values",
            [
                [0.0, 1.0, np.nan, np.nan, np.nan, np.nan, np.nan, np.nan],
                [np.nan, np.nan, 2.0, 3.0, np.nan, np.nan, np.nan, np.nan],
                [np.nan, np.nan, np.nan, np.nan, 4.0, 5.0, np.nan, np.nan],
                [np.nan, np.nan, np.nan, np.nan, np.nan, np.nan, 6.0, 7.0],
            ],
            MultiIndex.from_tuples(
                [(1, 1), (2, 2), (1, 3), (2, 4), (1, 5), (2, 6), (1, 7), (2, 8)],
                names=["lev3", "lev4"],
            ),
            MultiIndex.from_tuples(
                [(1, 1), (1, 2), (2, 1), (2, 2)], names=["lev1", "lev2"]
            ),
        ),
    ],
)
def test_pivot_list_like_columns(
    input_index,
    input_columns,
    input_values,
    expected_values,
    expected_columns,
    expected_index,
):
    """DataFrame.pivot accepts a list-like ``columns`` argument,
    producing a MultiIndex on the columns when more than one is given.
    """
    # GH 21425, test when columns is given a list
    df = pd.DataFrame(
        {
            "lev1": [1, 1, 1, 1, 2, 2, 2, 2],
            "lev2": [1, 1, 2, 2, 1, 1, 2, 2],
            "lev3": [1, 2, 1, 2, 1, 2, 1, 2],
            "lev4": [1, 2, 3, 4, 5, 6, 7, 8],
            "values": [0, 1, 2, 3, 4, 5, 6, 7],
        }
    )
    result = df.pivot(index=input_index, columns=input_columns, values=input_values)
    expected = pd.DataFrame(
        expected_values, columns=expected_columns, index=expected_index
    )
    tm.assert_frame_equal(result, expected)
def test_pivot_multiindexed_rows_and_cols():
    """pivot_table works when both axes already carry a MultiIndex."""
    # GH 36360
    col_index = MultiIndex.from_tuples(
        [(0, 0), (0, 1), (0, 2)], names=["col_L0", "col_L1"]
    )
    row_index = MultiIndex.from_tuples(
        [(0, 0, 0), (0, 0, 1), (1, 1, 1), (1, 0, 0)],
        names=["idx_L0", "idx_L1", "idx_L2"],
    )
    df = pd.DataFrame(
        data=np.arange(12).reshape(4, 3), columns=col_index, index=row_index
    )

    res = df.pivot_table(
        index=["idx_L0"],
        columns=["idx_L1"],
        values=[(0, 1)],
        aggfunc=lambda col: col.values.sum(),
    )

    expected_columns = MultiIndex.from_tuples(
        [(0, 1, 0), (0, 1, 1)], names=["col_L0", "col_L1", "idx_L1"]
    )
    expected_index = Index([0, 1], dtype="int64", name="idx_L0")
    expected = pd.DataFrame(
        data=[[5, np.nan], [10, 7.0]],
        columns=expected_columns,
        index=expected_index,
    ).astype("float64")

    tm.assert_frame_equal(res, expected)
def test_pivot_df_multiindex_index_none():
    """pivot with index=None keeps an existing MultiIndex on the rows."""
    # GH 23955
    records = [
        ["A", "A1", "label1", 1],
        ["A", "A2", "label2", 2],
        ["B", "A1", "label1", 3],
        ["B", "A2", "label2", 4],
    ]
    df = pd.DataFrame(records, columns=["index_1", "index_2", "label", "value"])
    df = df.set_index(["index_1", "index_2"])

    result = df.pivot(columns="label", values="value")

    expected_values = [[1.0, np.nan], [np.nan, 2.0], [3.0, np.nan], [np.nan, 4.0]]
    expected = pd.DataFrame(
        expected_values,
        index=df.index,
        columns=Index(["label1", "label2"], name="label"),
    )
    tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
    "index, columns, e_data, e_index, e_cols",
    [
        (
            "index",
            ["col", "value"],
            [
                [50.0, np.nan, 100.0, np.nan],
                [np.nan, 100.0, np.nan, 200.0],
            ],
            Index(data=["A", "B"], name="index"),
            MultiIndex.from_arrays(
                arrays=[[1, 1, 2, 2], [50, 100, 100, 200]], names=["col", "value"]
            ),
        ),
        (
            ["index", "value"],
            "col",
            [
                [50.0, np.nan],
                [np.nan, 100.0],
                [100.0, np.nan],
                [np.nan, 200.0],
            ],
            MultiIndex.from_arrays(
                arrays=[["A", "A", "B", "B"], [50, 100, 100, 200]],
                names=["index", "value"],
            ),
            Index(data=[1, 2], name="col"),
        ),
    ],
    ids=["values-and-columns", "values-and-index"],
)
def test_pivot_table_multiindex_values_as_two_params(
    index, columns, e_data, e_index, e_cols
):
    """The ``values`` column may also appear inside ``index``/``columns``.

    (Stripped dataset residue appended to the last line of this test.)
    """
    # GH#61292
    data = [
        ["A", 1, 50, -1],
        ["B", 1, 100, -2],
        ["A", 2, 100, -2],
        ["B", 2, 200, -4],
    ]
    df = pd.DataFrame(data=data, columns=["index", "col", "value", "extra"])
    result = df.pivot_table(values="value", index=index, columns=columns)
    expected = pd.DataFrame(data=e_data, index=e_index, columns=e_cols)
    tm.assert_frame_equal(result, expected)
from south.db import db
from django.db import models
from ella.positions.models import *
class Migration:
    """South schema migration: create the ``positions_position`` table.

    (Stripped dataset residue appended to the last line of this class.)
    """

    # Must run after the core app's initial migration (FK to core.Category).
    depends_on = (
        ("core", "0001_initial"),
    )

    def forwards(self, orm):
        # Adding model 'Position'
        db.create_table('positions_position', (
            ('id', models.AutoField(primary_key=True)),
            ('category', models.ForeignKey(orm['core.Category'], verbose_name=_('Category'))),
            ('name', models.CharField(_('Name'), max_length=200)),
            ('target_ct', models.ForeignKey(orm['contenttypes.ContentType'], null=True, verbose_name=_('Target content type'), blank=True)),
            ('target_id', models.PositiveIntegerField(_('Target id'), null=True, blank=True)),
            ('active_from', models.DateTimeField(_('Position active from'), null=True, blank=True)),
            ('active_till', models.DateTimeField(_('Position active till'), null=True, blank=True)),
            ('box_type', models.CharField(_('Box type'), max_length=200, blank=True)),
            ('text', models.TextField(_('Definition'), blank=True)),
            ('disabled', models.BooleanField(_('Disabled'), default=False)),
        ))
        db.send_create_signal('positions', ['Position'])

    def backwards(self, orm):
        # Deleting model 'Position'
        db.delete_table('positions_position')

    # Frozen ORM model definitions available to forwards()/backwards().
    models = {
        'core.category': {
            'Meta': {'ordering': "('site','tree_path',)", 'unique_together': "(('site','tree_path'),)"},
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label','model'),)", 'db_table': "'django_content_type'"},
            '_stub': True,
            'id': ('models.AutoField', [], {'primary_key': 'True'})
        },
        'positions.position': {
            'active_from': ('models.DateTimeField', ["_('Position active from')"], {'null': 'True', 'blank': 'True'}),
            'active_till': ('models.DateTimeField', ["_('Position active till')"], {'null': 'True', 'blank': 'True'}),
            'box_type': ('models.CharField', ["_('Box type')"], {'max_length': '200', 'blank': 'True'}),
            'category': ('models.ForeignKey', ["orm['core.Category']"], {'verbose_name': "_('Category')"}),
            'disabled': ('models.BooleanField', ["_('Disabled')"], {'default': 'False'}),
            'id': ('models.AutoField', [], {'primary_key': 'True'}),
            'name': ('models.CharField', ["_('Name')"], {'max_length': '200'}),
            'target_ct': ('models.ForeignKey', ["orm['contenttypes.ContentType']"], {'null': 'True', 'verbose_name': "_('Target content type')", 'blank': 'True'}),
            'target_id': ('models.PositiveIntegerField', ["_('Target id')"], {'null': 'True', 'blank': 'True'}),
            'text': ('models.TextField', ["_('Definition')"], {'blank': 'True'})
        }
    }

    complete_apps = ['positions']
/*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.kafka.connect.data;
import org.apache.kafka.connect.errors.DataException;
import java.math.BigDecimal;
import java.math.BigInteger;
/**
* <p>
* An arbitrary-precision signed decimal number. The value is unscaled * 10 ^ -scale where:
* <ul>
* <li>unscaled is an integer </li>
* <li>scale is an integer representing how many digits the decimal point should be shifted on the unscaled value</li>
* </ul>
* </p>
* <p>
* Decimal does not provide a fixed schema because it is parameterized by the scale, which is fixed on the schema
* rather than being part of the value.
* </p>
* <p>
* The underlying representation of this type is bytes containing a two's complement integer
* </p>
*/
public class Decimal {
public static final String LOGICAL_NAME = "org.apache.kafka.connect.data.Decimal";
public static final String SCALE_FIELD = "scale";
/**
* Returns a SchemaBuilder for a Decimal with the given scale factor. By returning a SchemaBuilder you can override
* additional schema settings such as required/optional, default value, and documentation.
* @param scale the scale factor to apply to unscaled values
* @return a SchemaBuilder
*/
public static SchemaBuilder builder(int scale) {
return SchemaBuilder.bytes()
.name(LOGICAL_NAME)
.parameter(SCALE_FIELD, Integer.toString(scale))
.version(1);
}
public static Schema schema(int scale) {
return builder(scale).build();
}
/**
* Convert a value from its logical format ({@link BigDecimal}) to its encoded format (byte[]).
* @param value the logical value
* @return the encoded value
*/
public static byte[] fromLogical(Schema schema, BigDecimal value) {
int schemaScale = scale(schema);
if (value.scale() != schemaScale)
throw new DataException(String.format(
"Decimal value has mismatching scale for given Decimal schema. "
+ "Schema has scale %d, value has scale %d.",
schemaScale,
value.scale()
));
return value.unscaledValue().toByteArray();
}
/**
* Convert a value from its encoded format (byte[]) to its logical format ({@link BigDecimal}).
* @param value the encoded value
* @return the logical value
*/
public static BigDecimal toLogical(Schema schema, byte[] value) {
return new BigDecimal(new BigInteger(value), scale(schema));
}
private static int scale(Schema schema) {
String scaleString = schema.parameters().get(SCALE_FIELD);
if (scaleString == null)
throw new DataException("Invalid Decimal schema: scale parameter not found.");
try {
return Integer.parseInt(scaleString);
} catch (NumberFormatException e) {
throw new DataException("Invalid scale parameter found in Decimal schema: ", e);
}
}
} | java | github | https://github.com/apache/kafka | connect/api/src/main/java/org/apache/kafka/connect/data/Decimal.java |
# -*- coding: utf-8 -*-
import re
from module.plugins.internal.MultiHook import MultiHook
class LinkdecrypterComHook(MultiHook):
    __name__    = "LinkdecrypterComHook"
    __type__    = "hook"
    __version__ = "1.07"
    __status__  = "testing"

    __config__ = [("activated"     , "bool"               , "Activated"                    , True ),
                  ("pluginmode"    , "all;listed;unlisted", "Use for plugins"              , "all"),
                  ("pluginlist"    , "str"                , "Plugin list (comma separated)", ""   ),
                  ("reload"        , "bool"               , "Reload plugin list"           , True ),
                  ("reloadinterval", "int"                , "Reload interval in hours"     , 12   )]

    __description__ = """Linkdecrypter.com hook plugin"""
    __license__     = "GPLv3"
    __authors__     = [("Walter Purcaro", "vuolter@gmail.com")]

    def get_hosters(self):
        """Scrape the list of hoster domains currently supported by linkdecrypter.com.

        Returns:
            list of hoster domain names taken from the site's front page.
        """
        # The front page advertises hosters as '>Supported(NN)</b>: <i>host1, host2, ...'.
        # '(g)' markers are stripped before parsing so they don't pollute the names.
        # Local renamed from 'list' to avoid shadowing the builtin.
        page = self.load("http://linkdecrypter.com/").replace("(g)", "")
        hosters = re.search(r'>Supported\(\d+\)</b>: <i>(.[\w.\-, ]+)',
                            page).group(1).split(', ')

        # serienjunkies links are handled by a dedicated decrypter plugin; drop if present.
        try:
            hosters.remove("download.serienjunkies.org")
        except ValueError:
            pass

        return hosters
import os
import sys
# Load deployment configuration (expected to define INSTALL_ROOT, among others);
# execfile is Python 2 only, consistent with the print statements below.
execfile('/etc/courtlistener')
# Make the installed project importable, then point Django at its settings module.
sys.path.append(INSTALL_ROOT)
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "settings")
from alert.search.models import Document, Citation
from alert.lib.db_tools import queryset_generator
from alert.lib.string_utils import clean_string
from alert.lib.string_utils import harmonize
from alert.lib.string_utils import titlecase
from optparse import OptionParser
def link_fixer(link):
    """Rewrite a bulk.resource.org court URL from its broken 'US' form.

    Replaces every 'US' path token with 'F2' and bumps the volume directory
    number by 177 so the URL points at the correct F2 location.

    Orig: http://bulk.resource.org/courts.gov/c/US/819/996.F2d.311.html
    Fixed: http://bulk.resource.org/courts.gov/c/F2/996/996.F2d.311.html
    """
    # Crude global swap of 'US' for 'F2' anywhere in the URL.
    swapped = 'F2'.join(link.split('US'))

    # The second-to-last path segment is the volume number; shift it by 177.
    pieces = swapped.split('/')
    volume = int(pieces[-2]) + 177
    return '/'.join(pieces[:-2]) + "/" + str(volume) + "/" + str(pieces[-1])
def cleaner(simulate=False, verbose=False):
    """Fix the download_url of resource.org ('R' source) documents.

    Only documents retrieved after 2011-06-01 are touched; each URL is run
    through link_fixer and saved back unless simulate is True.
    """
    # queryset_generator iterates lazily, presumably to keep memory bounded on
    # large result sets -- TODO confirm against its implementation.
    docs = queryset_generator(Document.objects.filter(source = 'R', time_retrieved__gt = '2011-06-01'))
    for doc in docs:
        original_link = doc.download_url
        fixed = link_fixer(original_link)
        doc.download_url = fixed
        if verbose:
            print "Changing: " + original_link
            print " to: " + fixed
        # Only persist the corrected URL when not doing a dry run.
        if not simulate:
            doc.save()
def main():
    """Parse command-line flags and run the cleaner, optionally as a dry run."""
    # NOTE(review): the usage text says '---simulate' (three dashes) -- likely a
    # typo for '--simulate'; left as-is since it is runtime text.
    usage = "usage: %prog [--verbose] [---simulate]"
    parser = OptionParser(usage)
    parser.add_option('-v', '--verbose', action="store_true", dest='verbose',
        default=False, help="Display log during execution")
    parser.add_option('-s', '--simulate', action="store_true",
        dest='simulate', default=False, help="Simulate the corrections without " + \
        "actually making them.")
    (options, args) = parser.parse_args()
    verbose = options.verbose
    simulate = options.simulate
    # Make the dry-run mode loudly obvious before any work starts.
    if simulate:
        print "*******************************************"
        print "* SIMULATE MODE - NO CHANGES WILL BE MADE *"
        print "*******************************************"
    return cleaner(simulate, verbose)

if __name__ == '__main__':
    main()
"""Unit tests for buildscripts/resmokelib/testing/hooks/fuzz_runtime_parameters.py."""
import random
import sys
import unittest
import mock
from buildscripts.resmokelib.testing.hooks import fuzz_runtime_parameters as _runtime_fuzzer
class TestRuntimeFuzzGeneration(unittest.TestCase):
def assert_parameter_values_ok(self, spec, generated_values):
for name, val in generated_values.items():
options = spec[name]
if "isRandomizedChoice" in options:
lb = options["lower_bound"]
ub = options["upper_bound"]
self.assertTrue(lb <= val <= ub)
elif "choices" in options:
self.assertIn(val, options["choices"])
elif "min" and "max" in options:
self.assertTrue(options["min"] <= val <= options["max"])
else:
self.assertIn("default", options)
self.assertEqual(val, options["default"])
@mock.patch("buildscripts.resmokelib.testing.hooks.fuzz_runtime_parameters.time.time")
def test_frequency_respected(self, mock_time):
start_time = 1625140800
mock_time.return_value = start_time
test_runtime_params = {
"mongod": {
"ShardingTaskExecutorPoolMinSize": {"min": 1, "max": 50, "period": 5},
"ingressAdmissionControllerTicketPoolSize": {
"choices": [500_000, 1_000_000, 2_000_000],
"lower_bound": 1000,
"upper_bound": 5_000_000,
"isRandomizedChoice": True,
"period": 1,
},
"ingressAdmissionControlEnabled": {
"choices": [True, False],
"period": 10,
},
},
"mongos": {
"ShardingTaskExecutorPoolMinSize": {"min": 1, "max": 50, "period": 5},
},
}
mongod_spec = test_runtime_params["mongod"]
runtimeFuzzerParamState = _runtime_fuzzer.RuntimeParametersState(
mongod_spec, random.randrange(sys.maxsize)
)
# No time has passed; we wouldn't want to set any of these yet.
ret = runtimeFuzzerParamState.generate_parameters()
self.assertEqual(ret, {})
mock_time.return_value = start_time + 1
ret = runtimeFuzzerParamState.generate_parameters()
# We should set ingressAdmissionControllerTicketPoolSize now, but not ingressAdmissionControlEnabled or ShardingTaskExecutorPoolMinSize
param_names_to_set = ret.keys()
self.assertIn("ingressAdmissionControllerTicketPoolSize", param_names_to_set)
self.assertNotIn("ingressAdmissionControlEnabled", param_names_to_set)
self.assertNotIn("ShardingTaskExecutorPoolMinSize", param_names_to_set)
self.assert_parameter_values_ok(mongod_spec, ret)
# Don't advance time, and generate the values again. Since no time has passed, nothing should be set.
ret = runtimeFuzzerParamState.generate_parameters()
self.assertEqual(ret, {})
# Now advance the time enough such that ShardingTaskExecutorPoolMinSize should be set also.
mock_time.return_value = start_time + 5
ret = runtimeFuzzerParamState.generate_parameters()
param_names_to_set = ret.keys()
self.assertIn("ingressAdmissionControllerTicketPoolSize", param_names_to_set)
self.assertIn("ShardingTaskExecutorPoolMinSize", param_names_to_set)
self.assertNotIn("ingressAdmissionControlEnabled", param_names_to_set)
self.assert_parameter_values_ok(mongod_spec, ret)
# Don't advance time, and generate the values again. Since no time has passed, nothing should be set.
ret = runtimeFuzzerParamState.generate_parameters()
self.assertEqual(ret, {})
# Now advance the time enough such that all 3 should be set.
mock_time.return_value = start_time + 10
ret = runtimeFuzzerParamState.generate_parameters()
param_names_to_set = ret.keys()
self.assertIn("ingressAdmissionControllerTicketPoolSize", param_names_to_set)
self.assertIn("ShardingTaskExecutorPoolMinSize", param_names_to_set)
self.assertIn("ingressAdmissionControlEnabled", param_names_to_set)
self.assert_parameter_values_ok(mongod_spec, ret)
def test_runtime_param_spec_validation(self):
bad_spec_value_not_dict = {"fakeRuntimeParam": 1}
bad_spec_value_no_period = {"fakeRuntimeParam": {"max": 50, "min": 10}}
good_spec = {"fakeRuntimeParam": {"max": 50, "min": 10, "period": 5}}
with self.assertRaises(ValueError):
_runtime_fuzzer.validate_runtime_parameter_spec(bad_spec_value_not_dict)
with self.assertRaises(ValueError):
_runtime_fuzzer.validate_runtime_parameter_spec(bad_spec_value_no_period)
# No exception for good dict
_runtime_fuzzer.validate_runtime_parameter_spec(good_spec) | python | github | https://github.com/mongodb/mongo | buildscripts/tests/resmokelib/testing/hooks/test_runtime_parameter_fuzzing.py |
"""Test the covtype loader, if the data is available,
or if specifically requested via environment variable
(e.g. for CI jobs)."""
from functools import partial
import pytest
from sklearn.datasets.tests.test_common import check_return_X_y
def test_fetch(fetch_covtype_fxt, global_random_seed):
    """Two fetches with different shuffle seeds yield the same data, reordered."""
    bunch_a = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed)
    bunch_b = fetch_covtype_fxt(shuffle=True, random_state=global_random_seed + 1)

    X_a, X_b = bunch_a["data"], bunch_b["data"]
    assert X_a.shape == (581012, 54)
    assert X_b.shape == X_a.shape
    # Shuffling permutes rows but must not alter the contents.
    assert X_a.sum() == X_b.sum()

    y_a, y_b = bunch_a["target"], bunch_b["target"]
    assert y_a.shape == (X_a.shape[0],)
    assert y_b.shape == (X_a.shape[0],)

    prefix = ".. _covtype_dataset:"
    assert bunch_a.DESCR.startswith(prefix)
    assert bunch_b.DESCR.startswith(prefix)

    # Exercise the return_X_y code path as well.
    check_return_X_y(bunch_a, partial(fetch_covtype_fxt))
def test_fetch_asframe(fetch_covtype_fxt):
    """as_frame=True returns a pandas frame with named one-hot columns."""
    pytest.importorskip("pandas")

    bunch = fetch_covtype_fxt(as_frame=True)
    assert hasattr(bunch, "frame")

    df = bunch.frame
    assert df.shape == (581012, 55)
    assert bunch.data.shape == (581012, 54)
    assert bunch.target.shape == (581012,)

    # The one-hot encoded features keep their enumerated names.
    cols = set(df.columns)
    assert {f"Wilderness_Area_{i}" for i in range(4)} < cols
    assert {f"Soil_Type_{i}" for i in range(40)} < cols
def test_pandas_dependency_message(fetch_covtype_fxt, hide_available_pandas):
    """With pandas hidden, as_frame=True must fail with a clear install hint."""
    with pytest.raises(
        ImportError, match="fetch_covtype with as_frame=True requires pandas"
    ):
        fetch_covtype_fxt(as_frame=True)
/*
* Copyright (C) Igor Sysoev
* Copyright (C) Nginx, Inc.
*/
#include <ngx_config.h>
#include <ngx_core.h>
#include <ngx_event.h>
#include <ngx_mail.h>
static char *ngx_mail_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf);
static ngx_int_t ngx_mail_add_ports(ngx_conf_t *cf, ngx_array_t *ports,
ngx_mail_listen_t *listen);
static char *ngx_mail_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports);
static ngx_int_t ngx_mail_add_addrs(ngx_conf_t *cf, ngx_mail_port_t *mport,
ngx_mail_conf_addr_t *addr);
#if (NGX_HAVE_INET6)
static ngx_int_t ngx_mail_add_addrs6(ngx_conf_t *cf, ngx_mail_port_t *mport,
ngx_mail_conf_addr_t *addr);
#endif
static ngx_int_t ngx_mail_cmp_conf_addrs(const void *one, const void *two);
ngx_uint_t ngx_mail_max_module;
static ngx_command_t ngx_mail_commands[] = {
{ ngx_string("mail"),
NGX_MAIN_CONF|NGX_CONF_BLOCK|NGX_CONF_NOARGS,
ngx_mail_block,
0,
0,
NULL },
ngx_null_command
};
static ngx_core_module_t ngx_mail_module_ctx = {
ngx_string("mail"),
NULL,
NULL
};
ngx_module_t ngx_mail_module = {
NGX_MODULE_V1,
&ngx_mail_module_ctx, /* module context */
ngx_mail_commands, /* module directives */
NGX_CORE_MODULE, /* module type */
NULL, /* init master */
NULL, /* init module */
NULL, /* init process */
NULL, /* init thread */
NULL, /* exit thread */
NULL, /* exit process */
NULL, /* exit master */
NGX_MODULE_V1_PADDING
};
static char *
ngx_mail_block(ngx_conf_t *cf, ngx_command_t *cmd, void *conf)
{
char *rv;
ngx_uint_t i, m, mi, s;
ngx_conf_t pcf;
ngx_array_t ports;
ngx_mail_listen_t *listen;
ngx_mail_module_t *module;
ngx_mail_conf_ctx_t *ctx;
ngx_mail_core_srv_conf_t **cscfp;
ngx_mail_core_main_conf_t *cmcf;
if (*(ngx_mail_conf_ctx_t **) conf) {
return "is duplicate";
}
/* the main mail context */
ctx = ngx_pcalloc(cf->pool, sizeof(ngx_mail_conf_ctx_t));
if (ctx == NULL) {
return NGX_CONF_ERROR;
}
*(ngx_mail_conf_ctx_t **) conf = ctx;
/* count the number of the mail modules and set up their indices */
ngx_mail_max_module = ngx_count_modules(cf->cycle, NGX_MAIL_MODULE);
/* the mail main_conf context, it is the same in the all mail contexts */
ctx->main_conf = ngx_pcalloc(cf->pool,
sizeof(void *) * ngx_mail_max_module);
if (ctx->main_conf == NULL) {
return NGX_CONF_ERROR;
}
/*
* the mail null srv_conf context, it is used to merge
* the server{}s' srv_conf's
*/
ctx->srv_conf = ngx_pcalloc(cf->pool, sizeof(void *) * ngx_mail_max_module);
if (ctx->srv_conf == NULL) {
return NGX_CONF_ERROR;
}
/*
* create the main_conf's and the null srv_conf's of the all mail modules
*/
for (m = 0; cf->cycle->modules[m]; m++) {
if (cf->cycle->modules[m]->type != NGX_MAIL_MODULE) {
continue;
}
module = cf->cycle->modules[m]->ctx;
mi = cf->cycle->modules[m]->ctx_index;
if (module->create_main_conf) {
ctx->main_conf[mi] = module->create_main_conf(cf);
if (ctx->main_conf[mi] == NULL) {
return NGX_CONF_ERROR;
}
}
if (module->create_srv_conf) {
ctx->srv_conf[mi] = module->create_srv_conf(cf);
if (ctx->srv_conf[mi] == NULL) {
return NGX_CONF_ERROR;
}
}
}
/* parse inside the mail{} block */
pcf = *cf;
cf->ctx = ctx;
cf->module_type = NGX_MAIL_MODULE;
cf->cmd_type = NGX_MAIL_MAIN_CONF;
rv = ngx_conf_parse(cf, NULL);
if (rv != NGX_CONF_OK) {
*cf = pcf;
return rv;
}
/* init mail{} main_conf's, merge the server{}s' srv_conf's */
cmcf = ctx->main_conf[ngx_mail_core_module.ctx_index];
cscfp = cmcf->servers.elts;
for (m = 0; cf->cycle->modules[m]; m++) {
if (cf->cycle->modules[m]->type != NGX_MAIL_MODULE) {
continue;
}
module = cf->cycle->modules[m]->ctx;
mi = cf->cycle->modules[m]->ctx_index;
/* init mail{} main_conf's */
cf->ctx = ctx;
if (module->init_main_conf) {
rv = module->init_main_conf(cf, ctx->main_conf[mi]);
if (rv != NGX_CONF_OK) {
*cf = pcf;
return rv;
}
}
for (s = 0; s < cmcf->servers.nelts; s++) {
/* merge the server{}s' srv_conf's */
cf->ctx = cscfp[s]->ctx;
if (module->merge_srv_conf) {
rv = module->merge_srv_conf(cf,
ctx->srv_conf[mi],
cscfp[s]->ctx->srv_conf[mi]);
if (rv != NGX_CONF_OK) {
*cf = pcf;
return rv;
}
}
}
}
*cf = pcf;
if (ngx_array_init(&ports, cf->temp_pool, 4, sizeof(ngx_mail_conf_port_t))
!= NGX_OK)
{
return NGX_CONF_ERROR;
}
listen = cmcf->listen.elts;
for (i = 0; i < cmcf->listen.nelts; i++) {
if (ngx_mail_add_ports(cf, &ports, &listen[i]) != NGX_OK) {
return NGX_CONF_ERROR;
}
}
return ngx_mail_optimize_servers(cf, &ports);
}
static ngx_int_t
ngx_mail_add_ports(ngx_conf_t *cf, ngx_array_t *ports,
ngx_mail_listen_t *listen)
{
in_port_t p;
ngx_uint_t i;
struct sockaddr *sa;
ngx_mail_conf_port_t *port;
ngx_mail_conf_addr_t *addr;
sa = listen->sockaddr;
p = ngx_inet_get_port(sa);
port = ports->elts;
for (i = 0; i < ports->nelts; i++) {
if (p == port[i].port && sa->sa_family == port[i].family) {
/* a port is already in the port list */
port = &port[i];
goto found;
}
}
/* add a port to the port list */
port = ngx_array_push(ports);
if (port == NULL) {
return NGX_ERROR;
}
port->family = sa->sa_family;
port->port = p;
if (ngx_array_init(&port->addrs, cf->temp_pool, 2,
sizeof(ngx_mail_conf_addr_t))
!= NGX_OK)
{
return NGX_ERROR;
}
found:
addr = ngx_array_push(&port->addrs);
if (addr == NULL) {
return NGX_ERROR;
}
addr->opt = *listen;
return NGX_OK;
}
static char *
ngx_mail_optimize_servers(ngx_conf_t *cf, ngx_array_t *ports)
{
ngx_uint_t i, p, last, bind_wildcard;
ngx_listening_t *ls;
ngx_mail_port_t *mport;
ngx_mail_conf_port_t *port;
ngx_mail_conf_addr_t *addr;
ngx_mail_core_srv_conf_t *cscf;
port = ports->elts;
for (p = 0; p < ports->nelts; p++) {
ngx_sort(port[p].addrs.elts, (size_t) port[p].addrs.nelts,
sizeof(ngx_mail_conf_addr_t), ngx_mail_cmp_conf_addrs);
addr = port[p].addrs.elts;
last = port[p].addrs.nelts;
/*
* if there is the binding to the "*:port" then we need to bind()
* to the "*:port" only and ignore the other bindings
*/
if (addr[last - 1].opt.wildcard) {
addr[last - 1].opt.bind = 1;
bind_wildcard = 1;
} else {
bind_wildcard = 0;
}
i = 0;
while (i < last) {
if (bind_wildcard && !addr[i].opt.bind) {
i++;
continue;
}
ls = ngx_create_listening(cf, addr[i].opt.sockaddr,
addr[i].opt.socklen);
if (ls == NULL) {
return NGX_CONF_ERROR;
}
ls->addr_ntop = 1;
ls->handler = ngx_mail_init_connection;
ls->pool_size = 256;
cscf = addr->opt.ctx->srv_conf[ngx_mail_core_module.ctx_index];
ls->logp = cscf->error_log;
ls->log.data = &ls->addr_text;
ls->log.handler = ngx_accept_log_error;
ls->backlog = addr[i].opt.backlog;
ls->rcvbuf = addr[i].opt.rcvbuf;
ls->sndbuf = addr[i].opt.sndbuf;
ls->keepalive = addr[i].opt.so_keepalive;
#if (NGX_HAVE_KEEPALIVE_TUNABLE)
ls->keepidle = addr[i].opt.tcp_keepidle;
ls->keepintvl = addr[i].opt.tcp_keepintvl;
ls->keepcnt = addr[i].opt.tcp_keepcnt;
#endif
#if (NGX_HAVE_INET6)
ls->ipv6only = addr[i].opt.ipv6only;
#endif
mport = ngx_palloc(cf->pool, sizeof(ngx_mail_port_t));
if (mport == NULL) {
return NGX_CONF_ERROR;
}
ls->servers = mport;
mport->naddrs = i + 1;
switch (ls->sockaddr->sa_family) {
#if (NGX_HAVE_INET6)
case AF_INET6:
if (ngx_mail_add_addrs6(cf, mport, addr) != NGX_OK) {
return NGX_CONF_ERROR;
}
break;
#endif
default: /* AF_INET */
if (ngx_mail_add_addrs(cf, mport, addr) != NGX_OK) {
return NGX_CONF_ERROR;
}
break;
}
addr++;
last--;
}
}
return NGX_CONF_OK;
}
static ngx_int_t
ngx_mail_add_addrs(ngx_conf_t *cf, ngx_mail_port_t *mport,
ngx_mail_conf_addr_t *addr)
{
ngx_uint_t i;
ngx_mail_in_addr_t *addrs;
struct sockaddr_in *sin;
mport->addrs = ngx_pcalloc(cf->pool,
mport->naddrs * sizeof(ngx_mail_in_addr_t));
if (mport->addrs == NULL) {
return NGX_ERROR;
}
addrs = mport->addrs;
for (i = 0; i < mport->naddrs; i++) {
sin = (struct sockaddr_in *) addr[i].opt.sockaddr;
addrs[i].addr = sin->sin_addr.s_addr;
addrs[i].conf.ctx = addr[i].opt.ctx;
#if (NGX_MAIL_SSL)
addrs[i].conf.ssl = addr[i].opt.ssl;
#endif
addrs[i].conf.proxy_protocol = addr[i].opt.proxy_protocol;
addrs[i].conf.addr_text = addr[i].opt.addr_text;
}
return NGX_OK;
}
#if (NGX_HAVE_INET6)
static ngx_int_t
ngx_mail_add_addrs6(ngx_conf_t *cf, ngx_mail_port_t *mport,
ngx_mail_conf_addr_t *addr)
{
ngx_uint_t i;
ngx_mail_in6_addr_t *addrs6;
struct sockaddr_in6 *sin6;
mport->addrs = ngx_pcalloc(cf->pool,
mport->naddrs * sizeof(ngx_mail_in6_addr_t));
if (mport->addrs == NULL) {
return NGX_ERROR;
}
addrs6 = mport->addrs;
for (i = 0; i < mport->naddrs; i++) {
sin6 = (struct sockaddr_in6 *) addr[i].opt.sockaddr;
addrs6[i].addr6 = sin6->sin6_addr;
addrs6[i].conf.ctx = addr[i].opt.ctx;
#if (NGX_MAIL_SSL)
addrs6[i].conf.ssl = addr[i].opt.ssl;
#endif
addrs6[i].conf.proxy_protocol = addr[i].opt.proxy_protocol;
addrs6[i].conf.addr_text = addr[i].opt.addr_text;
}
return NGX_OK;
}
#endif
static ngx_int_t
ngx_mail_cmp_conf_addrs(const void *one, const void *two)
{
ngx_mail_conf_addr_t *first, *second;
first = (ngx_mail_conf_addr_t *) one;
second = (ngx_mail_conf_addr_t *) two;
if (first->opt.wildcard) {
/* a wildcard must be the last resort, shift it to the end */
return 1;
}
if (second->opt.wildcard) {
/* a wildcard must be the last resort, shift it to the end */
return -1;
}
if (first->opt.bind && !second->opt.bind) {
/* shift explicit bind()ed addresses to the start */
return -1;
}
if (!first->opt.bind && second->opt.bind) {
/* shift explicit bind()ed addresses to the start */
return 1;
}
/* do not sort by default */
return 0;
} | c | github | https://github.com/nginx/nginx | src/mail/ngx_mail.c |
#!/usr/bin/python
#
# Copyright (C) 2006 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to ElementWrapper objects used with Google Calendar."""
__author__ = 'api.vli (Vivian Li), api.rboyd (Ryan Boyd)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
# XML namespaces which are often used in Google Calendar entities.
GCAL_NAMESPACE = 'http://schemas.google.com/gCal/2005'
GCAL_TEMPLATE = '{http://schemas.google.com/gCal/2005}%s'
WEB_CONTENT_LINK_REL = '%s/%s' % (GCAL_NAMESPACE, 'webContent')
GACL_NAMESPACE = gdata.GACL_NAMESPACE
GACL_TEMPLATE = gdata.GACL_TEMPLATE
class ValueAttributeContainer(atom.AtomBase):
"""A parent class for all Calendar classes which have a value attribute.
Children include Color, AccessLevel, Hidden
"""
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
def __init__(self, value=None, extension_elements=None,
extension_attributes=None, text=None):
self.value = value
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Color(ValueAttributeContainer):
"""The Google Calendar color element"""
_tag = 'color'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class AccessLevel(ValueAttributeContainer):
"""The Google Calendar accesslevel element"""
_tag = 'accesslevel'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Hidden(ValueAttributeContainer):
"""The Google Calendar hidden element"""
_tag = 'hidden'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Selected(ValueAttributeContainer):
"""The Google Calendar selected element"""
_tag = 'selected'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Timezone(ValueAttributeContainer):
"""The Google Calendar timezone element"""
_tag = 'timezone'
_namespace = GCAL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class Where(atom.AtomBase):
"""The Google Calendar Where element"""
_tag = 'where'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['valueString'] = 'value_string'
def __init__(self, value_string=None, extension_elements=None,
extension_attributes=None, text=None):
self.value_string = value_string
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class CalendarListEntry(gdata.GDataEntry, gdata.LinkFinder):
"""A Google Calendar meta Entry flavor of an Atom Entry """
_tag = gdata.GDataEntry._tag
_namespace = gdata.GDataEntry._namespace
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}color' % GCAL_NAMESPACE] = ('color', Color)
_children['{%s}accesslevel' % GCAL_NAMESPACE] = ('access_level',
AccessLevel)
_children['{%s}hidden' % GCAL_NAMESPACE] = ('hidden', Hidden)
_children['{%s}selected' % GCAL_NAMESPACE] = ('selected', Selected)
_children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone)
_children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', Where)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
color=None, access_level=None, hidden=None, timezone=None,
selected=None,
where=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.GDataEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published, title=title,
updated=updated, text=None)
self.color = color
self.access_level = access_level
self.hidden = hidden
self.selected = selected
self.timezone = timezone
self.where = where
class CalendarListFeed(gdata.GDataFeed, gdata.LinkFinder):
"""A Google Calendar meta feed flavor of an Atom Feed"""
_tag = gdata.GDataFeed._tag
_namespace = gdata.GDataFeed._namespace
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarListEntry])
class Scope(atom.AtomBase):
"""The Google ACL scope element"""
_tag = 'scope'
_namespace = GACL_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['value'] = 'value'
_attributes['type'] = 'type'
def __init__(self, extension_elements=None, value=None, scope_type=None,
extension_attributes=None, text=None):
self.value = value
self.type = scope_type
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Role(ValueAttributeContainer):
"""The Google Calendar timezone element"""
_tag = 'role'
_namespace = GACL_NAMESPACE
_children = ValueAttributeContainer._children.copy()
_attributes = ValueAttributeContainer._attributes.copy()
class CalendarAclEntry(gdata.GDataEntry, gdata.LinkFinder):
"""A Google Calendar ACL Entry flavor of an Atom Entry """
_tag = gdata.GDataEntry._tag
_namespace = gdata.GDataEntry._namespace
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}scope' % GACL_NAMESPACE] = ('scope', Scope)
_children['{%s}role' % GACL_NAMESPACE] = ('role', Role)
def __init__(self, author=None, category=None, content=None,
atom_id=None, link=None, published=None,
title=None, updated=None,
scope=None, role=None,
extension_elements=None, extension_attributes=None, text=None):
gdata.GDataEntry.__init__(self, author=author, category=category,
content=content, atom_id=atom_id, link=link,
published=published, title=title,
updated=updated, text=None)
self.scope = scope
self.role = role
class CalendarAclFeed(gdata.GDataFeed, gdata.LinkFinder):
"""A Google Calendar ACL feed flavor of an Atom Feed"""
_tag = gdata.GDataFeed._tag
_namespace = gdata.GDataFeed._namespace
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry', [CalendarAclEntry])
class CalendarEventCommentEntry(gdata.GDataEntry, gdata.LinkFinder):
"""A Google Calendar event comments entry flavor of an Atom Entry"""
_tag = gdata.GDataEntry._tag
_namespace = gdata.GDataEntry._namespace
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
class CalendarEventCommentFeed(gdata.GDataFeed, gdata.LinkFinder):
"""A Google Calendar event comments feed flavor of an Atom Feed"""
_tag = gdata.GDataFeed._tag
_namespace = gdata.GDataFeed._namespace
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[CalendarEventCommentEntry])
class ExtendedProperty(gdata.ExtendedProperty):
"""A transparent subclass of gdata.ExtendedProperty added to this module
for backwards compatibility."""
class Reminder(atom.AtomBase):
"""The Google Calendar reminder element"""
_tag = 'reminder'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['absoluteTime'] = 'absolute_time'
_attributes['days'] = 'days'
_attributes['hours'] = 'hours'
_attributes['minutes'] = 'minutes'
_attributes['method'] = 'method'
def __init__(self, absolute_time=None,
days=None, hours=None, minutes=None, method=None,
extension_elements=None,
extension_attributes=None, text=None):
self.absolute_time = absolute_time
if days is not None:
self.days = str(days)
else:
self.days = None
if hours is not None:
self.hours = str(hours)
else:
self.hours = None
if minutes is not None:
self.minutes = str(minutes)
else:
self.minutes = None
self.method = method
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class When(atom.AtomBase):
"""The Google Calendar When element"""
_tag = 'when'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder])
_attributes['startTime'] = 'start_time'
_attributes['endTime'] = 'end_time'
def __init__(self, start_time=None, end_time=None, reminder=None,
extension_elements=None, extension_attributes=None, text=None):
self.start_time = start_time
self.end_time = end_time
self.reminder = reminder or []
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
class Recurrence(atom.AtomBase):
"""The Google Calendar Recurrence element"""
_tag = 'recurrence'
_namespace = gdata.GDATA_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
class UriEnumElement(atom.AtomBase):
    """Base class for elements whose value comes from a URI-to-name enum.

    The XML attribute named by ``attrib_name`` (usually ``value``) carries a
    schema URI; ``enum_map`` maps that URI to a short symbolic name stored on
    ``self.value``.  Serialization performs the reverse mapping.
    """
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()

    def __init__(self, tag, enum_map, attrib_name='value',
                 extension_elements=None, extension_attributes=None, text=None):
        """Args:
          tag: str The XML tag name of this element.
          enum_map: dict Mapping of schema URI -> symbolic value.
          attrib_name: str Name of the XML attribute holding the URI.
        """
        self.tag = tag
        self.enum_map = enum_map
        self.attrib_name = attrib_name
        self.value = None
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}

    def findKey(self, value):
        """Return the URI key whose mapped value equals *value*, or None.

        The original built a list comprehension and then checked it against
        None — a comprehension always yields a list, so that branch was dead.
        A plain reverse-lookup loop returns the same first match.
        """
        for uri, enum_value in self.enum_map.items():
            if enum_value == value:
                return uri
        return None

    def _ConvertElementAttributeToMember(self, attribute, value):
        """Map an XML attribute onto the matching object member."""
        # Special logic: the enum attribute is translated through enum_map
        # into self.value (empty strings are ignored).
        if attribute == self.attrib_name and value != '':
            self.value = self.enum_map[value]
            return
        # Find the attribute in this class's list of attributes.
        # `in` replaces Python-2-only dict.has_key() with identical behavior.
        if attribute in self.__class__._attributes:
            # Set the member named by _attributes to the desired value.
            setattr(self, self.__class__._attributes[attribute], value)
        else:
            # The current class doesn't map this attribute; defer to parent.
            atom.ExtensionContainer._ConvertElementAttributeToMember(self,
                                                                     attribute,
                                                                     value)

    def _AddMembersToElementTree(self, tree):
        """Serialize this object's members into the ElementTree node *tree*."""
        # Convert the members of this class which are XML child nodes.
        # dict.items() behaves like iteritems() for iteration and also works
        # on Python 3.
        member_node_names = [values[0] for tag, values in
                             self.__class__._children.items()]
        for member_name in member_node_names:
            member = getattr(self, member_name)
            if member is None:
                pass
            elif isinstance(member, list):
                for instance in member:
                    instance._BecomeChildElement(tree)
            else:
                member._BecomeChildElement(tree)
        # Special logic: translate self.value back to its URI form.
        key = self.findKey(self.value)
        if key is not None:
            tree.attrib[self.attrib_name] = key
        # Convert the members of this class which are plain XML attributes.
        for xml_attribute, member_name in self.__class__._attributes.items():
            member = getattr(self, member_name)
            if member is not None:
                tree.attrib[xml_attribute] = member
        # Lastly, let the parent emit any extension elements/attributes.
        atom.ExtensionContainer._AddMembersToElementTree(self, tree)
class AttendeeStatus(UriEnumElement):
    """The Google Calendar attendeeStatus element."""
    _tag = 'attendeeStatus'
    _namespace = gdata.GDATA_NAMESPACE
    _children = UriEnumElement._children.copy()
    _attributes = UriEnumElement._attributes.copy()

    # Schema URI -> short status name carried in self.value.
    attendee_enum = {
        'http://schemas.google.com/g/2005#event.accepted': 'ACCEPTED',
        'http://schemas.google.com/g/2005#event.declined': 'DECLINED',
        'http://schemas.google.com/g/2005#event.invited': 'INVITED',
        'http://schemas.google.com/g/2005#event.tentative': 'TENTATIVE',
    }

    def __init__(self, extension_elements=None, extension_attributes=None,
                 text=None):
        UriEnumElement.__init__(
            self, tag='attendeeStatus',
            enum_map=AttendeeStatus.attendee_enum,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
            text=text)
class AttendeeType(UriEnumElement):
    """The Google Calendar attendeeType element."""
    _tag = 'attendeeType'
    _namespace = gdata.GDATA_NAMESPACE
    _children = UriEnumElement._children.copy()
    _attributes = UriEnumElement._attributes.copy()

    # Schema URI -> attendee requirement level.
    attendee_type_enum = {
        'http://schemas.google.com/g/2005#event.optional': 'OPTIONAL',
        'http://schemas.google.com/g/2005#event.required': 'REQUIRED',
    }

    def __init__(self, extension_elements=None, extension_attributes=None,
                 text=None):
        UriEnumElement.__init__(
            self, tag='attendeeType',
            enum_map=AttendeeType.attendee_type_enum,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
            text=text)
class Visibility(UriEnumElement):
    """The Google Calendar Visibility element."""
    _tag = 'visibility'
    _namespace = gdata.GDATA_NAMESPACE
    _children = UriEnumElement._children.copy()
    _attributes = UriEnumElement._attributes.copy()

    # Schema URI -> event visibility level.
    visibility_enum = {
        'http://schemas.google.com/g/2005#event.confidential': 'CONFIDENTIAL',
        'http://schemas.google.com/g/2005#event.default': 'DEFAULT',
        'http://schemas.google.com/g/2005#event.private': 'PRIVATE',
        'http://schemas.google.com/g/2005#event.public': 'PUBLIC',
    }

    def __init__(self, extension_elements=None, extension_attributes=None,
                 text=None):
        UriEnumElement.__init__(
            self, tag='visibility',
            enum_map=Visibility.visibility_enum,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
            text=text)
class Transparency(UriEnumElement):
    """The Google Calendar Transparency element."""
    _tag = 'transparency'
    _namespace = gdata.GDATA_NAMESPACE
    _children = UriEnumElement._children.copy()
    _attributes = UriEnumElement._attributes.copy()

    # Schema URI -> whether the event blocks time on the calendar.
    transparency_enum = {
        'http://schemas.google.com/g/2005#event.opaque': 'OPAQUE',
        'http://schemas.google.com/g/2005#event.transparent': 'TRANSPARENT',
    }

    def __init__(self, extension_elements=None, extension_attributes=None,
                 text=None):
        UriEnumElement.__init__(
            self, tag='transparency',
            enum_map=Transparency.transparency_enum,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
            text=text)
class Comments(atom.AtomBase):
    """The Google Calendar comments element.

    Wraps a gd:feedLink pointing at the event's comments feed plus an
    optional ``rel`` attribute.
    """
    _tag = 'comments'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _children['{%s}feedLink' % gdata.GDATA_NAMESPACE] = ('feed_link',
                                                         gdata.FeedLink)
    _attributes['rel'] = 'rel'

    def __init__(self, rel=None, feed_link=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.rel = rel
        self.feed_link = feed_link
        self.text = text
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
class EventStatus(UriEnumElement):
    """The Google Calendar eventStatus element."""
    _tag = 'eventStatus'
    _namespace = gdata.GDATA_NAMESPACE
    _children = UriEnumElement._children.copy()
    _attributes = UriEnumElement._attributes.copy()

    # Schema URI -> event lifecycle status.
    status_enum = {
        'http://schemas.google.com/g/2005#event.canceled': 'CANCELED',
        'http://schemas.google.com/g/2005#event.confirmed': 'CONFIRMED',
        'http://schemas.google.com/g/2005#event.tentative': 'TENTATIVE',
    }

    def __init__(self, extension_elements=None, extension_attributes=None,
                 text=None):
        UriEnumElement.__init__(
            self, tag='eventStatus',
            enum_map=EventStatus.status_enum,
            extension_elements=extension_elements,
            extension_attributes=extension_attributes,
            text=text)
class Who(UriEnumElement):
    """The Google Calendar Who element.

    Represents an event participant: name/email attributes plus optional
    attendeeStatus and attendeeType children.  The ``rel`` attribute is the
    enum-mapped value (see relEnum).
    """
    _tag = 'who'
    _namespace = gdata.GDATA_NAMESPACE
    _children = UriEnumElement._children.copy()
    _attributes = UriEnumElement._attributes.copy()
    _children['{%s}attendeeStatus' % gdata.GDATA_NAMESPACE] = (
        'attendee_status', AttendeeStatus)
    _children['{%s}attendeeType' % gdata.GDATA_NAMESPACE] = (
        'attendee_type', AttendeeType)
    _attributes['valueString'] = 'name'
    _attributes['email'] = 'email'

    # rel URI -> participant role.
    relEnum = {
        'http://schemas.google.com/g/2005#event.attendee': 'ATTENDEE',
        'http://schemas.google.com/g/2005#event.organizer': 'ORGANIZER',
        'http://schemas.google.com/g/2005#event.performer': 'PERFORMER',
        'http://schemas.google.com/g/2005#event.speaker': 'SPEAKER',
        'http://schemas.google.com/g/2005#message.bcc': 'BCC',
        'http://schemas.google.com/g/2005#message.cc': 'CC',
        'http://schemas.google.com/g/2005#message.from': 'FROM',
        'http://schemas.google.com/g/2005#message.reply-to': 'REPLY_TO',
        'http://schemas.google.com/g/2005#message.to': 'TO',
    }

    def __init__(self, name=None, email=None, attendee_status=None,
                 attendee_type=None, rel=None, extension_elements=None,
                 extension_attributes=None, text=None):
        UriEnumElement.__init__(self, 'who', Who.relEnum, attrib_name='rel',
                                extension_elements=extension_elements,
                                extension_attributes=extension_attributes,
                                text=text)
        self.name = name
        self.email = email
        self.rel = rel
        self.attendee_status = attendee_status
        self.attendee_type = attendee_type
class OriginalEvent(atom.AtomBase):
    """The Google Calendar OriginalEvent element.

    Identifies the recurring event (id/href) that an exception instance was
    derived from, together with the original When.
    """
    _tag = 'originalEvent'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    # TODO: the when tag used to map to an EntryLink; confirm When is right.
    _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', When)
    _attributes['id'] = 'id'
    _attributes['href'] = 'href'

    def __init__(self, id=None, href=None, when=None,
                 extension_elements=None, extension_attributes=None,
                 text=None):
        self.id = id
        self.href = href
        self.when = when
        self.text = text
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
def GetCalendarEventEntryClass():
    """Return the CalendarEventEntry class (resolved at call time)."""
    return CalendarEventEntry
# This class is not completely defined here, because of a circular reference
# in which CalendarEventEntryLink and CalendarEventEntry refer to one another.
class CalendarEventEntryLink(gdata.EntryLink):
    """An entryLink that contains a calendar event entry.

    Within an event's recurrence exceptions, an entry link points to a
    calendar event entry; this subclass captures the calendar-specific
    extensions of that entry.
    """
    _tag = 'entryLink'
    _namespace = gdata.GDATA_NAMESPACE
    _attributes = gdata.EntryLink._attributes.copy()
    _children = gdata.EntryLink._children.copy()
    # CalendarEventEntry should appear in _children, but it is not defined
    # yet (circular reference); the entry is registered after that class's
    # definition, further down in the module.
class RecurrenceException(atom.AtomBase):
    """The Google Calendar RecurrenceException element.

    Carries the exception instance (entry_link), a pointer back to the
    recurring event it modifies (original_event), and the ``specialized``
    attribute.
    """
    _tag = 'recurrenceException'
    _namespace = gdata.GDATA_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _children['{%s}entryLink' % gdata.GDATA_NAMESPACE] = (
        'entry_link', CalendarEventEntryLink)
    _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = (
        'original_event', OriginalEvent)
    _attributes['specialized'] = 'specialized'

    def __init__(self, specialized=None, entry_link=None,
                 original_event=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.specialized = specialized
        self.entry_link = entry_link
        self.original_event = original_event
        self.text = text
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
class SendEventNotifications(atom.AtomBase):
    """The Google Calendar sendEventNotifications element."""
    _tag = 'sendEventNotifications'
    _namespace = GCAL_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, extension_elements=None, value=None,
                 extension_attributes=None, text=None):
        self.value = value
        self.text = text
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
class QuickAdd(atom.AtomBase):
    """The Google Calendar quickadd element.

    Carries the gCal ``value`` attribute and hand-rolls both directions of
    element-tree conversion so the tag is emitted in the gCal namespace.
    """
    _tag = 'quickadd'
    _namespace = GCAL_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, extension_elements=None, value=None,
                 extension_attributes=None, text=None):
        self.value = value
        self.text = text
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []

    def _TransferToElementTree(self, element_tree):
        # Emit the value attribute (if set), force the namespaced tag, then
        # let the base class finish the conversion.
        if self.value:
            element_tree.attrib['value'] = self.value
        element_tree.tag = GCAL_TEMPLATE % 'quickadd'
        atom.AtomBase._TransferToElementTree(self, element_tree)
        return element_tree

    def _TakeAttributeFromElementTree(self, attribute, element_tree):
        # Claim 'value' for ourselves (pop = read + delete); everything else
        # is handled by the base class.
        if attribute == 'value':
            self.value = element_tree.attrib.pop(attribute)
        else:
            atom.AtomBase._TakeAttributeFromElementTree(self, attribute,
                                                        element_tree)
class Sequence(atom.AtomBase):
    """The Google Calendar gCal:sequence element (a single value attribute)."""
    _tag = 'sequence'
    _namespace = GCAL_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['value'] = 'value'

    def __init__(self, value=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.value = value
        self.text = text
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
class WebContentGadgetPref(atom.AtomBase):
    """The Google Calendar Web Content Gadget Preferences element.

    A name/value pair passed to a web-content gadget.

    Fix: the original placed this docstring *after* the _attributes
    assignments, making it a discarded string expression rather than the
    class docstring; it now sits in the proper position.
    """
    _tag = 'webContentGadgetPref'
    _namespace = GCAL_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _attributes['name'] = 'name'
    _attributes['value'] = 'value'

    def __init__(self, name=None, value=None, extension_elements=None,
                 extension_attributes=None, text=None):
        self.name = name
        self.value = value
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}
class WebContent(atom.AtomBase):
    """The Google Calendar webContent element.

    Describes embedded web content: url/width/height attributes plus a list
    of gadget preference children.
    """
    _tag = 'webContent'
    _namespace = GCAL_NAMESPACE
    _children = atom.AtomBase._children.copy()
    _attributes = atom.AtomBase._attributes.copy()
    _children['{%s}webContentGadgetPref' % GCAL_NAMESPACE] = (
        'gadget_pref', [WebContentGadgetPref])
    _attributes['url'] = 'url'
    _attributes['width'] = 'width'
    _attributes['height'] = 'height'

    def __init__(self, url=None, width=None, height=None, text=None,
                 gadget_pref=None, extension_elements=None,
                 extension_attributes=None):
        self.url = url
        self.width = width
        self.height = height
        self.text = text
        self.gadget_pref = gadget_pref or []
        self.extension_attributes = extension_attributes or {}
        self.extension_elements = extension_elements or []
class WebContentLink(atom.Link):
    """An atom:link carrying an embedded gCal webContent child."""
    _tag = 'link'
    _namespace = atom.ATOM_NAMESPACE
    _children = atom.Link._children.copy()
    _attributes = atom.Link._attributes.copy()
    _children['{%s}webContent' % GCAL_NAMESPACE] = ('web_content', WebContent)

    def __init__(self, title=None, href=None, link_type=None,
                 web_content=None):
        # The rel is fixed for this link flavor.
        atom.Link.__init__(self, rel=WEB_CONTENT_LINK_REL, title=title,
                           href=href, link_type=link_type)
        self.web_content = web_content
class CalendarEventEntry(gdata.BatchEntry):
    """A Google Calendar flavor of an Atom Entry."""
    _tag = gdata.BatchEntry._tag
    _namespace = gdata.BatchEntry._namespace
    _children = gdata.BatchEntry._children.copy()
    _attributes = gdata.BatchEntry._attributes.copy()
    # This class also contains WebContentLinks but converting those members
    # is handled in a special version of _ConvertElementTreeToMember.
    _children['{%s}where' % gdata.GDATA_NAMESPACE] = ('where', [Where])
    _children['{%s}when' % gdata.GDATA_NAMESPACE] = ('when', [When])
    _children['{%s}who' % gdata.GDATA_NAMESPACE] = ('who', [Who])
    _children['{%s}extendedProperty' % gdata.GDATA_NAMESPACE] = (
        'extended_property', [ExtendedProperty])
    _children['{%s}visibility' % gdata.GDATA_NAMESPACE] = ('visibility',
                                                           Visibility)
    _children['{%s}transparency' % gdata.GDATA_NAMESPACE] = ('transparency',
                                                             Transparency)
    _children['{%s}eventStatus' % gdata.GDATA_NAMESPACE] = ('event_status',
                                                            EventStatus)
    _children['{%s}recurrence' % gdata.GDATA_NAMESPACE] = ('recurrence',
                                                           Recurrence)
    _children['{%s}recurrenceException' % gdata.GDATA_NAMESPACE] = (
        'recurrence_exception', [RecurrenceException])
    _children['{%s}sendEventNotifications' % GCAL_NAMESPACE] = (
        'send_event_notifications', SendEventNotifications)
    _children['{%s}quickadd' % GCAL_NAMESPACE] = ('quick_add', QuickAdd)
    _children['{%s}comments' % gdata.GDATA_NAMESPACE] = ('comments', Comments)
    _children['{%s}originalEvent' % gdata.GDATA_NAMESPACE] = ('original_event',
                                                              OriginalEvent)
    _children['{%s}sequence' % GCAL_NAMESPACE] = ('sequence', Sequence)
    _children['{%s}reminder' % gdata.GDATA_NAMESPACE] = ('reminder', [Reminder])

    def __init__(self, author=None, category=None, content=None,
                 atom_id=None, link=None, published=None,
                 title=None, updated=None,
                 transparency=None, comments=None, event_status=None,
                 send_event_notifications=None, visibility=None,
                 recurrence=None, recurrence_exception=None,
                 where=None, when=None, who=None, quick_add=None,
                 extended_property=None, original_event=None,
                 batch_operation=None, batch_id=None, batch_status=None,
                 sequence=None, reminder=None,
                 extension_elements=None, extension_attributes=None,
                 text=None):
        """Create a calendar event entry; all members default to empty."""
        gdata.BatchEntry.__init__(self, author=author, category=category,
                                  content=content,
                                  atom_id=atom_id, link=link,
                                  published=published,
                                  batch_operation=batch_operation,
                                  batch_id=batch_id,
                                  batch_status=batch_status,
                                  title=title, updated=updated)
        self.transparency = transparency
        self.comments = comments
        self.event_status = event_status
        self.send_event_notifications = send_event_notifications
        self.visibility = visibility
        self.recurrence = recurrence
        self.recurrence_exception = recurrence_exception or []
        self.where = where or []
        self.when = when or []
        self.who = who or []
        self.quick_add = quick_add
        self.extended_property = extended_property or []
        self.original_event = original_event
        self.sequence = sequence
        self.reminder = reminder or []
        self.text = text
        self.extension_elements = extension_elements or []
        self.extension_attributes = extension_attributes or {}

    # We needed to add special logic to _ConvertElementTreeToMember because
    # we want to make links with a rel of WEB_CONTENT_LINK_REL into a
    # WebContentLink.
    def _ConvertElementTreeToMember(self, child_tree):
        """Convert *child_tree* into the matching member of this entry."""
        # Special logic to handle Web Content links.  attrib.get() is used so
        # that a link element *without* a rel attribute falls through to the
        # regular handling instead of raising KeyError (bug fix).
        if (child_tree.tag == '{%s}link' % atom.ATOM_NAMESPACE and
                child_tree.attrib.get('rel') == WEB_CONTENT_LINK_REL):
            if self.link is None:
                self.link = []
            self.link.append(atom._CreateClassFromElementTree(WebContentLink,
                                                              child_tree))
            return
        # Find the element's tag in this class's list of child members.
        # `in` replaces Python-2-only dict.has_key() with identical behavior.
        if child_tree.tag in self.__class__._children:
            member_name, member_class = self.__class__._children[child_tree.tag]
            if isinstance(member_class, list):
                # List-valued member: ensure the list exists, then append the
                # newly converted instance.
                if getattr(self, member_name) is None:
                    setattr(self, member_name, [])
                getattr(self, member_name).append(
                    atom._CreateClassFromElementTree(member_class[0],
                                                     child_tree))
            else:
                setattr(self, member_name,
                        atom._CreateClassFromElementTree(member_class,
                                                         child_tree))
        else:
            atom.ExtensionContainer._ConvertElementTreeToMember(self,
                                                                child_tree)

    def GetWebContentLink(self):
        """Find the first link with rel set to WEB_CONTENT_LINK_REL.

        Returns:
          A gdata.calendar.WebContentLink, or None if no link has that rel
          (or the entry has no links at all -- the original raised TypeError
          when self.link was None; `or []` guards against that).
        """
        for a_link in self.link or []:
            if a_link.rel == WEB_CONTENT_LINK_REL:
                return a_link
        return None
def CalendarEventEntryFromString(xml_string):
    """Deserialize *xml_string* into a CalendarEventEntry."""
    return atom.CreateClassFromXMLString(CalendarEventEntry, xml_string)
def CalendarEventCommentEntryFromString(xml_string):
    """Deserialize *xml_string* into a CalendarEventCommentEntry."""
    return atom.CreateClassFromXMLString(CalendarEventCommentEntry, xml_string)
# Now that CalendarEventEntry exists, complete CalendarEventEntryLink by
# registering the entry class as its child -- this resolves the circular
# reference noted at the CalendarEventEntryLink class definition above.
CalendarEventEntryLink._children = {'{%s}entry' % atom.ATOM_NAMESPACE:
                                    ('entry', CalendarEventEntry)}
def CalendarEventEntryLinkFromString(xml_string):
    """Deserialize *xml_string* into a CalendarEventEntryLink."""
    return atom.CreateClassFromXMLString(CalendarEventEntryLink, xml_string)
class CalendarEventFeed(gdata.BatchFeed, gdata.LinkFinder):
    """A Google Calendar event feed flavor of an Atom Feed."""
    _tag = gdata.BatchFeed._tag
    _namespace = gdata.BatchFeed._namespace
    _children = gdata.BatchFeed._children.copy()
    _attributes = gdata.BatchFeed._attributes.copy()
    _children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
                                                    [CalendarEventEntry])
    _children['{%s}timezone' % GCAL_NAMESPACE] = ('timezone', Timezone)

    def __init__(self, author=None, category=None, contributor=None,
                 generator=None, icon=None, atom_id=None, link=None,
                 logo=None, rights=None, subtitle=None, title=None,
                 updated=None, entry=None, total_results=None,
                 start_index=None, items_per_page=None, interrupted=None,
                 timezone=None, extension_elements=None,
                 extension_attributes=None, text=None):
        """Forward everything except *timezone* to BatchFeed."""
        gdata.BatchFeed.__init__(
            self, author=author, category=category, contributor=contributor,
            generator=generator, icon=icon, atom_id=atom_id, link=link,
            logo=logo, rights=rights, subtitle=subtitle, title=title,
            updated=updated, entry=entry, total_results=total_results,
            start_index=start_index, items_per_page=items_per_page,
            interrupted=interrupted, extension_elements=extension_elements,
            extension_attributes=extension_attributes, text=text)
        # gCal-specific child, handled here rather than by BatchFeed.
        self.timezone = timezone
def CalendarListEntryFromString(xml_string):
    """Deserialize *xml_string* into a CalendarListEntry."""
    return atom.CreateClassFromXMLString(CalendarListEntry, xml_string)
def CalendarAclEntryFromString(xml_string):
    """Deserialize *xml_string* into a CalendarAclEntry."""
    return atom.CreateClassFromXMLString(CalendarAclEntry, xml_string)
def CalendarListFeedFromString(xml_string):
    """Deserialize *xml_string* into a CalendarListFeed."""
    return atom.CreateClassFromXMLString(CalendarListFeed, xml_string)
def CalendarAclFeedFromString(xml_string):
    """Deserialize *xml_string* into a CalendarAclFeed."""
    return atom.CreateClassFromXMLString(CalendarAclFeed, xml_string)
def CalendarEventFeedFromString(xml_string):
    """Deserialize *xml_string* into a CalendarEventFeed."""
    return atom.CreateClassFromXMLString(CalendarEventFeed, xml_string)
def CalendarEventCommentFeedFromString(xml_string):
    """Deserialize *xml_string* into a CalendarEventCommentFeed."""
    return atom.CreateClassFromXMLString(CalendarEventCommentFeed, xml_string)
# -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """Add profile fields to Famille and relax its address columns to NULL.

    Auto-generated by South; the frozen ``models`` dict below must match the
    model state at generation time -- edit with care.
    """

    def forwards(self, orm):
        """Apply: add 17 new Famille columns, make 3 address columns nullable."""
        # Adding field 'Famille.description'
        db.add_column(u'famille_famille', 'description',
                      self.gf('django.db.models.fields.CharField')(max_length=400, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Famille.type_garde'
        db.add_column(u'famille_famille', 'type_garde',
                      self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Famille.type_presta'
        db.add_column(u'famille_famille', 'type_presta',
                      self.gf('django.db.models.fields.CharField')(max_length=10, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Famille.tarif'
        db.add_column(u'famille_famille', 'tarif',
                      self.gf('django.db.models.fields.FloatField')(null=True, blank=True),
                      keep_default=False)
        # Adding field 'Famille.diploma'
        db.add_column(u'famille_famille', 'diploma',
                      self.gf('django.db.models.fields.CharField')(max_length=30, null=True, blank=True),
                      keep_default=False)
        # Adding field 'Famille.menage'
        db.add_column(u'famille_famille', 'menage',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.repassage'
        db.add_column(u'famille_famille', 'repassage',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.cdt_periscolaire'
        db.add_column(u'famille_famille', 'cdt_periscolaire',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.sortie_ecole'
        db.add_column(u'famille_famille', 'sortie_ecole',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.nuit'
        db.add_column(u'famille_famille', 'nuit',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.non_fumeur'
        db.add_column(u'famille_famille', 'non_fumeur',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.devoirs'
        db.add_column(u'famille_famille', 'devoirs',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.urgence'
        db.add_column(u'famille_famille', 'urgence',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.psc1'
        db.add_column(u'famille_famille', 'psc1',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.permis'
        db.add_column(u'famille_famille', 'permis',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Adding field 'Famille.langue'
        db.add_column(u'famille_famille', 'langue',
                      self.gf('django.db.models.fields.CharField')(default='', max_length=10, blank=True),
                      keep_default=False)
        # Adding field 'Famille.baby'
        db.add_column(u'famille_famille', 'baby',
                      self.gf('django.db.models.fields.BooleanField')(default=False),
                      keep_default=False)
        # Changing field 'Famille.city'
        db.alter_column(u'famille_famille', 'city', self.gf('django.db.models.fields.CharField')(max_length=40, null=True))
        # Changing field 'Famille.street'
        db.alter_column(u'famille_famille', 'street', self.gf('django.db.models.fields.CharField')(max_length=100, null=True))
        # Changing field 'Famille.postal_code'
        db.alter_column(u'famille_famille', 'postal_code', self.gf('django.db.models.fields.CharField')(max_length=8, null=True))

    def backwards(self, orm):
        """Revert: drop the added columns, restore NOT NULL (default '') addresses."""
        # Deleting field 'Famille.description'
        db.delete_column(u'famille_famille', 'description')
        # Deleting field 'Famille.type_garde'
        db.delete_column(u'famille_famille', 'type_garde')
        # Deleting field 'Famille.type_presta'
        db.delete_column(u'famille_famille', 'type_presta')
        # Deleting field 'Famille.tarif'
        db.delete_column(u'famille_famille', 'tarif')
        # Deleting field 'Famille.diploma'
        db.delete_column(u'famille_famille', 'diploma')
        # Deleting field 'Famille.menage'
        db.delete_column(u'famille_famille', 'menage')
        # Deleting field 'Famille.repassage'
        db.delete_column(u'famille_famille', 'repassage')
        # Deleting field 'Famille.cdt_periscolaire'
        db.delete_column(u'famille_famille', 'cdt_periscolaire')
        # Deleting field 'Famille.sortie_ecole'
        db.delete_column(u'famille_famille', 'sortie_ecole')
        # Deleting field 'Famille.nuit'
        db.delete_column(u'famille_famille', 'nuit')
        # Deleting field 'Famille.non_fumeur'
        db.delete_column(u'famille_famille', 'non_fumeur')
        # Deleting field 'Famille.devoirs'
        db.delete_column(u'famille_famille', 'devoirs')
        # Deleting field 'Famille.urgence'
        db.delete_column(u'famille_famille', 'urgence')
        # Deleting field 'Famille.psc1'
        db.delete_column(u'famille_famille', 'psc1')
        # Deleting field 'Famille.permis'
        db.delete_column(u'famille_famille', 'permis')
        # Deleting field 'Famille.langue'
        db.delete_column(u'famille_famille', 'langue')
        # Deleting field 'Famille.baby'
        db.delete_column(u'famille_famille', 'baby')
        # Changing field 'Famille.city'
        db.alter_column(u'famille_famille', 'city', self.gf('django.db.models.fields.CharField')(default='', max_length=40))
        # Changing field 'Famille.street'
        db.alter_column(u'famille_famille', 'street', self.gf('django.db.models.fields.CharField')(default='', max_length=100))
        # Changing field 'Famille.postal_code'
        db.alter_column(u'famille_famille', 'postal_code', self.gf('django.db.models.fields.CharField')(default='', max_length=8))

    # Frozen ORM state used by South to reconstruct models at migration time.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        u'famille.enfant': {
            'Meta': {'object_name': 'Enfant'},
            'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'e_birthday': ('django.db.models.fields.DateField', [], {'db_column': "'birthday'", 'blank': 'True'}),
            'e_name': ('django.db.models.fields.CharField', [], {'max_length': '20', 'db_column': "'name'"}),
            'famille': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'enfants'", 'to': u"orm['famille.Famille']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        u'famille.famille': {
            'Meta': {'object_name': 'Famille'},
            'baby': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'cdt_periscolaire': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'city': ('django.db.models.fields.CharField', [], {'max_length': '40', 'null': 'True', 'blank': 'True'}),
            'country': ('django.db.models.fields.CharField', [], {'default': "'France'", 'max_length': '20', 'blank': 'True'}),
            'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.CharField', [], {'max_length': '400', 'null': 'True', 'blank': 'True'}),
            'devoirs': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'diploma': ('django.db.models.fields.CharField', [], {'max_length': '30', 'null': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'langue': ('django.db.models.fields.CharField', [], {'max_length': '10', 'blank': 'True'}),
            'menage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'non_fumeur': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'nuit': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'permis': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'postal_code': ('django.db.models.fields.CharField', [], {'max_length': '8', 'null': 'True', 'blank': 'True'}),
            'psc1': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'repassage': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'sortie_ecole': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'street': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
            'tarif': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
            'type_garde': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'type_presta': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
            'urgence': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        },
        u'famille.prestataire': {
            'Meta': {'object_name': 'Prestataire'},
            'created_at': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '100'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
            'sub_types': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'type': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
            'updated_at': ('django.db.models.fields.DateField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
        }
    }

    complete_apps = ['famille']
from django import forms
from django.contrib import admin
from django.contrib.auth.admin import UserAdmin as AuthUserAdmin
from django.contrib.auth.forms import UserChangeForm, UserCreationForm
from .models import User
class MyUserChangeForm(UserChangeForm):
    """Change form re-pointed at the project's custom ``User`` model."""

    class Meta(UserChangeForm.Meta):
        model = User
class MyUserCreationForm(UserCreationForm):
    """Creation form for the custom ``User`` model with a duplicate check.

    The stock ``UserCreationForm`` targets ``auth.User``; this subclass binds
    it to the project's ``User`` and rejects already-taken usernames.
    """

    # Build a merged message dict WITHOUT mutating the parent's dict.
    # The original code did ``UserCreationForm.error_messages.update(...)``,
    # which (a) returns None, so the attribute it was assigned to was always
    # None, and (b) mutates the error_messages dict shared by every
    # UserCreationForm subclass in the process.
    error_messages = dict(
        UserCreationForm.error_messages,
        duplicate_username='This username has already been taken.',
    )

    class Meta(UserCreationForm.Meta):
        model = User

    def clean_username(self):
        """Return the username if it is unused; raise ValidationError otherwise."""
        username = self.cleaned_data["username"]
        try:
            User.objects.get(username=username)
        except User.DoesNotExist:
            return username
        raise forms.ValidationError(self.error_messages['duplicate_username'])
@admin.register(User)
class MyUserAdmin(AuthUserAdmin):
    """Admin configuration for the custom ``User`` model."""

    # Use the project forms so the add/change pages target our User model.
    form = MyUserChangeForm
    add_form = MyUserCreationForm
    # Prepend the profile fields to the stock auth-user fieldsets.
    fieldsets = (
        ('User Profile', {'fields': ('name', 'weight', 'height', 'sex', 'birth_date')}),
    ) + AuthUserAdmin.fieldsets
    list_display = ('username', 'name', 'is_superuser')
    search_fields = ['name']
<?php
/*
* This file is part of the Symfony package.
*
* (c) Fabien Potencier <fabien@symfony.com>
*
* For the full copyright and license information, please view the LICENSE
* file that was distributed with this source code.
*/
use Symfony\Bundle\FrameworkBundle\FrameworkBundle;
use Symfony\Bundle\FrameworkBundle\Tests\Functional\Bundle\TestBundle\TestBundle;
use Symfony\Bundle\SecurityBundle\SecurityBundle;
// Bundles registered for this functional-test application kernel.
return [
    new FrameworkBundle(),
    new SecurityBundle(),
    new TestBundle(),
];
#ifndef SQL_JOIN_CACHE_INCLUDED
#define SQL_JOIN_CACHE_INCLUDED
/* Copyright (c) 2000, 2025, Oracle and/or its affiliates.
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License, version 2.0,
as published by the Free Software Foundation.
This program is designed to work with certain software (including
but not limited to OpenSSL) that is licensed under separate terms,
as designated in a particular file or component or in included license
documentation. The authors of MySQL hereby grant you an additional
permission to link the program and your derivative works with the
separately licensed software that they have either included with
the program or referenced in the documentation.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License, version 2.0, for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */
/**
  Records which join-buffering algorithm (if any) has been selected;
  see setup_join_buffering().
*/
class JOIN_CACHE {
 public:
  /** Bits describing cache's type @sa setup_join_buffering() */
  enum enum_join_cache_type { ALG_NONE = 0, ALG_BNL = 1, ALG_BKA = 2 };
};
#endif /* SQL_JOIN_CACHE_INCLUDED */
#################################################################################################
# @file App.py #
# @brief The App class representing an Android app. #
# @update 2014-02-02 19:59:00 (Sun Feb 2, 2014 at 7:59 PM) #
# @author Paolo Rovelli #
#################################################################################################
#-------------------------------- BEGIN Import Python types: ------------------------------#
import os
import subprocess
import sys
import fnmatch
import zipfile
import shutil
import hashlib
#-------------------------------- END Import Python types. --------------------------------#
#-------------------------------- BEGIN Import Classes: -----------------------------------#
from Author import *
from Certificate import *
#-------------------------------- END Import Classes. -------------------------------------#
#----------------------------------- BEGIN Configuration: ---------------------------------#
# Well-known entry locations inside an APK archive:
certDir = "META-INF/"  # archive directory holding the signing certificate
certFile = "CERT.RSA"  # signer certificate file extracted for Author/Certificate parsing
manifestFile = "AndroidManifest.xml"  # manifest file name (read via aapt, not directly)
#----------------------------------- END Configuration. -----------------------------------#
#----------------------------------- BEGIN Generic functions: ---------------------------------#
##
# Return the substring of s that sits between prefix and suffix.
#
# @param s the string to search.
# @param prefix the marker immediately before the wanted text.
# @param suffix the marker immediately after the wanted text.
# @return the text between prefix and suffix, or "" when either marker is missing.
##
def findBetween(s, prefix, suffix):
    begin = s.find(prefix)
    if begin == -1:
        return ""
    begin += len(prefix)
    end = s.find(suffix, begin)
    if end == -1:
        return ""
    return s[begin:end]
##
# Yield every offset at which needle occurs in haystack (overlapping
# occurrences included, since the scan advances by one character).
#
# @param haystack the string to scan.
# @param needle the substring to look for.
# @return generator of 0-based starting offsets.
##
def findAll(haystack, needle):
    pos = haystack.find(needle)
    while pos != -1:
        yield pos
        pos = haystack.find(needle, pos + 1)
#------------------------------------ END Generic functions. ----------------------------------#
##
# App class: an Android application (APK) and the metadata extracted from it.
#
# Extraction relies on the external `aapt` tool being on the PATH, and on the
# project-local Author and Certificate helper classes.
#
# @author Paolo Rovelli
##
class App():
    #-------- Class attributes (class-level defaults; __init__ rebinds them per instance): --------#
    __author = None  # the author of the app
    __certificate = None  # the digital certificate of the app
    __name = ""  # name of the app
    __package = ""  # package of the app
    __version = ""  # version of the app
    __sdk = ""  # target SDK of the app
    __services = None  # Services declared by the app
    __activities = None  # Activities declared by the app
    __receivers = None  # BroadcastReceivers declared by the app
    __permissions = None  # permissions requested by the app
    __size = ""  # the app size (bytes)
    __md5 = ""  # MD5 hash of the app
    __sha256 = ""  # SHA-256 hash of the app
    __sha512 = ""  # SHA-512 hash of the app
    __dexStrings = None  # strings hard-coded in the classes.dex file
    __dexURLs = None  # URLs hard-coded in the classes.dex file
    __dexShellCommands = None  # commands hard-coded in the classes.dex file
    #-------- Class methods: --------#
    ##
    # Class constructor.
    #
    # @param apkDir the complete path where APK package to be analyzed is stored.
    # @param apkFile the name of the APK package to be analyzed.
    ##
    def __init__(self, apkDir, apkFile):
        apkAbsoluteDir = os.path.join(apkDir, apkFile)
        #Attributes initialization:
        self.__author = None
        self.__certificate = None
        self.__name = ""
        self.__package = ""
        self.__version = ""
        self.__sdk = ""
        self.__services = []
        self.__activities = []
        self.__receivers = []
        self.__permissions = []
        self.__dexStrings = []
        self.__dexURLs = []
        self.__dexShellCommands = []
        #Calculate the MD5 and SHA-256 hashes of the APK package:
        try:
            self.__size = os.path.getsize(apkAbsoluteDir) # os.stat(apkAbsoluteDir).st_size
            apkFileContent = open(apkAbsoluteDir, 'rb').read()
        except:
            # NOTE(review): bare except silently swallows a missing/unreadable
            # APK; size/hash attributes then keep their class-level defaults.
            pass
        else:
            self.__md5 = hashlib.md5(apkFileContent).hexdigest()
            self.__sha256 = hashlib.sha256(apkFileContent).hexdigest()
            self.__sha512 = hashlib.sha512(apkFileContent).hexdigest()
        #Extract the certificate (META-INF/CERT.RSA) from the APK package and save it (temporarily):
        with zipfile.ZipFile(apkAbsoluteDir) as z:
            with z.open(os.path.join(certDir, certFile)) as zf, open(os.path.join(apkDir, os.path.basename(certFile)), 'wb') as f:
                shutil.copyfileobj(zf, f)
        #Extract the author and certificate information from the digital certificate file (META-INF/CERT.RSA):
        # NOTE(review): the certificate was extracted into apkDir above, but the
        # three statements below address it by the bare name "CERT.RSA"
        # (relative to the current working directory) -- this only works when
        # apkDir is the cwd; confirm, or pass os.path.join(apkDir, certFile).
        self.__author = Author(certFile)
        self.__certificate = Certificate(certFile)
        #Remove the (temp) created file:
        os.remove(certFile)
        #Extract the AndroidManifest.xml file info:
        self.extractManifestInfo(apkFile)
    ##
    # Get the author of the app.
    #
    # @return the author of the app.
    ##
    def getAuthor(self):
        return self.__author
    ##
    # Get the digital certificate of the app.
    #
    # @return the digital certificate of the app.
    ##
    def getCertificate(self):
        return self.__certificate
    ##
    # Get the permissions requested by the app in its AndroidManifest.xml file.
    #
    # @return the permissions requested by the app.
    ##
    def getPermissions(self):
        return self.__permissions
    ##
    # Get the number of permissions requested by the app in its AndroidManifest.xml file.
    #
    # @return the number of permissions requested by the app.
    ##
    def getNumberOfPermissions(self):
        return len(self.__permissions)
    ##
    # Get the app package name.
    #
    # @return the app package name.
    ##
    def getPackage(self):
        return self.__package
    ##
    # Get the app name.
    #
    # @return the app name.
    ##
    def getName(self):
        return self.__name
    ##
    # Get the app version.
    #
    # @return the app version.
    ##
    def getVersion(self):
        return self.__version
    ##
    # Get the app Activities.
    #
    # @return the app Activities.
    ##
    def getActivities(self):
        return self.__activities
    ##
    # Get the app Services.
    #
    # @return the app Services.
    ##
    def getServices(self):
        return self.__services
    ##
    # Get the app BroadcastReceivers.
    #
    # @return the app BroadcastReceivers.
    ##
    def getBroadcastReceivers(self):
        return self.__receivers
    ##
    # Get the target SDK version of the app.
    #
    # @return the target SDK version of the app.
    ##
    def getTargetSdk(self):
        return self.__sdk
    ##
    # Get the size of the APK package.
    #
    # @return the size of the APK package.
    ##
    def getSize(self):
        return self.__size
    ##
    # Get the MD5 hash of the APK package.
    #
    # @return the MD5 hash of the APK package.
    ##
    def getAppMD5(self):
        return self.__md5
    ##
    # Get the SHA-256 hash of the APK package.
    #
    # @return the SHA-256 hash of the APK package.
    ##
    def getAppSHA256(self):
        return self.__sha256
    ##
    # Get the SHA-512 hash of the APK package.
    #
    # @return the SHA-512 hash of the APK package.
    ##
    def getAppSHA512(self):
        return self.__sha512
    ##
    # Get the classes.dex encoded URLs.
    #
    # @return the URLs hard-coded into the classes.dex file.
    ##
    def getDexURLs(self):
        return self.__dexURLs
    ##
    # Get the classes.dex encoded shell commands.
    #
    # @return the shell commands hard-coded into the classes.dex file.
    ##
    def getDexShellCommands(self):
        return self.__dexShellCommands
    ##
    # Get the classes.dex encoded strings.
    #
    # @return the strings hard-coded into the classes.dex file.
    ##
    def getDexStrings(self):
        return self.__dexStrings
    ##
    # Set the classes.dex encoded strings, classifying each one as a URL, a
    # shell command, or a plain string by keyword matching.
    #
    # @param strings the strings and URLs hard-coded into the classes.dex file.
    ##
    def setDexStrings(self, strings):
        for string in strings:
            if string != "":
                # Classification is a plain case-insensitive substring scan; a
                # string matching both lists is classified by the first test.
                if "www" in string.lower() or "http://" in string.lower() or ".com" in string.lower() or ".net" in string.lower() or ".org" in string.lower() or ".eu" in string.lower() or ".co.uk" in string.lower() or ".es" in string.lower() or ".it" in string.lower() or ".de" in string.lower() or ".fr" in string.lower() or ".us" in string.lower() or ".ru" in string.lower() or ".biz" in string.lower() or ".info" in string.lower():
                    self.__dexURLs.append(string)
                elif "su " in string.lower() or "su_" in string.lower() or "chmod" in string.lower() or "chown" in string.lower() or "mount" in string.lower() or "dexopt" in string.lower() or "dhcpcd" in string.lower() or "dmesg" in string.lower() or "dnsmasq" in string.lower() or "dumpstate" in string.lower() or "dumpsys" in string.lower() or "fsck" in string.lower() or "iptables" in string.lower() or "keystore" in string.lower() or "lsmod" in string.lower() or "kill" in string.lower() or "rmdir" in string.lower() or "exit" in string.lower() or "logcat" in string.lower() or string.lower() == "pm" or string.lower() == "am" or "apk" in string.lower():
                    self.__dexShellCommands.append(string)
                else:
                    self.__dexStrings.append(string)
        #Sort the lists:
        # NOTE(review): __dexShellCommands is intentionally(?) left unsorted
        # while the other two lists are sorted -- confirm.
        self.__dexURLs.sort()
        self.__dexStrings.sort()
    ##
    # Extract the app name/details/permissions from the AndroidManifest.xml
    # file (via three separate `aapt` invocations).
    ##
    def extractManifestInfo(self, apkFile):
        self.extractAppNameFromAPK(apkFile)
        self.extractAppDetailsFromAPK(apkFile)
        self.extractAppPermissionsFromManifest(apkFile)
    ##
    # Extract the app name, version, package and targetted SDK from the AndroidManifest.xml file of a given APK package.
    #
    # @param apkFile the APK package to be analyzed.
    ##
    def extractAppNameFromAPK(self, apkFile):
        #Extract the APK package info:
        # NOTE(review): apkFile is interpolated into a shell=True command line;
        # a file name containing shell metacharacters would be unsafe -- confirm
        # inputs are trusted, or switch to the list form without shell=True.
        shellcommand = "aapt dump badging " + apkFile # ["aapt", "dump", "badging", apk]
        process = subprocess.Popen(shellcommand, stdout=subprocess.PIPE, stderr=None, shell=True)
        apkInfo = process.communicate()[0].splitlines()
        ##
        # Example: aapt dump badging DroidRoot.A.apk
        # -----------------------------------------
        # package: name='com.corner23.android.universalandroot' versionCode='11' versionName='1.6.1'
        # application-label:'Universal Androot'
        # application-icon-160:'res/drawable/icon.png'
        # application: label='Universal Androot' icon='res/drawable/icon.png'
        # launchable-activity: name='com.corner23.android.universalandroot.UniversalAndroot' label='Universal Androot' icon=''
        # uses-permission:'android.permission.CHANGE_WIFI_STATE'
        # uses-permission:'android.permission.ACCESS_WIFI_STATE'
        # uses-permission:'android.permission.WAKE_LOCK'
        # uses-permission:'android.permission.WRITE_EXTERNAL_STORAGE'
        # uses-implied-permission:'android.permission.WRITE_EXTERNAL_STORAGE','targetSdkVersion < 4'
        # uses-permission:'android.permission.READ_PHONE_STATE'
        # uses-implied-permission:'android.permission.READ_PHONE_STATE','targetSdkVersion < 4'
        # uses-permission:'android.permission.READ_EXTERNAL_STORAGE'
        # uses-implied-permission:'android.permission.READ_EXTERNAL_STORAGE','requested WRITE_EXTERNAL_STORAGE'
        # uses-feature:'android.hardware.wifi'
        # uses-implied-feature:'android.hardware.wifi','requested android.permission.ACCESS_WIFI_STATE, android.permission.CHANGE_WIFI_STATE, or android.permission.CHANGE_WIFI_MULTICAST_STATE permission'
        # uses-feature:'android.hardware.touchscreen'
        # uses-implied-feature:'android.hardware.touchscreen','assumed you require a touch screen unless explicitly made optional'
        # uses-feature:'android.hardware.screen.portrait'
        # uses-implied-feature:'android.hardware.screen.portrait','one or more activities have specified a portrait orientation'
        # main
        # supports-screens: 'normal'
        # supports-any-density: 'false'
        # locales: '--_--'
        # densities: '160'
        ##
        for info in apkInfo:
            #Debug
            #print "info: " + info
            #Package info:
            pathPrefix = "package:"
            if info[0:len(pathPrefix)] == pathPrefix:
                self.__package = findBetween(info, "name='", "'")
                self.__version = findBetween(info, "versionName='", "'")
                continue
            #Target SDK version:
            pathPrefix = "targetSdkVersion:"
            if info[0:len(pathPrefix)] == pathPrefix:
                self.__sdk = findBetween(info, "targetSdkVersion:'", "'")
                continue
            #App name:
            pathPrefix = "application:"
            if info[0:len(pathPrefix)] == pathPrefix:
                self.__name = findBetween(info, "label='", "'")
                continue
            #Main Activity:
            #pathPrefix = "launchable-activity:"
            #if info[0:len(pathPrefix)] == pathPrefix:
            #    self.__activities.append( findBetween(info, "name='", "'") )
            #    continue
        #Debug:
        #print "App Package: " + self.__package
        #print "App Name: " + self.__name
        #print "App Version: " + self.__version
        #print "Target SDK: " + self.__sdk
        #print "Main Activity: " + self.__activities[0]
    ##
    # Extract some app details (e.g. Activities, Services, BroadcastReceivers, etc...) from the AndroidManifest.xml file of a given APK package.
    #
    # @param apkFile the APK package to be analyzed.
    ##
    def extractAppDetailsFromAPK(self, apkFile):
        #Extract the AndroidManifest XML tree:
        # NOTE(review): same shell=True interpolation caveat as in
        # extractAppNameFromAPK applies here.
        shellcommand = "aapt dump xmltree " + apkFile + " AndroidManifest.xml" # ["aapt", "dump", "xmltree", apk, "AndroidManifest.xml"]
        process = subprocess.Popen(shellcommand, stdout=subprocess.PIPE, stderr=None, shell=True)
        xmlTree = process.communicate()[0]
        ##
        # Example: aapt dump xmltree DroidRoot.A.apk AndroidManifest.xml
        # -----------------------------------------
        # N: android=http://schemas.android.com/apk/res/android
        # E: manifest (line=2)
        # A: android:versionCode(0x0101021b)=(type 0x10)0xb
        # A: android:versionName(0x0101021c)="1.6.1" (Raw: "1.6.1")
        # A: package="com.corner23.android.universalandroot" (Raw: "com.corner23.android.universalandroot")
        # E: application (line=6)
        # A: android:label(0x01010001)=@0x7f050000
        # A: android:icon(0x01010002)=@0x7f020000
        # E: activity (line=7)
        # A: android:label(0x01010001)=@0x7f050000
        # A: android:name(0x01010003)=".UniversalAndroot" (Raw: ".UniversalAndroot")
        # A: android:screenOrientation(0x0101001e)=(type 0x10)0x1
        # E: intent-filter (line=10)
        # E: action (line=11)
        # A: android:name(0x01010003)="android.intent.action.MAIN" (Raw: "android.intent.action.MAIN")
        # E: category (line=12)
        # A: android:name(0x01010003)="android.intent.category.LAUNCHER" (Raw: "android.intent.category.LAUNCHER")
        # E: uses-permission (line=16)
        # A: android:name(0x01010003)="android.permission.CHANGE_WIFI_STATE" (Raw: "android.permission.CHANGE_WIFI_STATE")
        # E: uses-permission (line=17)
        # A: android:name(0x01010003)="android.permission.ACCESS_WIFI_STATE" (Raw: "android.permission.ACCESS_WIFI_STATE")
        # E: uses-permission (line=18)
        # A: android:name(0x01010003)="android.permission.WAKE_LOCK" (Raw: "android.permission.WAKE_LOCK")
        ##
        #Take only from the <application> TAG:
        xmlTree = xmlTree[xmlTree.index("application"):-1]
        #print "Number of Activities: " + str(xmlTree.count("activity"))
        #print "Number of Services: " + str(xmlTree.count("service"))
        #print "Number of BroadcastReceivers: " + str(xmlTree.count("receiver"))
        # NOTE(review): these are plain substring scans, so occurrences of
        # "activity"/"service"/"receiver" embedded in other text (e.g. class
        # names) also match -- confirm this over-matching is acceptable.
        for offs in findAll(xmlTree, "activity"):
            activity = xmlTree[offs:-1]
            idx = findBetween(activity, "android:name(", ")=\"")
            self.__activities.append( findBetween(activity, "android:name(" + idx + ")=\"", "\"") )
        for offs in findAll(xmlTree, "service"):
            service = xmlTree[offs:-1]
            idx = findBetween(service, "android:name(", ")=\"")
            self.__services.append( findBetween(service, "android:name(" + idx + ")=\"", "\"") )
        for offs in findAll(xmlTree, "receiver"):
            receiver = xmlTree[offs:-1]
            idx = findBetween(receiver, "android:name(", ")=\"")
            self.__receivers.append( findBetween(receiver, "android:name(" + idx + ")=\"", "\"") )
        #Sort the lists of Activities, Services and BroadcastReceivers:
        self.__activities.sort()
        self.__services.sort()
        self.__receivers.sort()
        #Debug:
        #print "Activities: " + str(self.__activities)
        #print "Services: " + str(self.__services)
        #print "BroadcastReceivers: " + str(self.__receivers)
    ##
    # Extract the permissions from the AndroidManifest.xml file.
    ##
    def extractAppPermissionsFromManifest(self, apkFile):
        #Extract the AndroidManifest.xml permissions:
        # The sed/awk pipeline drops the header line and keeps only the last
        # whitespace-separated field (the permission name) of each line.
        shellcommand = "aapt dump permissions ./" + apkFile + " | sed 1d | awk '{ print $NF }'" # ["aapt", "dump", "permissions", apk]
        process = subprocess.Popen(shellcommand, stdout=subprocess.PIPE, stderr=None, shell=True)
        self.__permissions = process.communicate()[0].splitlines()
        #Sort the list of permissions:
        self.__permissions.sort()
        #Debug:
        #print "App Permission: " + str(self.__permissions)
        #print "App Number of Permissions: " + str(len(self.__permissions))
#!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Flexible python representation of a symbolic mathematical formula.
Acceptes Presentation MathML, Content MathML (and could also do OpenMath).
Provides sympy representation.
"""
#
# File: formula.py
# Date: 04-May-12 (creation)
# Author: I. Chuang <ichuang@mit.edu>
#
import os
import string # pylint: disable=W0402
import re
import logging
import operator
import requests
import sympy
from sympy.printing.latex import LatexPrinter
from sympy.printing.str import StrPrinter
from sympy import latex, sympify
from sympy.physics.quantum.qubit import Qubit
from sympy.physics.quantum.state import Ket
from xml.sax.saxutils import unescape
import unicodedata
from lxml import etree
#import subprocess
from copy import deepcopy
log = logging.getLogger(__name__)
log.warning("Dark code. Needs review before enabling in prod.")
os.environ['PYTHONIOENCODING'] = 'utf-8'
#-----------------------------------------------------------------------------
class dot(sympy.operations.LatticeOp):  # pylint: disable=invalid-name, no-member
    """Symbolic dot product, modeled as a sympy LatticeOp."""
    # zero/identity are the absorbing and neutral elements LatticeOp expects;
    # plain symbols are used so they never collide with real operands.
    zero = sympy.Symbol('dotzero')
    identity = sympy.Symbol('dotidentity')
def _print_dot(_self, expr):
    """Render a binary dot(...) node as LaTeX for LatexPrinter."""
    return r'{((%s) \cdot (%s))}' % (expr.args[0], expr.args[1])

# Monkey-patch the printer so dot(...) nodes render via the helper above.
LatexPrinter._print_dot = _print_dot  # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# unit vectors (for 8.02)
def _print_hat(_self, expr):
    """Render hat(x) as \\hat{x} (lower-cased argument) for the printers."""
    return '\\hat{%s}' % str(expr.args[0]).lower()

# Register with both the LaTeX and the plain-string printers.
LatexPrinter._print_hat = _print_hat  # pylint: disable=protected-access
StrPrinter._print_hat = _print_hat  # pylint: disable=protected-access
#-----------------------------------------------------------------------------
# helper routines
def to_latex(expr):
    """
    Render ``expr`` as a mathjax-wrapped LaTeX string (``''`` for None).
    """
    if expr is None:
        return ''
    rendered = latex(expr)
    # workaround for strange greek
    rendered = rendered.replace(r'\XI', 'XI')
    # Map 'scriptN' back to '\mathcal{N}'.  A regex (rather than a printer
    # hook like _print_hat) is needed because composite forms such as
    # 'script(N)__B' can occur.
    rendered = re.sub(
        r'script([a-zA-Z0-9]+)',
        '\\mathcal{\\1}',
        rendered
    )
    # sympy v6 wraps its latex output in $...$; v7 does not.
    body = rendered[1:-1] if rendered[0] == '$' else rendered
    return '[mathjax]%s[/mathjax]<br>' % body
def my_evalf(expr, chop=False):
    """
    Numerically evaluate ``expr`` -- or every element of a list of
    expressions -- via ``.evalf()``; on any failure, return the input
    unchanged instead of raising.
    """
    def _num(item):
        return item.evalf(chop=chop)
    try:
        if type(expr) == list:
            return [_num(item) for item in expr]
        return _num(expr)
    except:
        return expr
def my_sympify(expr, normphase=False, matrix=False, abcsym=False, do_qubit=False, symtab=None):
    """
    Version of sympify to import expression into sympy.

    normphase -- if True and the result is a list whose first entry is a
                 number, divide out that entry's complex phase.
    matrix    -- if True, convert a rectangular list-of-lists result into a
                 sympy Matrix.
    abcsym    -- if True, predeclare every lowercase letter as a real Symbol.
    do_qubit  -- if True, also recognize qubit/Ket/dot/bit in the input.
    symtab    -- optional symbol table that replaces the default one entirely.
    """
    # make all lowercase real?
    if symtab:
        varset = symtab
    else:
        # Default symbol table: pins names that sympy would otherwise give
        # special meaning (I, N, Q, ZZ, ...) to plain symbols.
        varset = {'p': sympy.Symbol('p'),
                  'g': sympy.Symbol('g'),
                  'e': sympy.E, # for exp
                  'i': sympy.I, # lowercase i is also sqrt(-1)
                  'Q': sympy.Symbol('Q'), # otherwise it is a sympy "ask key"
                  'I': sympy.Symbol('I'), # otherwise it is sqrt(-1)
                  'N': sympy.Symbol('N'), # or it is some kind of sympy function
                  'ZZ': sympy.Symbol('ZZ'), # otherwise it is the PythonIntegerRing
                  'XI': sympy.Symbol('XI'), # otherwise it is the capital \XI
                  'hat': sympy.Function('hat'), # for unit vectors (8.02)
                  }
    if do_qubit: # turn qubit(...) into Qubit instance
        varset.update({'qubit': Qubit,
                       'Ket': Ket,
                       'dot': dot,
                       'bit': sympy.Function('bit'),
                       })
    if abcsym: # consider all lowercase letters as real symbols, in the parsing
        # NOTE(review): string.lowercase is Python-2-only; under Python 3 this
        # would need string.ascii_lowercase.
        for letter in string.lowercase:
            if letter in varset: # exclude those already done
                continue
            varset.update({letter: sympy.Symbol(letter, real=True)})
    sexpr = sympify(expr, locals=varset)
    if normphase: # remove overall phase if sexpr is a list
        if type(sexpr) == list:
            if sexpr[0].is_number:
                ophase = sympy.sympify('exp(-I*arg(%s))' % sexpr[0])
                sexpr = [sympy.Mul(x, ophase) for x in sexpr]
    def to_matrix(expr):
        """
        Convert a list, or list of lists to a matrix.
        """
        # if expr is a list of lists, and is rectangular, then return Matrix(expr)
        if not type(expr) == list:
            return expr
        for row in expr:
            if (not type(row) == list):
                return expr
        rdim = len(expr[0])
        for row in expr:
            if not len(row) == rdim:
                return expr
        return sympy.Matrix(expr)
    if matrix:
        sexpr = to_matrix(sexpr)
    return sexpr
#-----------------------------------------------------------------------------
# class for symbolic mathematical formulas
class formula(object):
"""
Representation of a mathematical formula object. Accepts mathml math expression
for constructing, and can produce sympy translation. The formula may or may not
include an assignment (=).
"""
def __init__(self, expr, asciimath='', options=None):
self.expr = expr.strip()
self.asciimath = asciimath
self.the_cmathml = None
self.the_sympy = None
self.options = options
def is_presentation_mathml(self):
"""
Check if formula is in mathml presentation format.
"""
return '<mstyle' in self.expr
def is_mathml(self):
"""
Check if formula is in mathml format.
"""
return '<math ' in self.expr
def fix_greek_in_mathml(self, xml):
"""
Recursively fix greek letters in passed in xml.
"""
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
for k in xml:
tag = gettag(k)
if tag == 'mi' or tag == 'ci':
usym = unicode(k.text)
try:
udata = unicodedata.name(usym)
except Exception:
udata = None
# print "usym = %s, udata=%s" % (usym,udata)
if udata: # eg "GREEK SMALL LETTER BETA"
if 'GREEK' in udata:
usym = udata.split(' ')[-1]
if 'SMALL' in udata:
usym = usym.lower()
#print "greek: ",usym
k.text = usym
self.fix_greek_in_mathml(k)
return xml
def preprocess_pmathml(self, xml):
r"""
Pre-process presentation MathML from ASCIIMathML to make it more
acceptable for SnuggleTeX, and also to accomodate some sympy
conventions (eg hat(i) for \hat{i}).
This method would be a good spot to look for an integral and convert
it, if possible...
"""
if type(xml) == str or type(xml) == unicode:
xml = etree.fromstring(xml) # TODO: wrap in try
xml = self.fix_greek_in_mathml(xml) # convert greek utf letters to greek spelled out in ascii
def gettag(expr):
return re.sub('{http://[^}]+}', '', expr.tag)
def fix_pmathml(xml):
"""
f and g are processed as functions by asciimathml, eg "f-2" turns
into "<mrow><mi>f</mi><mo>-</mo></mrow><mn>2</mn>" this is
really terrible for turning into cmathml. undo this here.
"""
for k in xml:
tag = gettag(k)
if tag == 'mrow':
if len(k) == 2:
if gettag(k[0]) == 'mi' and k[0].text in ['f', 'g'] and gettag(k[1]) == 'mo':
idx = xml.index(k)
xml.insert(idx, deepcopy(k[0])) # drop the <mrow> container
xml.insert(idx + 1, deepcopy(k[1]))
xml.remove(k)
fix_pmathml(k)
fix_pmathml(xml)
def fix_hat(xml):
"""
hat i is turned into <mover><mi>i</mi><mo>^</mo></mover> ; mangle
this into <mi>hat(f)</mi> hat i also somtimes turned into
<mover><mrow> <mi>j</mi> </mrow><mo>^</mo></mover>
"""
for k in xml:
tag = gettag(k)
if tag == 'mover':
if len(k) == 2:
if gettag(k[0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
newk = etree.Element('mi')
newk.text = 'hat(%s)' % k[0].text
xml.replace(k, newk)
if gettag(k[0]) == 'mrow' and gettag(k[0][0]) == 'mi' and gettag(k[1]) == 'mo' and str(k[1].text) == '^':
newk = etree.Element('mi')
newk.text = 'hat(%s)' % k[0][0].text
xml.replace(k, newk)
fix_hat(k)
fix_hat(xml)
def flatten_pmathml(xml):
"""
Give the text version of certain PMathML elements
Sometimes MathML will be given with each letter separated (it
doesn't know if its implicit multiplication or what). From an xml
node, find the (text only) variable name it represents. So it takes
<mrow>
<mi>m</mi>
<mi>a</mi>
<mi>x</mi>
</mrow>
and returns 'max', for easier use later on.
"""
tag = gettag(xml)
if tag == 'mn':
return xml.text
elif tag == 'mi':
return xml.text
elif tag == 'mrow':
return ''.join([flatten_pmathml(y) for y in xml])
raise Exception('[flatten_pmathml] unknown tag %s' % tag)
def fix_mathvariant(parent):
"""
Fix certain kinds of math variants
Literally replace <mstyle mathvariant="script"><mi>N</mi></mstyle>
with 'scriptN'. There have been problems using script_N or script(N)
"""
for child in parent:
if (gettag(child) == 'mstyle' and child.get('mathvariant') == 'script'):
newchild = etree.Element('mi')
newchild.text = 'script%s' % flatten_pmathml(child[0])
parent.replace(child, newchild)
fix_mathvariant(child)
fix_mathvariant(xml)
# find "tagged" superscripts
# they have the character \u200b in the superscript
# replace them with a__b so snuggle doesn't get confused
def fix_superscripts(xml):
""" Look for and replace sup elements with 'X__Y' or 'X_Y__Z'
In the javascript, variables with '__X' in them had an invisible
character inserted into the sup (to distinguish from powers)
E.g. normal:
<msubsup>
<mi>a</mi>
<mi>b</mi>
<mi>c</mi>
</msubsup>
to be interpreted '(a_b)^c' (nothing done by this method)
And modified:
<msubsup>
<mi>b</mi>
<mi>x</mi>
<mrow>
<mo>​</mo>
<mi>d</mi>
</mrow>
</msubsup>
to be interpreted 'a_b__c'
also:
<msup>
<mi>x</mi>
<mrow>
<mo>​</mo>
<mi>B</mi>
</mrow>
</msup>
to be 'x__B'
"""
for k in xml:
tag = gettag(k)
# match things like the last example--
# the second item in msub is an mrow with the first
# character equal to \u200b
if (
tag == 'msup' and
len(k) == 2 and gettag(k[1]) == 'mrow' and
gettag(k[1][0]) == 'mo' and k[1][0].text == u'\u200b' # whew
):
# replace the msup with 'X__Y'
k[1].remove(k[1][0])
newk = etree.Element('mi')
newk.text = '%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]))
xml.replace(k, newk)
# match things like the middle example-
# the third item in msubsup is an mrow with the first
# character equal to \u200b
if (
tag == 'msubsup' and
len(k) == 3 and gettag(k[2]) == 'mrow' and
gettag(k[2][0]) == 'mo' and k[2][0].text == u'\u200b' # whew
):
# replace the msubsup with 'X_Y__Z'
k[2].remove(k[2][0])
newk = etree.Element('mi')
newk.text = '%s_%s__%s' % (flatten_pmathml(k[0]), flatten_pmathml(k[1]), flatten_pmathml(k[2]))
xml.replace(k, newk)
fix_superscripts(k)
fix_superscripts(xml)
def fix_msubsup(parent):
"""
Snuggle returns an error when it sees an <msubsup> replace such
elements with an <msup>, except the first element is of
the form a_b. I.e. map a_b^c => (a_b)^c
"""
for child in parent:
# fix msubsup
if (gettag(child) == 'msubsup' and len(child) == 3):
newchild = etree.Element('msup')
newbase = etree.Element('mi')
newbase.text = '%s_%s' % (flatten_pmathml(child[0]), flatten_pmathml(child[1]))
newexp = child[2]
newchild.append(newbase)
newchild.append(newexp)
parent.replace(child, newchild)
fix_msubsup(child)
fix_msubsup(xml)
self.xml = xml # pylint: disable=attribute-defined-outside-init
return self.xml
def get_content_mathml(self):
if self.the_cmathml:
return self.the_cmathml
# pre-process the presentation mathml before sending it to snuggletex to convert to content mathml
try:
xml = self.preprocess_pmathml(self.expr)
except Exception, err:
log.warning('Err %s while preprocessing; expr=%s', err, self.expr)
return "<html>Error! Cannot process pmathml</html>"
pmathml = etree.tostring(xml, pretty_print=True)
self.the_pmathml = pmathml # pylint: disable=attribute-defined-outside-init
# convert to cmathml
self.the_cmathml = self.GetContentMathML(self.asciimath, pmathml)
return self.the_cmathml
cmathml = property(get_content_mathml, None, None, 'content MathML representation')
def make_sympy(self, xml=None):
    """
    Return sympy expression for the math formula.
    The math formula is converted to Content MathML then that is parsed.

    This is a recursive function, called on every CMML node. Support for
    more functions can be added by modifying opdict, about halfway down.

    xml: an etree Content-MathML node to convert; None means "start at
         the root", in which case self.expr is parsed (and, if it is
         Presentation MathML, first up-converted via self.cmathml).
    Raises Exception on unknown tags/operators or conversion failures.
    """
    if self.the_sympy:
        # cached result from a previous call
        return self.the_sympy
    if xml is None:  # root
        if not self.is_mathml():
            # plain ASCII-math expression: hand off to the sympify wrapper
            return my_sympify(self.expr)
        if self.is_presentation_mathml():
            cmml = None
            try:
                # up-convert Presentation MathML -> Content MathML (remote call)
                cmml = self.cmathml
                xml = etree.fromstring(str(cmml))
            except Exception, err:
                # NOTE(review): if self.cmathml itself raised, cmml is still
                # None here and the `in cmml` test below raises TypeError --
                # confirm whether that path can occur.
                if 'conversion from Presentation MathML to Content MathML was not successful' in cmml:
                    msg = "Illegal math expression"
                else:
                    msg = 'Err %s while converting cmathml to xml; cmml=%s' % (err, cmml)
                raise Exception(msg)
            xml = self.fix_greek_in_mathml(xml)
            # xml[0] skips the outer <math> wrapper element
            self.the_sympy = self.make_sympy(xml[0])
        else:
            # already Content MathML
            xml = etree.fromstring(self.expr)
            xml = self.fix_greek_in_mathml(xml)
            self.the_sympy = self.make_sympy(xml[0])
        return self.the_sympy

    def gettag(expr):
        # strip the XML namespace: "{http://...}apply" -> "apply"
        return re.sub('{http://[^}]+}', '', expr.tag)

    # simple math
    def op_divide(*args):
        if not len(args) == 2:
            raise Exception('divide given wrong number of arguments!')
        # print "divide: arg0=%s, arg1=%s" % (args[0],args[1])
        return sympy.Mul(args[0], sympy.Pow(args[1], -1))

    def op_plus(*args):
        # n-ary addition, folded left-to-right
        return args[0] if len(args) == 1 else op_plus(*args[:-1]) + args[-1]

    def op_times(*args):
        return reduce(operator.mul, args)

    def op_minus(*args):
        # unary negation or binary subtraction
        if len(args) == 1:
            return -args[0]
        if not len(args) == 2:
            raise Exception('minus given wrong number of arguments!')
        #return sympy.Add(args[0],-args[1])
        return args[0] - args[1]

    # Dispatch table: Content MathML operator tag -> callable applied to
    # the already-converted child expressions.
    opdict = {
        'plus': op_plus,
        'divide': operator.div, # should this be op_divide?
        'times': op_times,
        'minus': op_minus,
        'root': sympy.sqrt,
        'power': sympy.Pow,
        'sin': sympy.sin,
        'cos': sympy.cos,
        'tan': sympy.tan,
        'cot': sympy.cot,
        'sinh': sympy.sinh,
        'cosh': sympy.cosh,
        'coth': sympy.coth,
        'tanh': sympy.tanh,
        'asin': sympy.asin,
        'acos': sympy.acos,
        'atan': sympy.atan,
        'atan2': sympy.atan2,
        'acot': sympy.acot,
        'asinh': sympy.asinh,
        'acosh': sympy.acosh,
        'atanh': sympy.atanh,
        'acoth': sympy.acoth,
        'exp': sympy.exp,
        'log': sympy.log,
        'ln': sympy.ln,
    }

    # simple symbols - TODO is this code used?
    nums1dict = {
        'pi': sympy.pi,
    }

    def parsePresentationMathMLSymbol(xml):
        """
        Parse <msub>, <msup>, <mi>, and <mn>

        Returns a flat string name for the symbol, joining subscript
        parts with '_' and superscript parts with '^'.
        """
        tag = gettag(xml)
        if tag == 'mn':
            return xml.text
        elif tag == 'mi':
            return xml.text
        elif tag == 'msub':
            return '_'.join([parsePresentationMathMLSymbol(y) for y in xml])
        elif tag == 'msup':
            return '^'.join([parsePresentationMathMLSymbol(y) for y in xml])
        raise Exception('[parsePresentationMathMLSymbol] unknown tag %s' % tag)

    # parser tree for Content MathML
    tag = gettag(xml)

    # first do compound objects
    if tag == 'apply':  # apply operator
        opstr = gettag(xml[0])
        if opstr in opdict:
            op = opdict[opstr]  # pylint: disable=invalid-name
            # recursively convert all operands before applying
            args = [self.make_sympy(expr) for expr in xml[1:]]
            try:
                res = op(*args)
            except Exception, err:
                # stash the failing operands for post-mortem debugging
                self.args = args  # pylint: disable=attribute-defined-outside-init
                self.op = op  # pylint: disable=attribute-defined-outside-init, invalid-name
                raise Exception('[formula] error=%s failed to apply %s to args=%s' % (err, opstr, args))
            return res
        else:
            raise Exception('[formula]: unknown operator tag %s' % (opstr))
    elif tag == 'list':  # square bracket list
        if gettag(xml[0]) == 'matrix':
            return self.make_sympy(xml[0])
        else:
            return [self.make_sympy(expr) for expr in xml]
    elif tag == 'matrix':
        return sympy.Matrix([self.make_sympy(expr) for expr in xml])
    elif tag == 'vector':
        return [self.make_sympy(expr) for expr in xml]

    # atoms are below
    elif tag == 'cn':  # number
        return sympy.sympify(xml.text)
        # return float(xml.text)
    elif tag == 'ci':  # variable (symbol)
        if len(xml) > 0 and (gettag(xml[0]) == 'msub' or gettag(xml[0]) == 'msup'):  # subscript or superscript
            usym = parsePresentationMathMLSymbol(xml[0])
            sym = sympy.Symbol(str(usym))
        else:
            usym = unicode(xml.text)
            if 'hat' in usym:
                # unit-vector style symbol, e.g. "hatx"
                sym = my_sympify(usym)
            else:
                if usym == 'i' and self.options is not None and 'imaginary' in self.options:  # i = sqrt(-1)
                    sym = sympy.I
                else:
                    sym = sympy.Symbol(str(usym))
        return sym
    else:  # unknown tag
        raise Exception('[formula] unknown tag %s' % tag)

# Read-only property: lazily converts the formula to a sympy expression.
sympy = property(make_sympy, None, None, 'sympy representation')
def GetContentMathML(self, asciimath, mathml):
    """
    Handle requests to snuggletex API to convert the Ascii math to MathML.

    asciimath: the original ASCII-math input string.
    mathml:    the Presentation MathML for the same expression.
    Returns a Content MathML string wrapped in a <math> element,
    scraped from the snuggletex demo web page's HTML response.
    """
    # url = 'http://192.168.1.2:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
    # url = 'http://127.0.0.1:8080/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
    url = 'https://math-xserver.mitx.mit.edu/snuggletex-webapp-1.2.2/ASCIIMathMLUpConversionDemo'
    payload = {
        'asciiMathInput': asciimath,
        'asciiMathML': mathml,
        #'asciiMathML':unicode(mathml).encode('utf-8'),
    }
    headers = {'User-Agent': "Mozilla/5.0 (Windows; U; Windows NT 5.1; en-US; rv:1.8.1.13) Gecko/20080311 Firefox/2.0.0.13"}
    # SECURITY NOTE: verify=False disables TLS certificate verification for
    # this request; kept as-is because the server may use a self-signed
    # certificate, but this should be reviewed.
    request = requests.post(url, data=payload, headers=headers, verify=False)
    request.encoding = 'utf-8'
    ret = request.text
    # print "encoding: ", request.encoding

    # Scrape the Content MathML section out of the returned HTML page:
    # lines between the 'conversion to Content MathML' heading and the
    # '<h3>Maxima Input Form</h3>' heading.
    mode = 0
    cmathml = []
    for k in ret.split('\n'):
        if 'conversion to Content MathML' in k:
            mode = 1
            continue
        if mode == 1:
            if '<h3>Maxima Input Form</h3>' in k:
                mode = 0
                continue
            cmathml.append(k)
    # drop the two header lines of the scraped section
    cmathml = '\n'.join(cmathml[2:])
    cmathml = '<math xmlns="http://www.w3.org/1998/Math/MathML">\n' + unescape(cmathml) + '\n</math>'
    # print cmathml
    return cmathml
#-----------------------------------------------------------------------------
def test1():
    """Formula built from Content MathML: the sum 1 + 2."""
    src = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <apply>
    <plus/>
    <cn>1</cn>
    <cn>2</cn>
  </apply>
</math>
"""
    return formula(src)
def test2():
    """Formula built from Content MathML: 1 + 2*alpha (Greek alpha)."""
    src = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <apply>
    <plus/>
    <cn>1</cn>
    <apply>
      <times/>
      <cn>2</cn>
      <ci>α</ci>
    </apply>
  </apply>
</math>
"""
    return formula(src)
def test3():
    """Formula built from Content MathML: 1 / (2 + gamma) (Greek gamma)."""
    src = """
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <apply>
    <divide/>
    <cn>1</cn>
    <apply>
      <plus/>
      <cn>2</cn>
      <ci>γ</ci>
    </apply>
  </apply>
</math>
"""
    return formula(src)
def test4():
    """Formula built from Presentation MathML: 1 + 2/alpha (uses mfrac)."""
    src = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mn>1</mn>
    <mo>+</mo>
    <mfrac>
      <mn>2</mn>
      <mi>α</mi>
    </mfrac>
  </mstyle>
</math>
"""
    return formula(src)
def test5():
    """Formula built from Presentation MathML: cos(theta)*I + X (sum of two 2x2 matrices)."""
    src = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mrow>
      <mi>cos</mi>
      <mrow>
        <mo>(</mo>
        <mi>θ</mi>
        <mo>)</mo>
      </mrow>
    </mrow>
    <mo>⋅</mo>
    <mrow>
      <mo>[</mo>
      <mtable>
        <mtr>
          <mtd>
            <mn>1</mn>
          </mtd>
          <mtd>
            <mn>0</mn>
          </mtd>
        </mtr>
        <mtr>
          <mtd>
            <mn>0</mn>
          </mtd>
          <mtd>
            <mn>1</mn>
          </mtd>
        </mtr>
      </mtable>
      <mo>]</mo>
    </mrow>
    <mo>+</mo>
    <mrow>
      <mo>[</mo>
      <mtable>
        <mtr>
          <mtd>
            <mn>0</mn>
          </mtd>
          <mtd>
            <mn>1</mn>
          </mtd>
        </mtr>
        <mtr>
          <mtd>
            <mn>1</mn>
          </mtd>
          <mtd>
            <mn>0</mn>
          </mtd>
        </mtr>
      </mtable>
      <mo>]</mo>
    </mrow>
  </mstyle>
</math>
"""
    return formula(src)
def test6():
    """Formula built from Presentation MathML: 1 + i, with the 'imaginary' option enabled."""
    src = u"""
<math xmlns="http://www.w3.org/1998/Math/MathML">
  <mstyle displaystyle="true">
    <mn>1</mn>
    <mo>+</mo>
    <mi>i</mi>
  </mstyle>
</math>
"""
    return formula(src, options='imaginary')
"""
Data format classes ("responders") that can be plugged
into model_resource.ModelResource and determine how
the objects of a ModelResource instance are rendered
(e.g. serialized to XML, rendered by templates, ...).
"""
from django.core import serializers
from django.core.handlers.wsgi import STATUS_CODE_TEXT
from django.core.paginator import ObjectPaginator, InvalidPage
from django.core.xheaders import populate_xheaders
from django import newforms as forms
from django.http import Http404, HttpResponse
from django.newforms.util import ErrorDict
from django.shortcuts import render_to_response
from django.template import loader, RequestContext
from django.utils import simplejson
from django.utils.xmlutils import SimplerXMLGenerator
from django.views.generic.simple import direct_to_template
class SerializeResponder(object):
    """
    Responder for every data format supported by Django's serializer
    framework (by default: xml, python, json, and optionally yaml).
    """
    def __init__(self, format, mimetype=None, paginate_by=None, allow_empty=False):
        """
        format:
            any format understood by Django's serializer framework.
        mimetype:
            if left as the default None, HttpResponse falls back to
            settings.DEFAULT_CONTENT_TYPE and settings.DEFAULT_CHARSET.
        paginate_by:
            number of elements per page; default is all elements.
        allow_empty:
            if True, an empty first page is rendered instead of a 404.
        """
        self.format = format
        self.mimetype = mimetype
        self.paginate_by = paginate_by
        self.allow_empty = allow_empty
        # Filled in externally with the model fields that may be exposed.
        self.expose_fields = []

    def render(self, object_list):
        """
        Serialize a queryset to self.format, hiding unexposed fields.
        """
        # Temporarily switch off serialization of non-exposed fields.
        masked_fields = []
        for instance in list(object_list):
            for field in instance._meta.fields:
                if field.serialize and field.name not in self.expose_fields:
                    field.serialize = False
                    masked_fields.append(field)
        output = serializers.serialize(self.format, object_list)
        # Restore the shared model-field state.
        for field in masked_fields:
            field.serialize = True
        return output

    def element(self, request, elem):
        """
        Render a single model object to an HttpResponse.
        """
        return HttpResponse(self.render([elem]), self.mimetype)

    def error(self, request, status_code, error_dict=None):
        """
        RESTful error handling: correct status code and mimetype plus a
        human-readable error message (and model errors, if any).
        """
        if not error_dict:
            error_dict = ErrorDict()
        resp = HttpResponse(mimetype=self.mimetype)
        resp.write('%d %s' % (status_code, STATUS_CODE_TEXT[status_code]))
        if error_dict:
            resp.write('\n\nErrors:\n')
            resp.write(error_dict.as_text())
        resp.status_code = status_code
        return resp

    def list(self, request, queryset, page=None):
        """
        Render a list of model objects, paginated when paginate_by is set.
        """
        if not self.paginate_by:
            return HttpResponse(self.render(list(queryset)), self.mimetype)
        paginator = ObjectPaginator(queryset, self.paginate_by)
        if not page:
            page = request.GET.get('page', 1)
        try:
            page = int(page)
            objects = paginator.get_page(page - 1)
        except (InvalidPage, ValueError):
            if page == 1 and self.allow_empty:
                objects = []
            else:
                return self.error(request, 404)
        return HttpResponse(self.render(objects), self.mimetype)
class JSONResponder(SerializeResponder):
    """
    JSON data format class.
    """
    def __init__(self, paginate_by=None, allow_empty=False):
        SerializeResponder.__init__(self, 'json', 'application/json',
            paginate_by=paginate_by, allow_empty=allow_empty)

    def error(self, request, status_code, error_dict=None):
        """
        Return a JSON error response containing a human-readable error
        message, application-specific model errors, and a machine
        readable status code.
        """
        if not error_dict:
            error_dict = ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.status_code = status_code
        body = {
            "error-message": '%d %s' % (status_code, STATUS_CODE_TEXT[status_code]),
            "status-code": status_code,
            "model-errors": error_dict,
        }
        simplejson.dump(body, response)
        return response
class XMLResponder(SerializeResponder):
    """
    XML data format class.
    """
    def __init__(self, paginate_by=None, allow_empty=False):
        SerializeResponder.__init__(self, 'xml', 'application/xml',
            paginate_by=paginate_by, allow_empty=allow_empty)

    def error(self, request, status_code, error_dict=None):
        """
        Return an XML error response containing a human-readable error
        message, application-specific model errors, and a machine
        readable status code.
        """
        from django.conf import settings
        if not error_dict:
            error_dict = ErrorDict()
        response = HttpResponse(mimetype=self.mimetype)
        response.status_code = status_code
        writer = SimplerXMLGenerator(response, settings.DEFAULT_CHARSET)
        writer.startDocument()
        writer.startElement("django-error", {})
        writer.addQuickElement(name="error-message",
            contents='%d %s' % (status_code, STATUS_CODE_TEXT[status_code]))
        writer.addQuickElement(name="status-code", contents=str(status_code))
        if error_dict:
            writer.startElement("model-errors", {})
            for model_field, messages in error_dict.items():
                for message in messages:
                    writer.addQuickElement(name=model_field, contents=message)
            writer.endElement("model-errors")
        writer.endElement("django-error")
        writer.endDocument()
        return response
class TemplateResponder(object):
    """
    Data format class that uses templates (similar to Django's
    generic views).

    Template names are derived from template_dir plus the model's
    module_name: "<dir>/<model>_list.html", "<dir>/<model>_detail.html",
    "<dir>/<model>_form.html" and "<dir>/<status_code>.html" for errors.
    """
    def __init__(self, template_dir, paginate_by=None, template_loader=loader,
                 extra_context=None, allow_empty=False, context_processors=None,
                 template_object_name='object', mimetype=None):
        self.template_dir = template_dir
        self.paginate_by = paginate_by
        self.template_loader = template_loader
        if not extra_context:
            extra_context = {}
        # Callables in extra_context are evaluated once, at construction time.
        for key, value in extra_context.items():
            if callable(value):
                extra_context[key] = value()
        self.extra_context = extra_context
        self.allow_empty = allow_empty
        self.context_processors = context_processors
        self.template_object_name = template_object_name
        self.mimetype = mimetype
        self.expose_fields = None # Set by Collection.__init__

    def _hide_unexposed_fields(self, obj, allowed_fields):
        """
        Remove fields from a model that should not be public.
        """
        # Foreign keys are stored as "<name>_id" in the instance dict, so
        # either spelling in allowed_fields keeps the field visible.
        for field in obj._meta.fields:
            if not field.name in allowed_fields and \
               not field.name + '_id' in allowed_fields:
                obj.__dict__.pop(field.name)

    def list(self, request, queryset, page=None):
        """
        Renders a list of model objects to HttpResponse.

        When paginate_by is set, the page number comes from the `page`
        argument or the ?page= query parameter (1-based); an out-of-range
        page raises Http404 unless allow_empty permits an empty page 1.
        """
        template_name = '%s/%s_list.html' % (self.template_dir, queryset.model._meta.module_name)
        if self.paginate_by:
            paginator = ObjectPaginator(queryset, self.paginate_by)
            if not page:
                page = request.GET.get('page', 1)
            try:
                page = int(page)
                # ObjectPaginator pages are 0-based internally.
                object_list = paginator.get_page(page - 1)
            except (InvalidPage, ValueError):
                if page == 1 and self.allow_empty:
                    object_list = []
                else:
                    raise Http404
            # Context mirrors Django's object_list generic view.
            c = RequestContext(request, {
                '%s_list' % self.template_object_name: object_list,
                'is_paginated': paginator.pages > 1,
                'results_per_page': self.paginate_by,
                'has_next': paginator.has_next_page(page - 1),
                'has_previous': paginator.has_previous_page(page - 1),
                'page': page,
                'next': page + 1,
                'previous': page - 1,
                'last_on_page': paginator.last_on_page(page - 1),
                'first_on_page': paginator.first_on_page(page - 1),
                'pages': paginator.pages,
                'hits' : paginator.hits,
            }, self.context_processors)
        else:
            object_list = queryset
            c = RequestContext(request, {
                '%s_list' % self.template_object_name: object_list,
                'is_paginated': False
            }, self.context_processors)
        if not self.allow_empty and len(queryset) == 0:
            raise Http404
        # Hide unexposed fields
        for obj in object_list:
            self._hide_unexposed_fields(obj, self.expose_fields)
        c.update(self.extra_context)
        t = self.template_loader.get_template(template_name)
        return HttpResponse(t.render(c), mimetype=self.mimetype)

    def element(self, request, elem):
        """
        Renders single model objects to HttpResponse.
        """
        template_name = '%s/%s_detail.html' % (self.template_dir, elem._meta.module_name)
        t = self.template_loader.get_template(template_name)
        c = RequestContext(request, {
            self.template_object_name : elem,
        }, self.context_processors)
        # Hide unexposed fields
        self._hide_unexposed_fields(elem, self.expose_fields)
        c.update(self.extra_context)
        response = HttpResponse(t.render(c), mimetype=self.mimetype)
        # Add X-Object-Type / X-Object-Id headers for the admin.
        populate_xheaders(request, response, elem.__class__, getattr(elem, elem._meta.pk.name))
        return response

    def error(self, request, status_code, error_dict=None):
        """
        Renders error template (template name: error status code).
        """
        if not error_dict:
            error_dict = ErrorDict()
        response = direct_to_template(request,
            template = '%s/%s.html' % (self.template_dir, str(status_code)),
            extra_context = { 'errors' : error_dict },
            mimetype = self.mimetype)
        response.status_code = status_code
        return response

    def create_form(self, request, queryset, form_class):
        """
        Render form for creation of new collection entry.
        """
        ResourceForm = forms.form_for_model(queryset.model, form=form_class)
        if request.POST:
            form = ResourceForm(request.POST)
        else:
            form = ResourceForm()
        template_name = '%s/%s_form.html' % (self.template_dir, queryset.model._meta.module_name)
        return render_to_response(template_name, {'form':form})

    def update_form(self, request, pk, queryset, form_class):
        """
        Render edit form for single entry.
        """
        # Remove queryset cache by cloning the queryset
        queryset = queryset._clone()
        elem = queryset.get(**{queryset.model._meta.pk.name : pk})
        ResourceForm = forms.form_for_instance(elem, form=form_class)
        # NOTE(review): request.PUT is not a standard Django attribute --
        # presumably set by middleware that parses PUT bodies; confirm.
        if request.PUT:
            form = ResourceForm(request.PUT)
        else:
            form = ResourceForm()
        template_name = '%s/%s_form.html' % (self.template_dir, elem._meta.module_name)
        return render_to_response(template_name,
            {'form':form, 'update':True, self.template_object_name:elem})
# ERPNext - web based ERP (http://erpnext.com)
# Copyright (C) 2012 Web Notes Technologies Pvt Ltd
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from __future__ import unicode_literals
import webnotes
from webnotes import msgprint
# Map from DocType name to [subject format string, display colour].
# The subject string is filled with the document's field values using
# the `%` operator (so placeholders must match field names exactly).
feed_dict = {
    # Project
    'Project': ['[%(status)s]', '#000080'],
    'Task': ['[%(status)s] %(subject)s', '#000080'],

    # Sales
    'Lead': ['%(lead_name)s', '#000080'],
    'Quotation': ['[%(status)s] To %(customer_name)s worth %(currency)s %(grand_total_export)s', '#4169E1'],
    'Sales Order': ['[%(status)s] To %(customer_name)s worth %(currency)s %(grand_total_export)s', '#4169E1'],

    # Purchase
    'Supplier': ['%(supplier_name)s, %(supplier_type)s', '#6495ED'],
    'Purchase Order': ['[%(status)s] %(name)s To %(supplier_name)s for %(currency)s %(grand_total_import)s', '#4169E1'],

    # Stock
    'Delivery Note': ['[%(status)s] To %(customer_name)s', '#4169E1'],
    'Purchase Receipt': ['[%(status)s] From %(supplier)s', '#4169E1'],

    # Accounts
    'Journal Voucher': ['[%(voucher_type)s] %(name)s', '#4169E1'],
    'Purchase Invoice': ['To %(supplier_name)s for %(currency)s %(grand_total_import)s', '#4169E1'],
    'Sales Invoice':['To %(customer_name)s for %(currency)s %(grand_total_export)s', '#4169E1'],

    # HR
    'Expense Claim': ['[%(approval_status)s] %(name)s by %(employee_name)s', '#4169E1'],
    'Salary Slip': ['%(employee_name)s for %(month)s %(fiscal_year)s', '#4169E1'],
    'Leave Transaction':['%(leave_type)s for %(employee)s', '#4169E1'],

    # Support
    'Customer Issue': ['[%(status)s] %(description)s by %(customer_name)s', '#000080'],
    'Maintenance Visit':['To %(customer_name)s', '#4169E1'],
    'Support Ticket': ["[%(status)s] %(subject)s", '#000080'],

    # Website
    'Web Page': ['%(title)s', '#000080'],
    'Blog': ['%(title)s', '#000080']
}
def make_feed(feedtype, doctype, name, owner, subject, color):
    """Create a new Feed record, pruning stale or duplicate entries first."""
    from webnotes.model.doc import Document
    from webnotes.utils import get_fullname

    if feedtype in ('Login', 'Comment', 'Assignment'):
        # transient feed types: drop entries older than a week
        webnotes.conn.sql("""delete from tabFeed where
            datediff(curdate(), creation) > 7 and doc_type in ('Comment', 'Login', 'Assignment')""")
    else:
        # keep only a single (non-comment) feed entry per document
        webnotes.conn.sql("""delete from tabFeed
            where doc_type=%s and doc_name=%s
            and ifnull(feed_type,'') != 'Comment'""", (doctype, name))

    feed = Document('Feed')
    feed.owner = owner
    feed.feed_type = feedtype
    feed.doc_type = doctype
    feed.doc_name = name
    feed.subject = subject
    feed.color = color
    feed.full_name = get_fullname(owner)
    feed.save()
def update_feed(controller, method=None):
    """Add a feed entry when a document is updated or submitted."""
    if method not in ('on_update', 'on_submit'):
        return
    doc = controller.doc
    subject, color = feed_dict.get(doc.doctype, [None, None])
    if subject:
        make_feed('', doc.doctype, doc.name, doc.owner, subject % doc.fields, color)
#!/usr/bin/env python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
try:
from cStringIO import StringIO
except ImportError:
from StringIO import StringIO
from avro import ipc
from avro import io
from zope.interface import implements
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.internet.defer import maybeDeferred, Deferred
from twisted.web.iweb import IBodyProducer
from twisted.web import resource, server
from twisted.internet.protocol import Protocol
class TwistedRequestor(ipc.BaseRequestor):
  """A Twisted-compatible requestor. Returns a Deferred that will fire with the
  returning value, instead of blocking until the request completes."""
  def _process_handshake(self, call_response, message_name, request_datum):
    # Decode the handshake; if the server accepted it, read the actual
    # call response, otherwise re-issue the request with the updated
    # handshake state.
    decoder = io.BinaryDecoder(StringIO(call_response))
    if self.read_handshake_response(decoder):
      return self.read_call_response(message_name, decoder)
    return self.request(message_name, request_datum)

  def issue_request(self, call_request, message_name, request_datum):
    deferred = self.transceiver.transceive(call_request)
    deferred.addCallback(self._process_handshake, message_name, request_datum)
    return deferred
class RequestStreamingProducer(object):
  """A streaming producer for issuing requests with the Twisted.web Agent.

  Writes the Avro message to the consumer in framed buffers: each buffer
  is preceded by a big-endian length header, and the message is
  terminated by a zero-length buffer."""
  implements(IBodyProducer)
  paused = False
  stopped = False
  started = False

  def __init__(self, message):
    self._message = message
    self._length = len(message)
    # We need a buffer length header for every buffer and an additional
    # zero-length buffer as the message terminator
    self._length += (self._length / ipc.BUFFER_SIZE + 2) \
      * ipc.BUFFER_HEADER_LENGTH
    self._total_bytes_sent = 0
    self._deferred = Deferred()

  # read-only properties
  message = property(lambda self: self._message)
  length = property(lambda self: self._length)
  consumer = property(lambda self: self._consumer)
  deferred = property(lambda self: self._deferred)

  def _get_total_bytes_sent(self):
    return self._total_bytes_sent

  def _set_total_bytes_sent(self, bytes_sent):
    self._total_bytes_sent = bytes_sent

  total_bytes_sent = property(_get_total_bytes_sent, _set_total_bytes_sent)

  def startProducing(self, consumer):
    if self.started:
      return
    self.started = True
    self._consumer = consumer
    # Keep writing data to the consumer until we're finished,
    # paused (pauseProducing()) or stopped (stopProducing())
    while self.length - self.total_bytes_sent > 0 and \
      not self.paused and not self.stopped:
      self.write()
    # self.write will fire this deferred once it has written
    # the entire message to the consumer
    return self.deferred

  def resumeProducing(self):
    self.paused = False
    # BUG FIX: this previously called self.write(self), which raises a
    # TypeError since write() takes no extra arguments and would also
    # have written at most one buffer.  Resume with the same loop as
    # startProducing so the remaining message is flushed until we are
    # paused or stopped again.
    while self.length - self.total_bytes_sent > 0 and \
      not self.paused and not self.stopped:
      self.write()

  def pauseProducing(self):
    self.paused = True

  def stopProducing(self):
    self.stopped = True

  def write(self):
    # Emit the next framed buffer (at most BUFFER_SIZE payload bytes).
    if self.length - self.total_bytes_sent > ipc.BUFFER_SIZE:
      buffer_length = ipc.BUFFER_SIZE
    else:
      buffer_length = self.length - self.total_bytes_sent
    self.write_buffer(self.message[self.total_bytes_sent:
                      (self.total_bytes_sent + buffer_length)])
    self.total_bytes_sent += buffer_length
    # Make sure we wrote the entire message
    if self.total_bytes_sent == self.length and not self.stopped:
      self.stopProducing()
      # A message is always terminated by a zero-length buffer.
      self.write_buffer_length(0)
      self.deferred.callback(None)

  def write_buffer(self, chunk):
    buffer_length = len(chunk)
    self.write_buffer_length(buffer_length)
    self.consumer.write(chunk)

  def write_buffer_length(self, n):
    # Big-endian 4-byte length header preceding every buffer.
    self.consumer.write(ipc.BIG_ENDIAN_INT_STRUCT.pack(n))
class AvroProtocol(Protocol):
  # Reassembles an Avro framed message from the incoming byte stream and
  # fires `finished` with the complete payload.
  recvd = ''     # unconsumed bytes received so far
  done = False   # True once the zero-length terminator buffer was seen

  def __init__(self, finished):
    # finished: Deferred fired with the joined message body, or errbacked
    # if the connection is lost before the terminator arrives.
    self.finished = finished
    self.message = []

  def dataReceived(self, data):
    self.recvd = self.recvd + data
    # Consume as many complete framing buffers as are available; each
    # buffer is preceded by a big-endian length header.
    while len(self.recvd) >= ipc.BUFFER_HEADER_LENGTH:
      buffer_length ,= ipc.BIG_ENDIAN_INT_STRUCT.unpack(
        self.recvd[:ipc.BUFFER_HEADER_LENGTH])
      if buffer_length == 0:
        # a zero-length buffer terminates the message
        response = ''.join(self.message)
        self.done = True
        self.finished.callback(response)
        break
      if len(self.recvd) < buffer_length + ipc.BUFFER_HEADER_LENGTH:
        # buffer not fully received yet; wait for more data
        break
      buffer = self.recvd[ipc.BUFFER_HEADER_LENGTH:buffer_length + ipc.BUFFER_HEADER_LENGTH]
      self.recvd = self.recvd[buffer_length + ipc.BUFFER_HEADER_LENGTH:]
      self.message.append(buffer)

  def connectionLost(self, reason):
    if not self.done:
      self.finished.errback(ipc.ConnectionClosedException("Reader read 0 bytes."))
class TwistedHTTPTransceiver(object):
  """This transceiver uses the Agent class present in Twisted.web >= 9.0
  for issuing requests to the remote endpoint."""
  def __init__(self, host, port, remote_name=None, reactor=None):
    self.url = "http://%s:%d/" % (host, port)
    if remote_name is None:
      # There's no easy way to get this peer's remote address
      # in Twisted so I use a random UUID to identify ourselves
      import uuid
      self.remote_name = uuid.uuid4()
    else:
      # BUG FIX: an explicitly supplied remote_name used to be silently
      # discarded (self.remote_name was only set in the None branch).
      self.remote_name = remote_name

    if reactor is None:
      from twisted.internet import reactor
    self.agent = Agent(reactor)

  def read_framed_message(self, response):
    # Collect the framed response body asynchronously; the returned
    # Deferred fires with the reassembled message.
    finished = Deferred()
    response.deliverBody(AvroProtocol(finished))
    return finished

  def transceive(self, request):
    """POST the framed Avro request and return a Deferred firing with
    the de-framed response body."""
    req_method = 'POST'
    req_headers = {
      'Content-Type': ['avro/binary'],
      'Accept-Encoding': ['identity'],
    }
    body_producer = RequestStreamingProducer(request)
    d = self.agent.request(
      req_method,
      self.url,
      headers=Headers(req_headers),
      bodyProducer=body_producer)
    return d.addCallback(self.read_framed_message)
class AvroResponderResource(resource.Resource):
  """This Twisted.web resource can be placed anywhere in a URL hierarchy
  to provide an Avro endpoint. Different Avro protocols can be served
  by the same web server as long as they are in different resources in
  a URL hierarchy."""
  isLeaf = True

  def __init__(self, responder):
    resource.Resource.__init__(self)
    self.responder = responder

  def cb_render_POST(self, resp_body, request):
    # Write the framed Avro response once the responder has produced it.
    request.setResponseCode(200)
    request.setHeader('Content-Type', 'avro/binary')
    ipc.FramedWriter(request).write_framed_message(resp_body)
    request.finish()

  def render_POST(self, request):
    # Twisted.web does not stream request bodies, so the whole payload
    # is already buffered in request.content; rewind and de-frame it.
    request.content.seek(0, 0)
    reader = ipc.FramedReader(request.content)
    call_request = reader.read_framed_message()
    d = maybeDeferred(self.responder.respond, call_request)
    d.addCallback(self.cb_render_POST, request)
    return server.NOT_DONE_YET
---
navigation_title: "MySQL"
mapped_pages:
- https://www.elastic.co/guide/en/elasticsearch/reference/current/es-connectors-mysql.html
---
# Elastic MySQL connector reference [es-connectors-mysql]
The *Elastic MySQL connector* is a [connector](/reference/search-connectors/index.md) for [MySQL](https://www.mysql.com) data sources. This connector is written in Python using the [Elastic connector framework](https://github.com/elastic/connectors/tree/main).
View the [**source code** for this connector](https://github.com/elastic/connectors/tree/main/app/connectors_service/connectors/sources/mysql) (branch *main*, compatible with Elastic *9.0*).
## **Self-managed connector** [es-connectors-mysql-connector-client-reference]
### Availability and prerequisites [es-connectors-mysql-client-prerequisites]
This connector is available as a **self-managed connector** in Elastic versions **8.5.0 and later**. To use this connector as a self-managed connector, satisfy all [self-managed connector requirements](/reference/search-connectors/self-managed-connectors.md).
This connector has no additional prerequisites beyond the shared requirements, linked above.
### Create a MySQL connector [es-connectors-mysql-create-connector-client]
#### Use the UI [es-connectors-mysql-client-create-use-the-ui]
To create a new MySQL connector:
1. In the Kibana UI, search for "connectors" using the [global search field](docs-content://explore-analyze/query-filter/filtering.md#_finding_your_apps_and_objects) and choose the "Elasticsearch" connectors.
2. Follow the instructions to create a new **MySQL** self-managed connector.
#### Use the API [es-connectors-mysql-client-create-use-the-api]
You can use the {{es}} [Create connector API](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-connector) to create a new MySQL self-managed connector.
For example:
```console
PUT _connector/my-mysql-connector
{
"index_name": "my-elasticsearch-index",
"name": "Content synced from MySQL",
"service_type": "mysql"
}
```
% TEST[skip:can’t test in isolation]
:::::{dropdown} You’ll also need to create an API key for the connector to use.
::::{note}
The user needs the cluster privileges `manage_api_key`, `manage_connector` and `write_connector_secrets` to generate API keys programmatically.
::::
To create an API key for the connector:
1. Run the following command, replacing values where indicated. Note the `encoded` return values from the response:
```console
POST /_security/api_key
{
"name": "connector_name-connector-api-key",
"role_descriptors": {
"connector_name-connector-role": {
"cluster": [
"monitor",
"manage_connector"
],
"indices": [
{
"names": [
"index_name",
".search-acl-filter-index_name",
".elastic-connectors*"
],
"privileges": [
"all"
],
"allow_restricted_indices": false
}
]
}
}
}
```
2. Update your `config.yml` file with the API key `encoded` value.
:::::
Refer to the [{{es}} API documentation](https://www.elastic.co/docs/api/doc/elasticsearch/group/endpoint-connector) for details of all available Connector APIs.
### Usage [es-connectors-mysql-client-usage]
To use this connector as a **self-managed connector**, see [*Self-managed connectors*](/reference/search-connectors/self-managed-connectors.md).
For additional operations, see [*Connectors UI in {{kib}}*](/reference/search-connectors/connectors-ui-in-kibana.md).
### Compatibility [es-connectors-mysql-client-compatibility]
This connector is compatible with **MySQL 5.6 and later**.
The connector is also compatible with **MariaDB** databases compatible with the above.
The data source and your Elastic deployment must be able to communicate with each other over a network.
### Configuration [es-connectors-mysql-client-configuration]
Each time you create an index to be managed by this connector, you will create a new connector configuration. You will need some or all of the following information about the data source.
Host
: The IP address or domain name of the MySQL host, excluding port. Examples:
* `192.158.1.38`
* `localhost`
Port
: The port of the MySQL host. Examples:
* `3306`
* `3307`
Username
: The MySQL username the connector will use.
The user must have access to the configured database. You may want to create a dedicated, read-only user for each connector.
Password
: The MySQL password the connector will use.
Database
: The MySQL database to sync. The database must be accessible using the configured username and password.
Examples:
* `products`
* `orders`
Tables
: The tables in the configured database to sync. One or more table names, separated by commas. The tables must be accessible using the configured username and password.
Examples:
* `furniture, food, toys`
* `laptops`
Enable SSL
: Whether SSL verification will be enabled. Default value is `True`.
SSL Certificate
: Content of SSL certificate. If SSL is disabled, the SSL certificate value will be ignored.
::::{dropdown} Expand to see an example certificate
```
-----BEGIN CERTIFICATE-----
MIID+jCCAuKgAwIBAgIGAJJMzlxLMA0GCSqGSIb3DQEBCwUAMHoxCzAJBgNVBAYT
AlVTMQwwCgYDVQQKEwNJQk0xFjAUBgNVBAsTDURlZmF1bHROb2RlMDExFjAUBgNV
BAsTDURlZmF1bHRDZWxsMDExGTAXBgNVBAsTEFJvb3QgQ2VydGlmaWNhdGUxEjAQ
BgNVBAMTCWxvY2FsaG9zdDAeFw0yMTEyMTQyMjA3MTZaFw0yMjEyMTQyMjA3MTZa
MF8xCzAJBgNVBAYTAlVTMQwwCgYDVQQKEwNJQk0xFjAUBgNVBAsTDURlZmF1bHRO
b2RlMDExFjAUBgNVBAsTDURlZmF1bHRDZWxsMDExEjAQBgNVBAMTCWxvY2FsaG9z
dDCCASIwDQYJKoZIhvcNAQEBBQADggEPADCCAQoCggEBAMv5HCsJZIpI5zCy+jXV
z6lmzNc9UcVSEEHn86h6zT6pxuY90TYeAhlZ9hZ+SCKn4OQ4GoDRZhLPTkYDt+wW
CV3NTIy9uCGUSJ6xjCKoxClJmgSQdg5m4HzwfY4ofoEZ5iZQ0Zmt62jGRWc0zuxj
hegnM+eO2reBJYu6Ypa9RPJdYJsmn1RNnC74IDY8Y95qn+WZj//UALCpYfX41hko
i7TWD9GKQO8SBmAxhjCDifOxVBokoxYrNdzESl0LXvnzEadeZTd9BfUtTaBHhx6t
njqqCPrbTY+3jAbZFd4RiERPnhLVKMytw5ot506BhPrUtpr2lusbN5svNXjuLeea
MMUCAwEAAaOBoDCBnTATBgNVHSMEDDAKgAhOatpLwvJFqjAdBgNVHSUEFjAUBggr
BgEFBQcDAQYIKwYBBQUHAwIwVAYDVR0RBE0wS4E+UHJvZmlsZVVVSUQ6QXBwU3J2
MDEtQkFTRS05MDkzMzJjMC1iNmFiLTQ2OTMtYWI5NC01Mjc1ZDI1MmFmNDiCCWxv
Y2FsaG9zdDARBgNVHQ4ECgQITzqhA5sO8O4wDQYJKoZIhvcNAQELBQADggEBAKR0
gY/BM69S6BDyWp5dxcpmZ9FS783FBbdUXjVtTkQno+oYURDrhCdsfTLYtqUlP4J4
CHoskP+MwJjRIoKhPVQMv14Q4VC2J9coYXnePhFjE+6MaZbTjq9WaekGrpKkMaQA
iQt5b67jo7y63CZKIo9yBvs7sxODQzDn3wZwyux2vPegXSaTHR/rop/s/mPk3YTS
hQprs/IVtPoWU4/TsDN3gIlrAYGbcs29CAt5q9MfzkMmKsuDkTZD0ry42VjxjAmk
xw23l/k8RoD1wRWaDVbgpjwSzt+kl+vJE/ip2w3h69eEZ9wbo6scRO5lCO2JM4Pr
7RhLQyWn2u00L7/9Omw=
-----END CERTIFICATE-----
```
::::
### Known issues [es-connectors-mysql-client-known-issues]
This connector has the following known issues:
* **Upgrading from a tech preview connector (8.7 or earlier) to 8.8 will cause the MySQL connector configuration to be invalid.**
MySQL connectors prior to 8.8 can be missing some configuration fields that are required for the connector to run. If you would like to continue using your MySQL connector after upgrading from 8.7 or earlier, run the script below to fix your connector’s configuration. This will populate the configuration with the missing fields. The auxiliary information needed for the configuration will then be automatically added by the self-managed connector.
```console
POST /.elastic-connectors/_update/connector_id
{
"doc" : {
"configuration": {
"tables": {
"type": "list",
"value": "*"
},
"ssl_enabled": {
"type": "bool",
"value": false
},
"ssl_ca": {
"type": "str",
"value": ""
},
"fetch_size": {
"type": "int",
"value": 50
},
"retry_count": {
"type": "int",
"value": 3
}
}
}
}
```
% TEST[skip:TODO]
* **Upgrading to 8.8 does not migrate MySQL sync rules.**
After upgrading, you must re-create your sync rules.
See [Known issues](/release-notes/known-issues.md) for any issues affecting all connectors.
### Documents and syncs [es-connectors-mysql-client-syncs]
The following describes the default syncing behavior for this connector. Use [sync rules](/reference/search-connectors/es-sync-rules.md) and [ingest pipelines](docs-content://solutions/search/ingest-for-search.md) to customize syncing for specific indices.
All records in the MySQL database included in your connector configuration are extracted and transformed into documents in your Elasticsearch index.
* For each row in your MySQL database table, the connector creates one **Elasticsearch document**.
* For each column, the connector transforms the column into an **Elasticsearch field**.
* Elasticsearch [dynamically maps^](docs-content://manage-data/data-store/mapping/dynamic-mapping.md) MySQL data types to **Elasticsearch data types**.
* Tables with no primary key defined are skipped.
* Field values that represent other records are replaced with the primary key for that record (composite primary keys are joined with `_`).
The Elasticsearch mapping is created when the first document is created.
Each sync is a "full" sync.
For each MySQL row discovered:
* If it does not exist, the document is created in Elasticsearch.
* If it already exists in Elasticsearch, the Elasticsearch document is replaced and the version is incremented.
* If an existing Elasticsearch document no longer exists in the MySQL table, it is deleted from Elasticsearch.
### Deployment using Docker [es-connectors-mysql-client-docker]
You can deploy the MySQL connector as a self-managed connector using Docker. Follow these instructions.
::::{dropdown} Step 1: Download sample configuration file
Download the sample configuration file. You can either download it manually or run the following command:
```sh
curl https://raw.githubusercontent.com/elastic/connectors/main/app/connectors_service/config.yml.example --output ~/connectors-config/config.yml
```
% NOTCONSOLE
Remember to update the `--output` argument value if your directory name is different, or you want to use a different config file name.
::::
::::{dropdown} Step 2: Update the configuration file for your self-managed connector
Update the configuration file with the following settings to match your environment:
* `elasticsearch.host`
* `elasticsearch.api_key`
* `connectors`
If you’re running the connector service against a Dockerized version of Elasticsearch and Kibana, your config file will look like this:
```yaml
# When connecting to your cloud deployment you should edit the host value
elasticsearch.host: http://host.docker.internal:9200
elasticsearch.api_key: <ELASTICSEARCH_API_KEY>
connectors:
-
connector_id: <CONNECTOR_ID_FROM_KIBANA>
service_type: mysql
api_key: <CONNECTOR_API_KEY_FROM_KIBANA> # Optional. If not provided, the connector will use the elasticsearch.api_key instead
```
Using the `elasticsearch.api_key` is the recommended authentication method. However, you can also use `elasticsearch.username` and `elasticsearch.password` to authenticate with your Elasticsearch instance.
Note: You can change other default configurations by simply uncommenting specific settings in the configuration file and modifying their values.
::::
::::{dropdown} Step 3: Run the Docker image
Run the Docker image with the Connector Service using the following command:
```sh subs=true
docker run \
-v ~/connectors-config:/config \
--network "elastic" \
--tty \
--rm \
docker.elastic.co/integrations/elastic-connectors:{{version.stack}} \
/app/bin/elastic-ingest \
-c /config/config.yml
```
::::
Refer to [`DOCKER.md`](https://github.com/elastic/connectors/tree/main/docs/DOCKER.md) in the `elastic/connectors` repo for more details.
Find all available Docker images in the [official registry](https://www.docker.elastic.co/r/integrations/elastic-connectors).
::::{tip}
We also have a quickstart self-managed option using Docker Compose, so you can spin up all required services at once: Elasticsearch, Kibana, and the connectors service. Refer to this [README](https://github.com/elastic/connectors/tree/main/scripts/stack#readme) in the `elastic/connectors` repo for more information.
::::
### Sync rules [es-connectors-mysql-client-sync-rules]
The following sections describe [Sync rules](/reference/search-connectors/es-sync-rules.md) for this connector.
[Basic sync rules](/reference/search-connectors/es-sync-rules.md#es-sync-rules-basic) are identical for all connectors and are available by default.
[Advanced rules](/reference/search-connectors/es-sync-rules.md#es-sync-rules-advanced) for MySQL can be used to pass arbitrary SQL statements to a MySQL instance.
::::{important}
You need to specify the tables used in your custom query in the "tables" field.
::::
For example:
```js
[
{
"tables": ["table1", "table2"],
"query": "SELECT ... FROM ..."
}
]
```
% NOTCONSOLE
::::{warning}
When using advanced rules, a query can bypass the configuration field `tables`. This will happen if the query specifies a table that doesn’t appear in the configuration. This can also happen if the configuration specifies `*` to fetch all tables while the advanced sync rule requests for only a subset of tables.
::::
### Troubleshooting [es-connectors-mysql-client-troubleshooting]
See [Troubleshooting](/reference/search-connectors/es-connectors-troubleshooting.md).
### Security [es-connectors-mysql-client-security]
See [Security](/reference/search-connectors/es-connectors-security.md). | unknown | github | https://github.com/elastic/elasticsearch | docs/reference/search-connectors/es-connectors-mysql.md |
// Analysis-API test data: exercises exit-point snapshots for a region that
// contains a `throw`, a labeled `break`, and a labeled `continue` inside
// nested labeled loops.  The <expr>...</expr> markers delimit the region
// under analysis and are consumed by the test harness, not the compiler.
fun foo(a: Int) {
    val b: Int = 1
    loop1@ for (p in 1..b) {
        loop2@ for (n in 1..b) {
            <expr>if (a > 0) throw Exception("")
            if (a + b > 0) break@loop2
            if (a - b > 0) continue@loop1</expr>
        }
    }
}
#!/bin/sh
# Copyright (c) 2011, Google Inc.

test_description='adding and checking out large blobs'

. ./test-lib.sh

# A negative threshold must be rejected as a config error.
test_expect_success 'core.bigFileThreshold must be non-negative' '
	: >input &&
	test_must_fail git -c core.bigFileThreshold=-1 hash-object input >out 2>err &&
	grep "bad numeric config value" err &&
	test_must_be_empty out
'

test_expect_success setup '
	# clone does not allow us to pass core.bigfilethreshold to
	# new repos, so set core.bigfilethreshold globally
	git config --global core.bigfilethreshold 200k &&
	printf "%2000000s" X >large1 &&
	cp large1 large2 &&
	cp large1 large3 &&
	printf "%2500000s" Y >huge &&
	# cap allocations below the blob sizes so any code path that tries to
	# hold a large blob whole in memory fails loudly
	GIT_ALLOC_LIMIT=1500k &&
	export GIT_ALLOC_LIMIT
'
test_expect_success 'enter "large" codepath, with small core.bigFileThreshold' '
	test_when_finished "rm -rf repo" &&
	git init --bare repo &&
	echo large | git -C repo hash-object -w --stdin &&
	git -C repo -c core.bigfilethreshold=4 fsck
'

# add a large file with different settings
# (each row of the heredoc below is: expected pack size class, git options)
while read expect config
do
	test_expect_success "add with $config" '
		test_when_finished "rm -f .git/objects/pack/pack-*.* .git/index" &&
		git $config add large1 &&
		sz=$(test_file_size .git/objects/pack/pack-*.pack) &&
		case "$expect" in
		small) test "$sz" -le 100000 ;;
		large) test "$sz" -ge 100000 ;;
		esac
	'
done <<\EOF
large -c core.compression=0
small -c core.compression=9
large -c core.compression=0 -c pack.compression=0
large -c core.compression=9 -c pack.compression=0
small -c core.compression=0 -c pack.compression=9
small -c core.compression=9 -c pack.compression=9
large -c pack.compression=0
small -c pack.compression=9
EOF
# Large blobs should be streamed straight into a single packfile, never
# written as loose objects, and re-adding an identical blob must not
# create a second pack.
test_expect_success 'add a large file or two' '
	git add large1 huge large2 &&
	# make sure we got a single packfile and no loose objects
	count=0 idx= &&
	for p in .git/objects/pack/pack-*.pack
	do
		count=$(( $count + 1 )) &&
		test_path_is_file "$p" &&
		idx=${p%.pack}.idx &&
		test_path_is_file "$idx" || return 1
	done &&
	test $count = 1 &&
	cnt=$(git show-index <"$idx" | wc -l) &&
	test $cnt = 2 &&
	for l in .git/objects/$OIDPATH_REGEX
	do
		test_path_is_missing "$l" || return 1
	done &&
	# attempt to add another copy of the same
	git add large3 &&
	bad= count=0 &&
	for p in .git/objects/pack/pack-*.pack
	do
		count=$(( $count + 1 )) &&
		test_path_is_file "$p" &&
		idx=${p%.pack}.idx &&
		test_path_is_file "$idx" || return 1
	done &&
	test $count = 1
'

test_expect_success 'checkout a large file' '
	large1=$(git rev-parse :large1) &&
	git update-index --add --cacheinfo 100644 $large1 another &&
	git checkout another &&
	test_cmp large1 another
'
test_expect_success 'packsize limit' '
	test_create_repo mid &&
	(
		cd mid &&
		git config core.bigfilethreshold 64k &&
		git config pack.packsizelimit 256k &&
		# mid1 and mid2 will fit within 256k limit but
		# appending mid3 will bust the limit and will
		# result in a separate packfile.
		test-tool genrandom "a" $(( 66 * 1024 )) >mid1 &&
		test-tool genrandom "b" $(( 80 * 1024 )) >mid2 &&
		test-tool genrandom "c" $(( 128 * 1024 )) >mid3 &&
		git add mid1 mid2 mid3 &&
		count=0 &&
		for pi in .git/objects/pack/pack-*.idx
		do
			test_path_is_file "$pi" && count=$(( $count + 1 )) || return 1
		done &&
		test $count = 2 &&
		# every blob must still be findable across the two packs
		(
			git hash-object --stdin <mid1 &&
			git hash-object --stdin <mid2 &&
			git hash-object --stdin <mid3
		) |
		sort >expect &&
		for pi in .git/objects/pack/pack-*.idx
		do
			git show-index <"$pi" || return 1
		done |
		sed -e "s/^[0-9]* \([0-9a-f]*\) .*/\1/" |
		sort >actual &&
		test_cmp expect actual
	)
'
# The remaining tests run everyday commands against the large blobs to make
# sure none of them tries to inflate a large blob into memory (the
# GIT_ALLOC_LIMIT exported in setup would make that fail).
test_expect_success 'diff --raw' '
	git commit -q -m initial &&
	echo modified >>large1 &&
	git add large1 &&
	git commit -q -m modified &&
	git diff --raw HEAD^
'

test_expect_success 'diff --stat' '
	git diff --stat HEAD^ HEAD
'

test_expect_success 'diff' '
	git diff HEAD^ HEAD >actual &&
	grep "Binary files.*differ" actual
'

test_expect_success 'diff --cached' '
	git diff --cached HEAD^ >actual &&
	grep "Binary files.*differ" actual
'

test_expect_success 'hash-object' '
	git hash-object large1
'

test_expect_success 'cat-file a large file' '
	git cat-file blob :large1 >/dev/null
'

test_expect_success 'cat-file a large file from a tag' '
	git tag -m largefile largefiletag :large1 &&
	git cat-file blob largefiletag >/dev/null
'

test_expect_success 'git-show a large file' '
	git show :large1 >/dev/null
'

test_expect_success 'index-pack' '
	git clone file://"$(pwd)"/.git foo &&
	GIT_DIR=non-existent git index-pack --object-format=$(test_oid algo) \
		--strict --verify foo/.git/objects/pack/*.pack
'

test_expect_success 'repack' '
	git repack -ad
'

test_expect_success 'pack-objects with large loose object' '
	SHA1=$(git hash-object huge) &&
	test_create_repo loose &&
	echo $SHA1 | git pack-objects --stdout |
		GIT_ALLOC_LIMIT=0 GIT_DIR=loose/.git git unpack-objects &&
	echo $SHA1 | GIT_DIR=loose/.git git pack-objects pack &&
	test_create_repo packed &&
	mv pack-* packed/.git/objects/pack &&
	GIT_DIR=packed/.git git cat-file blob $SHA1 >actual &&
	test_cmp huge actual
'

test_expect_success 'tar archiving' '
	git archive --format=tar HEAD >/dev/null
'

test_expect_success 'zip archiving, store only' '
	git archive --format=zip -0 HEAD >/dev/null
'

test_expect_success 'zip archiving, deflate' '
	git archive --format=zip HEAD >/dev/null
'

test_expect_success 'fsck large blobs' '
	git fsck 2>err &&
	test_must_be_empty err
'

test_done
import numpy as np
def sigmoid(x):
    """Return the element-wise logistic sigmoid 1 / (1 + e^-x) of x.

    Accepts a scalar or a numpy array; the result has the same shape.
    """
    return 1. / (1 + np.exp(-x))
def sigmoid_grad(f):
    """Gradient of the sigmoid, given f = sigmoid(x) (not x itself).

    Uses the identity sigmoid'(x) = f * (1 - f).
    """
    return f * (1 - f)
def test_sigmoid_basic():
    """
    Some simple tests to get you started.
    Warning: these are not exhaustive.
    """
    x = np.array([[1, 2], [-1, -2]])
    f = sigmoid(x)
    g = sigmoid_grad(f)
    expected_f = np.array([[0.73105858, 0.88079708],
                           [0.26894142, 0.11920292]])
    expected_g = np.array([[0.19661193, 0.10499359],
                           [0.19661193, 0.10499359]])
    assert np.amax(f - expected_f) <= 1e-6
    assert np.amax(g - expected_g) <= 1e-6
def test_sigmoid():
    """
    Use this space to test your sigmoid implementation by running:
        python q2_sigmoid.py

    This function will not be called by the autograder, nor will
    your tests be graded.
    """
    # Intentionally unimplemented: the assignment leaves this as a stub
    # for student-written tests.
    ### YOUR CODE HERE
    raise NotImplementedError
    ### END YOUR CODE
if __name__ == "__main__":
    # Run the provided sanity checks; the custom-test stub stays opt-in.
    test_sigmoid_basic()
    # test_sigmoid()
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""
Check that there are no imports of ORM classes in any of the alembic migration scripts.
This is to prevent the addition of migration code directly referencing any ORM definition,
which could potentially break downgrades. For more details, refer to the relevant discussion
thread at this link: https://github.com/apache/airflow/issues/59871
"""
from __future__ import annotations
import importlib
import inspect
import os
from pathlib import Path
from pprint import pformat
from typing import Final
import pytest
from airflow.models.base import Base
from tests_common.test_utils.file_loading import get_imports_from_file
from tests_common.test_utils.paths import AIRFLOW_CORE_SOURCES_PATH
# Directory that holds every alembic migration revision script.
_MIGRATIONS_DIRPATH: Final[Path] = Path(
    AIRFLOW_CORE_SOURCES_PATH, "airflow/migrations/versions"
)
@pytest.mark.parametrize(
    "migration_script_path",
    [pytest.param(path, id=os.path.basename(path)) for path in _MIGRATIONS_DIRPATH.glob("**/*.py")],
)
def test_migration_script_has_no_orm_references(migration_script_path: Path) -> None:
    """Ensures the given alembic migration script path does not contain any ORM imports."""
    bad_imports = [
        import_ref
        for import_ref in get_imports_from_file(filepath=str(migration_script_path))
        if _is_violating_orm_import(import_ref=import_ref)
    ]
    assert not bad_imports, f"{str(migration_script_path)} has bad ORM imports: {pformat(bad_imports)}"
def _is_violating_orm_import(import_ref: str) -> bool:
"""Return `True` if the imported object is an ORM class from within `airflow.models`, otherwise return `False`."""
if not import_ref.startswith("airflow.models"):
return False
# import the fully qualified reference to check if the reference is a subclass of a declarative base
mod_to_import, _, attr_name = import_ref.rpartition(".")
referenced_module = importlib.import_module(mod_to_import)
referenced_obj = getattr(referenced_module, attr_name)
if inspect.isclass(referenced_obj) and referenced_obj in Base.__subclasses__():
return True
return False | python | github | https://github.com/apache/airflow | airflow-core/tests/unit/migrations/test_no_orm_refs_in_migration_scripts.py |
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.beans.factory.aot;
/**
 * Public variant of {@link BeanRegistrationsAotProcessor} for use in tests.
 *
 * <p>Adds no members of its own; it exists only so test code outside this
 * package can reference the processor under a public type.
 *
 * @author Phillip Webb
 */
public class TestBeanRegistrationsAotProcessor extends BeanRegistrationsAotProcessor {

}
# pep     : current path endpoint (for finding next segment(s))
# ipath   : array of indices on this path
# iredund : if available, indices to ignore (from previous juncture analyses)
# pl      : path length to reach
def main(pep, ipath, iredund, pl, index, strt, end, cid, rb):
    """Recursively extend `ipath` one segment at a time until it holds `pl`
    segment indices, appending each completed path to `<rb>path_temp.npy`.

    Segments are matched by comparing `pep` against the per-segment start
    (`strt`) and end (`end`) point lists; at a bifurcation the function
    recurses once per outgoing segment (via the module self-import).  State
    is exchanged through .npy files under `rb` — note that `iredund` is
    reloaded from `<rb>redundant.npy` on every loop iteration, so the
    passed-in value is effectively only a seed.
    """
    import numpy as np
    import length_builder
    #IDEA: at junction, scan all paths out for certain distance (100m?)
    # and if any reconnect, add one path to iarr to eliminate quick doubles
    #IF LENGTH REACHED
    if len(ipath) == pl:
        #print('path finished, length = ',len(ipath))
        # Append the completed path to the on-disk accumulator.
        paths = np.load(rb+'path_temp.npy')
        if len(paths) == 0: paths = [ipath]
        else: paths = np.append(paths,[ipath],axis=0)
        np.save(rb+'path_temp',paths)
    #If not, keep going
    else:
        building = 1
        while building:
            # Squared distances from the current endpoint to every segment
            # start point and end point.
            strt_dist = [(pep[0]-p[0])**2+(pep[1]-p[1])**2 for p in strt]
            end_dist = [(pep[0]-p[0])**2+(pep[1]-p[1])**2 for p in end]
            isps = np.array([],dtype=int)
            ieps = np.array([],dtype=int)
            for i in index:
                # 1e-25 is the squared-distance tolerance for "same point".
                if strt_dist[i] < 1.e-25: isps = np.append(isps,i)
                if end_dist[i] < 1.e-25: ieps = np.append(ieps,i)
            iredund = np.load(rb+'redundant.npy')
            # Drop segments already on this path or marked redundant.
            if len(isps) > 0:
                isps2 = np.array([],dtype=int)
                for i in isps:
                    if i not in iredund and i not in ipath: isps2 = np.append(isps2,i)
                isps = isps2
            if len(ieps) > 0:
                ieps2 = np.array([],dtype=int)
                for i in ieps:
                    if i not in iredund and i not in ipath: ieps2 = np.append(ieps2,i)
                ieps = ieps2
            # Encode traversal direction: start-matches stay as i, end-matches
            # become -(i+1) so index 0 remains distinguishable.
            isegs = np.append(isps,-(ieps+1))
            npts = len(isegs) #number of segments found
            if npts == 0: #end of route found
                building = 0
            if npts == 1: #no bifurcation
                ii = isegs[0]
                if ii >= 0: #was a start-pt
                    pep = end[ii]
                    ipath = np.append(ipath,ii)
                else: #was an end-pt
                    pep = strt[abs(ii)-1]
                    ipath = np.append(ipath,abs(ii)-1)
                if len(ipath) == pl:
                    building = 0
                    # Recurse once more so the length-reached branch saves it.
                    length_builder.main(pep,ipath,iredund,pl,index,strt,end,cid,rb)
            if npts > 1: #track bifurcation
                building = 0
                for ii in isegs:
                    if ii >= 0:
                        pep = end[ii]
                        ipath2 = np.append(ipath,ii)
                    else:
                        pep = strt[abs(ii)-1]
                        ipath2 = np.append(ipath,abs(ii)-1)
                    length_builder.main(pep,ipath2,iredund,pl,index,strt,end,cid,rb)
from __future__ import absolute_import
from bokeh.exceptions import DataIntegrityException
class UnauthorizedException(Exception):
    """Raised when creating a model whose storage key is already taken
    (the backend reports a DataIntegrityException)."""
    pass
class ServerModel(object):
    """Base class for objects persisted in the key/value document store.

    Subclasses set ``idfield`` (the attribute holding the object id) and
    ``typename`` (the namespace used in storage keys), and implement the
    ``to_json``/``from_json`` serialization hooks.
    """
    idfield = None
    typename = None

    @classmethod
    def modelkey(cls, objid):
        """Storage key for an object of this type with the given id."""
        return "model:%s:%s" % (cls.typename, objid)

    def mykey(self):
        """Storage key for this instance, derived from its id attribute."""
        return self.modelkey(getattr(self, self.idfield))

    def to_json(self):
        raise NotImplementedError

    @staticmethod
    def from_json(obj):
        raise NotImplementedError

    def save(self, client):
        """Unconditionally write this object's JSON form to the store."""
        client.set(self.mykey(), self.to_json())

    def create(self, client):
        """Write this object only if its key is not already taken."""
        try:
            client.create(self.mykey(), self.to_json())
        except DataIntegrityException:
            raise UnauthorizedException(self.mykey())

    @classmethod
    def load_json(cls, client, objid):
        """Fetch the raw stored JSON for ``objid``, or None if absent."""
        raw = client.get(cls.modelkey(objid))
        return None if raw is None else raw

    @classmethod
    def load(cls, client, objid):
        """Fetch and deserialize the object for ``objid``, or None if absent."""
        attrs = cls.load_json(client, objid)
        return None if attrs is None else cls.from_json(attrs)
// This file is part of OpenCV project.
// It is subject to the license terms in the LICENSE file found in the top-level directory
// of this distribution and at http://opencv.org/license.html.
// Copyright (C) 2025, Institute of Software, Chinese Academy of Sciences.
#include "rvv_hal.hpp"
#include "common.hpp"
namespace cv { namespace rvv_hal { namespace imgproc {
#if CV_HAL_RVV_1P0_ENABLED
namespace {
// Parameter pack describing one 2D filter invocation; filled in by the HAL
// entry point and consumed by the row-processing kernels below.
struct Filter2D
{
    const uchar* kernel_data;   // kernel coefficients; read as float rows (see filter())
    size_t kernel_step;         // byte stride between kernel rows
    int kernel_type;
    int kernel_width;
    int kernel_height;
    int src_type;
    int dst_type;
    int borderType;             // border mode; may carry the BORDER_ISOLATED flag
    double delta;               // bias added to every accumulated result
    int anchor_x;               // kernel anchor position within the aperture
    int anchor_y;
};
// Vectorized 3x3 correlation over one output row of a 4-channel 8-bit image.
// row0..row2 are the three source rows; a null row pointer skips that row
// entirely (its kernel taps contribute nothing).  Each row is read starting
// at column (i - anchor), and two extra pixels beyond the vector are read
// for the slide-in values — NOTE(review): callers must guarantee that
// padding exists; confirm against the border preparation in filter().
static void process3(int anchor, int left, int right, float delta, const float* kernel, const uchar* row0, const uchar* row1, const uchar* row2, uchar* dst)
{
    int vl;
    for (int i = left; i < right; i += vl)
    {
        vl = __riscv_vsetvl_e8m1(right - i);
        // Per-channel float accumulators, seeded with the filter bias.
        auto s0 = __riscv_vfmv_v_f_f32m4(delta, vl);
        auto s1 = __riscv_vfmv_v_f_f32m4(delta, vl);
        auto s2 = __riscv_vfmv_v_f_f32m4(delta, vl);
        auto s3 = __riscv_vfmv_v_f_f32m4(delta, vl);
        // Fold one kernel row into accumulator a: a += k0*b, then slide b
        // down by one element (shifting in r1, then r2) for taps k1 and k2.
        auto addshift = [&](vfloat32m4_t a, vfloat32m4_t b, float k0, float k1, float k2, float r1, float r2) {
            a = __riscv_vfmacc(a, k0, b, vl);
            b = __riscv_vfslide1down(b, r1, vl);
            a = __riscv_vfmacc(a, k1, b, vl);
            b = __riscv_vfslide1down(b, r2, vl);
            return __riscv_vfmacc(a, k2, b, vl);
        };
        // Load vl interleaved 4-channel pixels, widen u8 -> f32 per channel,
        // and accumulate this row's three taps into s0..s3.
        auto loadsrc = [&](const uchar* row, float k0, float k1, float k2) {
            if (!row) return;
            const uchar* extra = row + (i - anchor) * 4;
            auto src = __riscv_vlseg4e8_v_u8m1x4(extra, vl);
            auto v0 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 0), vl), vl);
            auto v1 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 1), vl), vl);
            auto v2 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 2), vl), vl);
            auto v3 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 3), vl), vl);
            extra += vl * 4;
            // extra[0..7] are the two pixels just past the vector, used as
            // the slide-in scalars above.
            s0 = addshift(s0, v0, k0, k1, k2, extra[0], extra[4]);
            s1 = addshift(s1, v1, k0, k1, k2, extra[1], extra[5]);
            s2 = addshift(s2, v2, k0, k1, k2, extra[2], extra[6]);
            s3 = addshift(s3, v3, k0, k1, k2, extra[3], extra[7]);
        };
        loadsrc(row0, kernel[0], kernel[1], kernel[2]);
        loadsrc(row1, kernel[3], kernel[4], kernel[5]);
        loadsrc(row2, kernel[6], kernel[7], kernel[8]);
        // Round f32 -> u16 -> u8 with saturation and store interleaved.
        vuint8m1x4_t val{};
        val = __riscv_vset_v_u8m1_u8m1x4(val, 0, __riscv_vnclipu(__riscv_vfncvt_xu(s0, vl), 0, __RISCV_VXRM_RNU, vl));
        val = __riscv_vset_v_u8m1_u8m1x4(val, 1, __riscv_vnclipu(__riscv_vfncvt_xu(s1, vl), 0, __RISCV_VXRM_RNU, vl));
        val = __riscv_vset_v_u8m1_u8m1x4(val, 2, __riscv_vnclipu(__riscv_vfncvt_xu(s2, vl), 0, __RISCV_VXRM_RNU, vl));
        val = __riscv_vset_v_u8m1_u8m1x4(val, 3, __riscv_vnclipu(__riscv_vfncvt_xu(s3, vl), 0, __RISCV_VXRM_RNU, vl));
        __riscv_vsseg4e8(dst + i * 4, val, vl);
    }
}
// Vectorized 5x5 correlation over one output row of a 4-channel 8-bit image.
// Same structure as process3 but with five source rows and five taps per
// row; four extra pixels beyond the vector are read for the slide-in values
// — NOTE(review): callers must guarantee that padding exists.
static void process5(int anchor, int left, int right, float delta, const float* kernel, const uchar* row0, const uchar* row1, const uchar* row2, const uchar* row3, const uchar* row4, uchar* dst)
{
    int vl;
    for (int i = left; i < right; i += vl)
    {
        vl = __riscv_vsetvl_e8m1(right - i);
        // Per-channel float accumulators, seeded with the filter bias.
        auto s0 = __riscv_vfmv_v_f_f32m4(delta, vl);
        auto s1 = __riscv_vfmv_v_f_f32m4(delta, vl);
        auto s2 = __riscv_vfmv_v_f_f32m4(delta, vl);
        auto s3 = __riscv_vfmv_v_f_f32m4(delta, vl);
        // Fold one kernel row into accumulator a: multiply-accumulate tap k0,
        // then alternately slide in the next pixel (r1..r4) and apply k1..k4.
        auto addshift = [&](vfloat32m4_t a, vfloat32m4_t b, float k0, float k1, float k2, float k3, float k4, float r1, float r2, float r3, float r4) {
            a = __riscv_vfmacc(a, k0, b, vl);
            b = __riscv_vfslide1down(b, r1, vl);
            a = __riscv_vfmacc(a, k1, b, vl);
            b = __riscv_vfslide1down(b, r2, vl);
            a = __riscv_vfmacc(a, k2, b, vl);
            b = __riscv_vfslide1down(b, r3, vl);
            a = __riscv_vfmacc(a, k3, b, vl);
            b = __riscv_vfslide1down(b, r4, vl);
            return __riscv_vfmacc(a, k4, b, vl);
        };
        // Load vl interleaved 4-channel pixels, widen u8 -> f32 per channel,
        // and accumulate this row's five taps.  A null row is skipped.
        auto loadsrc = [&](const uchar* row, float k0, float k1, float k2, float k3, float k4) {
            if (!row) return;
            const uchar* extra = row + (i - anchor) * 4;
            auto src = __riscv_vlseg4e8_v_u8m1x4(extra, vl);
            auto v0 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 0), vl), vl);
            auto v1 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 1), vl), vl);
            auto v2 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 2), vl), vl);
            auto v3 = __riscv_vfwcvt_f(__riscv_vwcvtu_x(__riscv_vget_v_u8m1x4_u8m1(src, 3), vl), vl);
            extra += vl * 4;
            // extra[0..15] are the four pixels just past the vector, used as
            // the slide-in scalars above.
            s0 = addshift(s0, v0, k0, k1, k2, k3, k4, extra[0], extra[4], extra[ 8], extra[12]);
            s1 = addshift(s1, v1, k0, k1, k2, k3, k4, extra[1], extra[5], extra[ 9], extra[13]);
            s2 = addshift(s2, v2, k0, k1, k2, k3, k4, extra[2], extra[6], extra[10], extra[14]);
            s3 = addshift(s3, v3, k0, k1, k2, k3, k4, extra[3], extra[7], extra[11], extra[15]);
        };
        loadsrc(row0, kernel[ 0], kernel[ 1], kernel[ 2], kernel[ 3], kernel[ 4]);
        loadsrc(row1, kernel[ 5], kernel[ 6], kernel[ 7], kernel[ 8], kernel[ 9]);
        loadsrc(row2, kernel[10], kernel[11], kernel[12], kernel[13], kernel[14]);
        loadsrc(row3, kernel[15], kernel[16], kernel[17], kernel[18], kernel[19]);
        loadsrc(row4, kernel[20], kernel[21], kernel[22], kernel[23], kernel[24]);
        // Round f32 -> u16 -> u8 with saturation and store interleaved.
        vuint8m1x4_t val{};
        val = __riscv_vset_v_u8m1_u8m1x4(val, 0, __riscv_vnclipu(__riscv_vfncvt_xu(s0, vl), 0, __RISCV_VXRM_RNU, vl));
        val = __riscv_vset_v_u8m1_u8m1x4(val, 1, __riscv_vnclipu(__riscv_vfncvt_xu(s1, vl), 0, __RISCV_VXRM_RNU, vl));
        val = __riscv_vset_v_u8m1_u8m1x4(val, 2, __riscv_vnclipu(__riscv_vfncvt_xu(s2, vl), 0, __RISCV_VXRM_RNU, vl));
        val = __riscv_vset_v_u8m1_u8m1x4(val, 3, __riscv_vnclipu(__riscv_vfncvt_xu(s3, vl), 0, __RISCV_VXRM_RNU, vl));
        __riscv_vsseg4e8(dst + i * 4, val, vl);
    }
}
// the algorithm is copied from 3rdparty/carotene/src/convolution.cpp,
// in the function void CAROTENE_NS::convolution
// Convolve rows [start, end) of a CV_8UC4 image with a dense ksize x ksize
// float kernel (ksize is 3 or 5). Interior columns go through the
// vectorized process3/process5 helpers; border columns, and any kernel row
// whose source row falls outside the image, use the scalar per-pixel path
// with full border interpolation.
template<int ksize>
static inline int filter(int start, int end, Filter2D* data, const uchar* src_data, size_t src_step, uchar* dst_data, int width, int height, int full_width, int full_height, int offset_x, int offset_y)
{
    // Copy the (possibly strided) kernel into a dense row-major array.
    float kernel[ksize * ksize];
    for (int i = 0; i < ksize * ksize; i++)
    {
        kernel[i] = reinterpret_cast<const float*>(data->kernel_data + (i / ksize) * data->kernel_step)[i % ksize];
    }
    // Sentinel meaning "no source pixel" (borderInterpolate returned < 0).
    constexpr int noval = std::numeric_limits<int>::max();
    // Map a kernel-window coordinate (x, y) of the current output pixel to
    // source (row, col). BORDER_ISOLATED interpolates within this ROI only;
    // otherwise interpolation happens in the full image and the result is
    // shifted back into ROI coordinates.
    auto access = [&](int x, int y) {
        int pi, pj;
        if (data->borderType & BORDER_ISOLATED)
        {
            pi = common::borderInterpolate(x - data->anchor_y, height, data->borderType & ~BORDER_ISOLATED);
            pj = common::borderInterpolate(y - data->anchor_x, width , data->borderType & ~BORDER_ISOLATED);
            pi = pi < 0 ? noval : pi;
            pj = pj < 0 ? noval : pj;
        }
        else
        {
            pi = common::borderInterpolate(offset_y + x - data->anchor_y, full_height, data->borderType);
            pj = common::borderInterpolate(offset_x + y - data->anchor_x, full_width , data->borderType);
            pi = pi < 0 ? noval : pi - offset_y;
            pj = pj < 0 ? noval : pj - offset_x;
        }
        return std::make_pair(pi, pj);
    };
    // Scalar fallback: convolve all four channels of output pixel (x, y),
    // skipping taps without a source pixel, then round and clamp to [0, 255].
    auto process = [&](int x, int y) {
        float sum0, sum1, sum2, sum3;
        sum0 = sum1 = sum2 = sum3 = data->delta;
        for (int i = 0; i < ksize * ksize; i++)
        {
            auto p = access(x + i / ksize, y + i % ksize);
            if (p.first != noval && p.second != noval)
            {
                sum0 += kernel[i] * src_data[p.first * src_step + p.second * 4    ];
                sum1 += kernel[i] * src_data[p.first * src_step + p.second * 4 + 1];
                sum2 += kernel[i] * src_data[p.first * src_step + p.second * 4 + 2];
                sum3 += kernel[i] * src_data[p.first * src_step + p.second * 4 + 3];
            }
        }
        dst_data[(x * width + y) * 4    ] = std::max(0, std::min((int)std::round(sum0), (int)std::numeric_limits<uchar>::max()));
        dst_data[(x * width + y) * 4 + 1] = std::max(0, std::min((int)std::round(sum1), (int)std::numeric_limits<uchar>::max()));
        dst_data[(x * width + y) * 4 + 2] = std::max(0, std::min((int)std::round(sum2), (int)std::numeric_limits<uchar>::max()));
        dst_data[(x * width + y) * 4 + 3] = std::max(0, std::min((int)std::round(sum3), (int)std::numeric_limits<uchar>::max()));
    };
    // Columns in [left, right) have a fully in-bounds horizontal window.
    const int left = data->anchor_x, right = width - (ksize - 1 - data->anchor_x);
    for (int i = start; i < end; i++)
    {
        if (left >= right)
        {
            // Image narrower than the kernel window: scalar path everywhere.
            for (int j = 0; j < width; j++)
                process(i, j);
        }
        else
        {
            // Scalar path for the left/right border columns...
            for (int j = 0; j < left; j++)
                process(i, j);
            for (int j = right; j < width; j++)
                process(i, j);
            // ...vector path for the interior. A null row pointer tells
            // process3/process5 to skip that kernel row.
            const uchar* row0 = access(i    , 0).first == noval ? nullptr : src_data + access(i    , 0).first * src_step;
            const uchar* row1 = access(i + 1, 0).first == noval ? nullptr : src_data + access(i + 1, 0).first * src_step;
            const uchar* row2 = access(i + 2, 0).first == noval ? nullptr : src_data + access(i + 2, 0).first * src_step;
            if (ksize == 3)
            {
                process3(data->anchor_x, left, right, data->delta, kernel, row0, row1, row2, dst_data + i * width * 4);
            }
            else
            {
                const uchar* row3 = access(i + 3, 0).first == noval ? nullptr : src_data + access(i + 3, 0).first * src_step;
                const uchar* row4 = access(i + 4, 0).first == noval ? nullptr : src_data + access(i + 4, 0).first * src_step;
                process5(data->anchor_x, left, right, data->delta, kernel, row0, row1, row2, row3, row4, dst_data + i * width * 4);
            }
        }
    }
    return CV_HAL_ERROR_OK;
}
} // anonymous
// Validate the requested filter configuration and allocate the Filter2D
// state consumed by filter()/filterFree(). Only square 3x3/5x5 CV_32FC1
// kernels over CV_8UC4 images are accelerated, and BORDER_WRAP is not
// supported; everything else reports CV_HAL_ERROR_NOT_IMPLEMENTED.
int filterInit(cvhalFilter2D** context, uchar* kernel_data, size_t kernel_step, int kernel_type, int kernel_width, int kernel_height, int /*max_width*/, int /*max_height*/, int src_type, int dst_type, int borderType, double delta, int anchor_x, int anchor_y, bool /*allowSubmatrix*/, bool /*allowInplace*/)
{
    const bool kernelSupported = kernel_type == CV_32FC1 &&
                                 kernel_width == kernel_height &&
                                 (kernel_width == 3 || kernel_width == 5);
    const bool imageSupported = src_type == CV_8UC4 && dst_type == CV_8UC4;
    const bool borderSupported = (borderType & ~BORDER_ISOLATED) != BORDER_WRAP;
    if (!kernelSupported || !imageSupported || !borderSupported)
        return CV_HAL_ERROR_NOT_IMPLEMENTED;
    // Negative anchors select the kernel center.
    if (anchor_x < 0)
        anchor_x = kernel_width / 2;
    if (anchor_y < 0)
        anchor_y = kernel_height / 2;
    *context = reinterpret_cast<cvhalFilter2D*>(new Filter2D{kernel_data, kernel_step, kernel_type, kernel_width, kernel_height, src_type, dst_type, borderType, delta, anchor_x, anchor_y});
    return CV_HAL_ERROR_OK;
}
// Run the configured 3x3 or 5x5 convolution over the whole image. The
// result is rendered into a tightly-packed temporary buffer and then copied
// row by row into the (possibly strided) destination.
int filter(cvhalFilter2D* context, uchar* src_data, size_t src_step, uchar* dst_data, size_t dst_step, int width, int height, int full_width, int full_height, int offset_x, int offset_y)
{
    Filter2D* filt = reinterpret_cast<Filter2D*>(context);
    std::vector<uchar> tmp(width * height * 4);
    int status = CV_HAL_ERROR_NOT_IMPLEMENTED;
    if (filt->kernel_width == 3)
        status = common::invoke(height, {filter<3>}, filt, src_data, src_step, tmp.data(), width, height, full_width, full_height, offset_x, offset_y);
    else if (filt->kernel_width == 5)
        status = common::invoke(height, {filter<5>}, filt, src_data, src_step, tmp.data(), width, height, full_width, full_height, offset_x, offset_y);
    for (int r = 0; r < height; r++)
        memcpy(dst_data + r * dst_step, tmp.data() + r * width * 4, width * 4);
    return status;
}
// Release the Filter2D state allocated by filterInit().
int filterFree(cvhalFilter2D* context)
{
    Filter2D* state = reinterpret_cast<Filter2D*>(context);
    delete state;
    return CV_HAL_ERROR_OK;
}
#endif // CV_HAL_RVV_1P0_ENABLED
}}} // cv::rvv_hal::imgproc | cpp | github | https://github.com/opencv/opencv | hal/riscv-rvv/src/imgproc/filter.cpp |
"""Tests for PEP 810 lazy imports."""
import io
import dis
import subprocess
import sys
import textwrap
import threading
import types
import unittest
try:
import _testcapi
except ImportError:
_testcapi = None
class LazyImportTests(unittest.TestCase):
"""Tests for basic lazy import functionality."""
def tearDown(self):
"""Clean up any test modules from sys.modules."""
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
sys.lazy_modules.clear()
def test_basic_unused(self):
"""Lazy imported module should not be loaded if never accessed."""
import test.test_import.data.lazy_imports.basic_unused
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
self.assertIn("test.test_import.data.lazy_imports", sys.lazy_modules)
self.assertEqual(sys.lazy_modules["test.test_import.data.lazy_imports"], {"basic2"})
def test_sys_lazy_modules(self):
try:
import test.test_import.data.lazy_imports.basic_from_unused
except ImportError as e:
self.fail('lazy import failed')
self.assertFalse("test.test_import.data.lazy_imports.basic2" in sys.modules)
self.assertIn("test.test_import.data.lazy_imports", sys.lazy_modules)
self.assertEqual(sys.lazy_modules["test.test_import.data.lazy_imports"], {"basic2"})
test.test_import.data.lazy_imports.basic_from_unused.basic2
self.assertNotIn("test.test_import.data", sys.lazy_modules)
def test_basic_unused_use_externally(self):
"""Lazy import should load module when accessed from outside."""
from test.test_import.data.lazy_imports import basic_unused
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
x = basic_unused.test.test_import.data.lazy_imports.basic2
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_basic_from_unused_use_externally(self):
"""Lazy 'from' import should load when accessed from outside."""
from test.test_import.data.lazy_imports import basic_from_unused
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
x = basic_from_unused.basic2
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_basic_unused_dir(self):
"""dir() on module should not trigger lazy import reification."""
import test.test_import.data.lazy_imports.basic_unused
x = dir(test.test_import.data.lazy_imports.basic_unused)
self.assertIn("test", x)
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_basic_dir(self):
"""dir() at module scope should not trigger lazy import reification."""
from test.test_import.data.lazy_imports import basic_dir
self.assertIn("test", basic_dir.x)
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_basic_used(self):
"""Lazy import should load when accessed within the module."""
import test.test_import.data.lazy_imports.basic_used
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class GlobalLazyImportModeTests(unittest.TestCase):
    """Tests for sys.set_lazy_imports() global mode control."""

    def tearDown(self):
        # Drop test-data modules imported by the test and restore the
        # default lazy-import configuration.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_global_off(self):
        """Mode 'none' should disable lazy imports entirely."""
        import test.test_import.data.lazy_imports.global_off
        self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)

    def test_global_on(self):
        """Mode 'all' should make regular imports lazy."""
        import test.test_import.data.lazy_imports.global_on
        self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)

    def test_global_filter(self):
        """Filter returning False should prevent lazy loading."""
        import test.test_import.data.lazy_imports.global_filter
        self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)

    def test_global_filter_true(self):
        """Filter returning True should allow lazy loading."""
        import test.test_import.data.lazy_imports.global_filter_true
        self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)

    def test_global_filter_from(self):
        """Filter should work with 'from' imports."""
        # NOTE(review): imports the same data module as test_global_filter;
        # if a dedicated *_from data module exists, it may have been
        # intended here — confirm.
        import test.test_import.data.lazy_imports.global_filter
        self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)

    def test_global_filter_from_true(self):
        """Filter returning True should allow lazy 'from' imports."""
        # NOTE(review): duplicates test_global_filter_true's import target —
        # confirm whether a *_from variant was intended.
        import test.test_import.data.lazy_imports.global_filter_true
        self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class CompatibilityModeTests(unittest.TestCase):
"""Tests for __lazy_modules__ compatibility mode."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_compatibility_mode(self):
"""__lazy_modules__ should enable lazy imports for listed modules."""
import test.test_import.data.lazy_imports.basic_compatibility_mode
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_compatibility_mode_used(self):
"""Using a lazy import from __lazy_modules__ should load the module."""
import test.test_import.data.lazy_imports.basic_compatibility_mode_used
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_compatibility_mode_func(self):
"""Imports inside functions should be eager even in compatibility mode."""
import test.test_import.data.lazy_imports.compatibility_mode_func
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_compatibility_mode_try_except(self):
"""Imports in try/except should be eager even in compatibility mode."""
import test.test_import.data.lazy_imports.compatibility_mode_try_except
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_compatibility_mode_relative(self):
"""__lazy_modules__ should work with relative imports."""
import test.test_import.data.lazy_imports.basic_compatibility_mode_relative
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class ModuleIntrospectionTests(unittest.TestCase):
"""Tests for module dict and getattr behavior with lazy imports."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_modules_dict(self):
"""Accessing module.__dict__ should not trigger reification."""
import test.test_import.data.lazy_imports.modules_dict
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_modules_getattr(self):
"""Module __getattr__ for lazy import name should trigger reification."""
import test.test_import.data.lazy_imports.modules_getattr
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_modules_getattr_other(self):
"""Module __getattr__ for other names should not trigger reification."""
import test.test_import.data.lazy_imports.modules_getattr_other
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class LazyImportTypeTests(unittest.TestCase):
"""Tests for the LazyImportType and its resolve() method."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_lazy_value_resolve(self):
"""resolve() method should force the lazy import to load."""
import test.test_import.data.lazy_imports.lazy_get_value
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_lazy_import_type_exposed(self):
"""LazyImportType should be exposed in types module."""
self.assertHasAttr(types, 'LazyImportType')
self.assertEqual(types.LazyImportType.__name__, 'lazy_import')
def test_lazy_import_type_cant_construct(self):
"""LazyImportType should not be directly constructible."""
self.assertRaises(TypeError, types.LazyImportType, {}, "module")
class SyntaxRestrictionTests(unittest.TestCase):
    """Tests for syntax restrictions on lazy imports."""

    def tearDown(self):
        # Drop cached test-data modules and restore the default lazy-import
        # configuration so tests stay independent.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_lazy_try_except(self):
        """lazy import inside try/except should raise SyntaxError."""
        with self.assertRaises(SyntaxError):
            import test.test_import.data.lazy_imports.lazy_try_except

    def test_lazy_try_except_from(self):
        """lazy from import inside try/except should raise SyntaxError."""
        with self.assertRaises(SyntaxError):
            import test.test_import.data.lazy_imports.lazy_try_except_from

    def test_lazy_try_except_from_star(self):
        """lazy from import * should raise SyntaxError."""
        with self.assertRaises(SyntaxError):
            import test.test_import.data.lazy_imports.lazy_try_except_from_star

    def test_lazy_future_import(self):
        """lazy from __future__ import should raise SyntaxError."""
        with self.assertRaises(SyntaxError) as cm:
            import test.test_import.data.lazy_imports.lazy_future_import
        # SyntaxError offsets are 1-based: columns 1..5 cover the four
        # characters of the 'lazy' keyword (end_offset is exclusive).
        self.assertEqual(cm.exception.offset, 1)
        self.assertEqual(cm.exception.end_offset, 5)

    def test_lazy_import_func(self):
        """lazy import inside function should raise SyntaxError."""
        with self.assertRaises(SyntaxError):
            import test.test_import.data.lazy_imports.lazy_import_func

    def test_lazy_import_exec_in_function(self):
        """lazy import via exec() inside a function should raise SyntaxError."""
        # exec() inside a function creates a non-module-level context
        # where lazy imports are not allowed
        def f():
            exec("lazy import json")
        with self.assertRaises(SyntaxError) as cm:
            f()
        self.assertIn("only allowed at module level", str(cm.exception))

    def test_lazy_import_exec_at_module_level(self):
        """lazy import via exec() at module level should work."""
        # exec() at module level (globals == locals) should allow lazy imports
        code = textwrap.dedent("""
            import sys
            exec("lazy import json")
            # Should be lazy - not loaded yet
            assert 'json' not in sys.modules
            print("OK")
        """)
        # Run in a fresh interpreter so this process's own json import
        # state cannot interfere.
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)
class EagerImportInLazyModeTests(unittest.TestCase):
"""Tests for imports that should remain eager even in lazy mode."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_try_except_eager(self):
"""Imports in try/except should be eager even with mode='all'."""
sys.set_lazy_imports("all")
import test.test_import.data.lazy_imports.try_except_eager
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_try_except_eager_from(self):
"""From imports in try/except should be eager even with mode='all'."""
sys.set_lazy_imports("all")
import test.test_import.data.lazy_imports.try_except_eager_from
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_eager_import_func(self):
"""Imports inside functions should return modules, not proxies."""
sys.set_lazy_imports("all")
import test.test_import.data.lazy_imports.eager_import_func
f = test.test_import.data.lazy_imports.eager_import_func.f
self.assertEqual(type(f()), type(sys))
class WithStatementTests(unittest.TestCase):
"""Tests for lazy imports in with statement context."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_lazy_with(self):
"""lazy import with 'with' statement should work."""
import test.test_import.data.lazy_imports.lazy_with
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_lazy_with_from(self):
"""lazy from import with 'with' statement should work."""
import test.test_import.data.lazy_imports.lazy_with_from
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class PackageTests(unittest.TestCase):
"""Tests for lazy imports with packages."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_lazy_import_pkg(self):
"""lazy import of package submodule should load the package."""
import test.test_import.data.lazy_imports.lazy_import_pkg
self.assertIn("test.test_import.data.lazy_imports.pkg", sys.modules)
self.assertIn("test.test_import.data.lazy_imports.pkg.bar", sys.modules)
def test_lazy_import_pkg_cross_import(self):
"""Cross-imports within package should preserve lazy imports."""
import test.test_import.data.lazy_imports.pkg.c
self.assertIn("test.test_import.data.lazy_imports.pkg", sys.modules)
self.assertIn("test.test_import.data.lazy_imports.pkg.c", sys.modules)
self.assertNotIn("test.test_import.data.lazy_imports.pkg.b", sys.modules)
g = test.test_import.data.lazy_imports.pkg.c.get_globals()
self.assertEqual(type(g["x"]), int)
self.assertEqual(type(g["b"]), types.LazyImportType)
class DunderLazyImportTests(unittest.TestCase):
"""Tests for __lazy_import__ builtin function."""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_dunder_lazy_import(self):
"""__lazy_import__ should create lazy import proxy."""
import test.test_import.data.lazy_imports.dunder_lazy_import
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_dunder_lazy_import_used(self):
"""Using __lazy_import__ result should trigger module load."""
import test.test_import.data.lazy_imports.dunder_lazy_import_used
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_dunder_lazy_import_builtins(self):
"""__lazy_import__ should use module's __builtins__ for __import__."""
from test.test_import.data.lazy_imports import dunder_lazy_import_builtins
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
self.assertEqual(dunder_lazy_import_builtins.basic.basic2, 42)
class SysLazyImportsAPITests(unittest.TestCase):
    """Tests for sys lazy imports API functions."""

    def tearDown(self):
        # Drop cached test-data modules and restore the default lazy-import
        # configuration so tests stay independent.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_set_lazy_imports_requires_string(self):
        """set_lazy_imports should reject non-string arguments."""
        with self.assertRaises(TypeError):
            sys.set_lazy_imports(True)
        with self.assertRaises(TypeError):
            sys.set_lazy_imports(None)
        with self.assertRaises(TypeError):
            sys.set_lazy_imports(1)

    def test_set_lazy_imports_rejects_invalid_mode(self):
        """set_lazy_imports should reject invalid mode strings."""
        with self.assertRaises(ValueError):
            sys.set_lazy_imports("invalid")
        with self.assertRaises(ValueError):
            sys.set_lazy_imports("on")
        with self.assertRaises(ValueError):
            sys.set_lazy_imports("off")

    def test_get_lazy_imports_returns_string(self):
        """get_lazy_imports should return string modes."""
        sys.set_lazy_imports("normal")
        self.assertEqual(sys.get_lazy_imports(), "normal")
        sys.set_lazy_imports("all")
        self.assertEqual(sys.get_lazy_imports(), "all")
        sys.set_lazy_imports("none")
        self.assertEqual(sys.get_lazy_imports(), "none")

    def test_get_lazy_imports_filter_default(self):
        """get_lazy_imports_filter should return None by default."""
        sys.set_lazy_imports_filter(None)
        self.assertIsNone(sys.get_lazy_imports_filter())

    def test_set_and_get_lazy_imports_filter(self):
        """set/get_lazy_imports_filter should round-trip filter function."""
        def my_filter(name):
            return name.startswith("test.")
        sys.set_lazy_imports_filter(my_filter)
        self.assertIs(sys.get_lazy_imports_filter(), my_filter)

    def test_lazy_modules_attribute_is_set(self):
        """sys.lazy_modules should be a dict.

        It maps the name of the importing module to the set of names it
        imported lazily (see test_lazy_modules_tracks_lazy_imports below
        for the mapping shape).
        """
        self.assertIsInstance(sys.lazy_modules, dict)

    def test_lazy_modules_tracks_lazy_imports(self):
        """sys.lazy_modules should track lazily imported module names."""
        # Runs in a fresh interpreter — presumably to isolate the global
        # sys.lazy_modules state from other tests; confirm before changing.
        code = textwrap.dedent("""
            import sys
            initial_count = len(sys.lazy_modules)
            import test.test_import.data.lazy_imports.basic_unused
            assert "test.test_import.data.lazy_imports" in sys.lazy_modules
            assert sys.lazy_modules["test.test_import.data.lazy_imports"] == {"basic2"}
            assert len(sys.lazy_modules) > initial_count
            print("OK")
        """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)
class ErrorHandlingTests(unittest.TestCase):
"""Tests for error handling during lazy import reification.
PEP 810: Errors during reification should show exception chaining with
both the lazy import definition location and the access location.
"""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_import_error_shows_chained_traceback(self):
"""ImportError during reification should chain to show both definition and access."""
# Errors at reification must show where the lazy import was defined
# AND where the access happened, per PEP 810 "Reification" section
code = textwrap.dedent("""
import sys
lazy import test.test_import.data.lazy_imports.nonexistent_module
try:
x = test.test_import.data.lazy_imports.nonexistent_module
except ImportError as e:
# Should have __cause__ showing the original error
# The exception chain shows both where import was defined and where access happened
assert e.__cause__ is not None, "Expected chained exception"
print("OK")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
def test_attribute_error_on_from_import_shows_chained_traceback(self):
"""Accessing missing attribute from lazy from-import should chain errors."""
# Tests 'lazy from module import nonexistent' behavior
code = textwrap.dedent("""
import sys
lazy from test.test_import.data.lazy_imports.basic2 import nonexistent_name
try:
x = nonexistent_name
except ImportError as e:
# PEP 810: Enhanced error reporting through exception chaining
assert e.__cause__ is not None, "Expected chained exception"
print("OK")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
def test_reification_retries_on_failure(self):
"""Failed reification should allow retry on subsequent access.
PEP 810: "If reification fails, the lazy object is not reified or replaced.
Subsequent uses of the lazy object will re-try the reification."
"""
code = textwrap.dedent("""
import sys
import types
lazy import test.test_import.data.lazy_imports.broken_module
# First access - should fail
try:
x = test.test_import.data.lazy_imports.broken_module
except ValueError:
pass
# The lazy object should still be a lazy proxy (not reified)
g = globals()
lazy_obj = g['test']
# The root 'test' binding should still allow retry
# Second access - should also fail (retry the import)
try:
x = test.test_import.data.lazy_imports.broken_module
except ValueError:
print("OK - retry worked")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
def test_error_during_module_execution_propagates(self):
"""Errors in module code during reification should propagate correctly."""
# Module that raises during import should propagate with chaining
code = textwrap.dedent("""
import sys
lazy import test.test_import.data.lazy_imports.broken_module
try:
_ = test.test_import.data.lazy_imports.broken_module
print("FAIL - should have raised")
except ValueError as e:
# The ValueError from the module should be the cause
if "always fails" in str(e) or (e.__cause__ and "always fails" in str(e.__cause__)):
print("OK")
else:
print(f"FAIL - wrong error: {e}")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
class GlobalsAndDictTests(unittest.TestCase):
"""Tests for globals() and __dict__ behavior with lazy imports.
PEP 810: "Calling globals() or accessing a module's __dict__ does not trigger
reification – they return the module's dictionary, and accessing lazy objects
through that dictionary still returns lazy proxy objects."
"""
def tearDown(self):
for key in list(sys.modules.keys()):
if key.startswith('test.test_import.data.lazy_imports'):
del sys.modules[key]
sys.set_lazy_imports_filter(None)
sys.set_lazy_imports("normal")
def test_globals_returns_lazy_proxy_when_accessed_from_function(self):
"""globals() accessed from a function should return lazy proxy without reification.
Note: At module level, accessing globals()['name'] triggers LOAD_NAME which
automatically resolves lazy imports. Inside a function, accessing globals()['name']
uses BINARY_SUBSCR which returns the lazy proxy without resolution.
"""
code = textwrap.dedent("""
import sys
import types
lazy from test.test_import.data.lazy_imports.basic2 import x
# Check that module is not yet loaded
assert 'test.test_import.data.lazy_imports.basic2' not in sys.modules
def check_lazy():
# Access through globals() from inside a function
g = globals()
lazy_obj = g['x']
return type(lazy_obj) is types.LazyImportType
# Inside function, should get lazy proxy
is_lazy = check_lazy()
assert is_lazy, "Expected LazyImportType from function scope"
# Module should STILL not be loaded
assert 'test.test_import.data.lazy_imports.basic2' not in sys.modules
print("OK")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
def test_globals_dict_access_returns_lazy_proxy_inline(self):
"""Accessing globals()['name'] inline should return lazy proxy.
Note: Assigning g['name'] to a local variable at module level triggers
reification due to STORE_NAME bytecode. Inline access preserves laziness.
"""
code = textwrap.dedent("""
import sys
import types
lazy import json
# Inline access without assignment to local variable preserves lazy proxy
assert type(globals()['json']) is types.LazyImportType
assert 'json' not in sys.modules
print("OK")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
def test_module_dict_returns_lazy_proxy_without_reifying(self):
"""module.__dict__ access should not trigger reification."""
import test.test_import.data.lazy_imports.globals_access
# Module not loaded yet via direct dict access
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
# Access via get_from_globals should return lazy proxy
lazy_obj = test.test_import.data.lazy_imports.globals_access.get_from_globals()
self.assertEqual(type(lazy_obj), types.LazyImportType)
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_direct_access_triggers_reification(self):
"""Direct name access (not through globals()) should trigger reification."""
import test.test_import.data.lazy_imports.globals_access
self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
# Direct access should reify
result = test.test_import.data.lazy_imports.globals_access.get_direct()
self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
def test_resolve_method_forces_reification(self):
"""Calling resolve() on lazy proxy should force reification.
Note: Must access lazy proxy from within a function to avoid automatic
reification by LOAD_NAME at module level.
"""
code = textwrap.dedent("""
import sys
import types
lazy from test.test_import.data.lazy_imports.basic2 import x
assert 'test.test_import.data.lazy_imports.basic2' not in sys.modules
def test_resolve():
g = globals()
lazy_obj = g['x']
assert type(lazy_obj) is types.LazyImportType, f"Expected lazy proxy, got {type(lazy_obj)}"
resolved = lazy_obj.resolve()
# Now module should be loaded
assert 'test.test_import.data.lazy_imports.basic2' in sys.modules
assert resolved == 42 # x is 42 in basic2.py
return True
assert test_resolve()
print("OK")
""")
result = subprocess.run(
[sys.executable, "-c", code],
capture_output=True,
text=True
)
self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
self.assertIn("OK", result.stdout)
def test_add_lazy_to_globals(self):
    # Rebinding a global that a function's inline cache has already seen
    # to a lazy proxy must still reify correctly on the next call:
    # f() sums sneaky.x five times, and basic2.x is 42, so 5 * 42 == 210.
    code = textwrap.dedent("""
        import sys
        import types
        lazy from test.test_import.data.lazy_imports import basic2
        assert 'test.test_import.data.lazy_imports.basic2' not in sys.modules
        class C: pass
        sneaky = C()
        sneaky.x = 1
        def f():
            t = 0
            for _ in range(5):
                t += sneaky.x
            return t
        f()
        globals()["sneaky"] = globals()["basic2"]
        assert f() == 210
        print("OK")
        """)
    result = subprocess.run(
        [sys.executable, "-c", code],
        capture_output=True,
        text=True
    )
    self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
    self.assertIn("OK", result.stdout)
class MultipleNameFromImportTests(unittest.TestCase):
    """Tests for lazy from ... import with multiple names.

    PEP 810: "When using lazy from ... import, each imported name is bound to a
    lazy proxy object. The first access to any of these names triggers loading
    of the entire module and reifies only that specific name to its actual value.
    Other names remain as lazy proxies until they are accessed."
    """

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state so
        # tests remain independent of each other.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_accessing_one_name_leaves_others_as_proxies(self):
        """Accessing one name from multi-name import should leave others lazy."""
        code = textwrap.dedent("""
            import sys
            import types
            lazy from test.test_import.data.lazy_imports.basic2 import f, x
            # Neither should be loaded yet
            assert 'test.test_import.data.lazy_imports.basic2' not in sys.modules
            g = globals()
            assert type(g['f']) is types.LazyImportType
            assert type(g['x']) is types.LazyImportType
            # Access 'x' - this loads the module and reifies only 'x'
            value = x
            assert value == 42
            # Module is now loaded
            assert 'test.test_import.data.lazy_imports.basic2' in sys.modules
            # 'x' should be reified (int), 'f' should still be lazy proxy
            assert type(g['x']) is int, f"Expected int, got {type(g['x'])}"
            assert type(g['f']) is types.LazyImportType, f"Expected LazyImportType, got {type(g['f'])}"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_all_names_reified_after_all_accessed(self):
        """All names should be reified after each is accessed."""
        code = textwrap.dedent("""
            import sys
            import types
            lazy from test.test_import.data.lazy_imports.basic2 import f, x
            g = globals()
            # Access both
            _ = x
            _ = f
            # Both should be reified now
            assert type(g['x']) is int
            assert callable(g['f'])
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)
class SysLazyModulesTrackingTests(unittest.TestCase):
    """Tests for sys.lazy_modules tracking behavior.

    PEP 810: "When the module is reified, it's removed from sys.lazy_modules"
    """

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_module_added_to_lazy_modules_on_lazy_import(self):
        """Module should be added to sys.lazy_modules when lazily imported."""
        # PEP 810 states sys.lazy_modules tracks modules that have been
        # lazily imported, and (per the class docstring) that a module is
        # REMOVED from sys.lazy_modules once it is reified.  The previous
        # comments here claimed the opposite ("still tracked after
        # reification"), contradicting the asserts below; they are fixed.
        code = textwrap.dedent("""
            import sys
            initial_count = len(sys.lazy_modules)
            lazy import test.test_import.data.lazy_imports.basic2
            # Should be in lazy_modules after lazy import
            assert "test.test_import.data.lazy_imports" in sys.lazy_modules
            assert sys.lazy_modules["test.test_import.data.lazy_imports"] == {"basic2"}
            assert len(sys.lazy_modules) > initial_count
            # Trigger reification
            _ = test.test_import.data.lazy_imports.basic2.x
            # Entry is removed from sys.lazy_modules on reification (PEP 810)
            assert "test.test_import.data.lazy_imports" not in sys.lazy_modules
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_lazy_modules_is_per_interpreter(self):
        """Each interpreter should have independent sys.lazy_modules."""
        # Basic test that sys.lazy_modules exists and is a dict
        self.assertIsInstance(sys.lazy_modules, dict)
class CommandLineAndEnvVarTests(unittest.TestCase):
    """Tests for command-line and environment variable control.

    PEP 810: The global lazy imports flag can be controlled through:
    - The -X lazy_imports=<mode> command-line option
    - The PYTHON_LAZY_IMPORTS=<mode> environment variable
    """

    def test_cli_lazy_imports_all_makes_regular_imports_lazy(self):
        """-X lazy_imports=all should make all imports potentially lazy."""
        code = textwrap.dedent("""
            import sys
            # In 'all' mode, regular imports become lazy
            import json
            # json should not be in sys.modules yet (lazy)
            # Actually accessing it triggers reification
            if 'json' not in sys.modules:
                print("LAZY")
            else:
                print("EAGER")
            """)
        result = subprocess.run(
            [sys.executable, "-X", "lazy_imports=all", "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("LAZY", result.stdout)

    def test_cli_lazy_imports_none_forces_all_imports_eager(self):
        """-X lazy_imports=none should force all imports to be eager."""
        code = textwrap.dedent("""
            import sys
            # Even explicit lazy imports should be eager in 'none' mode
            lazy import json
            if 'json' in sys.modules:
                print("EAGER")
            else:
                print("LAZY")
            """)
        result = subprocess.run(
            [sys.executable, "-X", "lazy_imports=none", "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("EAGER", result.stdout)

    def test_cli_lazy_imports_normal_respects_lazy_keyword_only(self):
        """-X lazy_imports=normal should respect lazy keyword only."""
        # Note: Use test modules instead of stdlib modules to avoid
        # modules already loaded by the interpreter startup
        code = textwrap.dedent("""
            import sys
            import test.test_import.data.lazy_imports.basic2 # Should be eager
            lazy import test.test_import.data.lazy_imports.pkg.b # Should be lazy
            eager_loaded = 'test.test_import.data.lazy_imports.basic2' in sys.modules
            lazy_loaded = 'test.test_import.data.lazy_imports.pkg.b' in sys.modules
            if eager_loaded and not lazy_loaded:
                print("OK")
            else:
                print(f"FAIL: eager={eager_loaded}, lazy={lazy_loaded}")
            """)
        result = subprocess.run(
            [sys.executable, "-X", "lazy_imports=normal", "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_env_var_lazy_imports_all_enables_global_lazy(self):
        """PYTHON_LAZY_IMPORTS=all should enable global lazy imports."""
        code = textwrap.dedent("""
            import sys
            import json
            if 'json' not in sys.modules:
                print("LAZY")
            else:
                print("EAGER")
            """)
        import os
        env = os.environ.copy()
        env["PYTHON_LAZY_IMPORTS"] = "all"
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            env=env
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("LAZY", result.stdout)

    def test_env_var_lazy_imports_none_disables_all_lazy(self):
        """PYTHON_LAZY_IMPORTS=none should disable all lazy imports."""
        code = textwrap.dedent("""
            import sys
            lazy import json
            if 'json' in sys.modules:
                print("EAGER")
            else:
                print("LAZY")
            """)
        import os
        env = os.environ.copy()
        env["PYTHON_LAZY_IMPORTS"] = "none"
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True,
            env=env
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("EAGER", result.stdout)

    def test_cli_overrides_env_var(self):
        """Command-line option should take precedence over environment variable."""
        # PEP 810: -X lazy_imports takes precedence over PYTHON_LAZY_IMPORTS
        code = textwrap.dedent("""
            import sys
            lazy import json
            if 'json' in sys.modules:
                print("EAGER")
            else:
                print("LAZY")
            """)
        import os
        env = os.environ.copy()
        env["PYTHON_LAZY_IMPORTS"] = "all" # env says all
        result = subprocess.run(
            [sys.executable, "-X", "lazy_imports=none", "-c", code], # CLI says none
            capture_output=True,
            text=True,
            env=env
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        # CLI should win - imports should be eager
        self.assertIn("EAGER", result.stdout)

    def test_sys_set_lazy_imports_overrides_cli(self):
        """sys.set_lazy_imports() should take precedence over CLI option."""
        code = textwrap.dedent("""
            import sys
            sys.set_lazy_imports("none") # Override CLI
            lazy import json
            if 'json' in sys.modules:
                print("EAGER")
            else:
                print("LAZY")
            """)
        result = subprocess.run(
            [sys.executable, "-X", "lazy_imports=all", "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("EAGER", result.stdout)
class FilterFunctionSignatureTests(unittest.TestCase):
    """Tests for the filter function signature per PEP 810.

    PEP 810: func(importer: str, name: str, fromlist: tuple[str, ...] | None) -> bool
    """

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_filter_receives_correct_arguments_for_import(self):
        """Filter should receive (importer, name, fromlist=None) for 'import x'."""
        code = textwrap.dedent("""
            import sys
            received_args = []
            def my_filter(importer, name, fromlist):
                received_args.append((importer, name, fromlist))
                return True
            sys.set_lazy_imports_filter(my_filter)
            lazy import json
            assert len(received_args) == 1, f"Expected 1 call, got {len(received_args)}"
            importer, name, fromlist = received_args[0]
            assert name == "json", f"Expected name='json', got {name!r}"
            assert fromlist is None, f"Expected fromlist=None, got {fromlist!r}"
            assert isinstance(importer, str), f"Expected str importer, got {type(importer)}"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_filter_receives_fromlist_for_from_import(self):
        """Filter should receive fromlist tuple for 'from x import y, z'."""
        code = textwrap.dedent("""
            import sys
            received_args = []
            def my_filter(importer, name, fromlist):
                received_args.append((importer, name, fromlist))
                return True
            sys.set_lazy_imports_filter(my_filter)
            lazy from json import dumps, loads
            assert len(received_args) == 1, f"Expected 1 call, got {len(received_args)}"
            importer, name, fromlist = received_args[0]
            assert name == "json", f"Expected name='json', got {name!r}"
            assert fromlist == ("dumps", "loads"), f"Expected ('dumps', 'loads'), got {fromlist!r}"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_filter_returning_false_forces_eager_import(self):
        """Filter returning False should make import eager."""
        code = textwrap.dedent("""
            import sys
            def deny_filter(importer, name, fromlist):
                return False
            sys.set_lazy_imports_filter(deny_filter)
            lazy import json
            # Should be eager due to filter
            if 'json' in sys.modules:
                print("EAGER")
            else:
                print("LAZY")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stderr: {result.stderr}")
        self.assertIn("EAGER", result.stdout)
class AdditionalSyntaxRestrictionTests(unittest.TestCase):
    """Additional syntax restriction tests per PEP 810."""

    def tearDown(self):
        # Drop any cached lazy-import fixtures, then restore the default
        # lazy-import mode and filter.
        stale = [name for name in list(sys.modules.keys())
                 if name.startswith('test.test_import.data.lazy_imports')]
        for name in stale:
            del sys.modules[name]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_lazy_import_inside_class_raises_syntax_error(self):
        """lazy import inside class body should raise SyntaxError."""
        # PEP 810: "The soft keyword is only allowed at the global (module) level,
        # not inside functions, class bodies, try blocks, or import *"
        with self.assertRaises(SyntaxError):
            import test.test_import.data.lazy_imports.lazy_class_body
class MixedLazyEagerImportTests(unittest.TestCase):
    """Tests for mixing lazy and eager imports of the same module.

    PEP 810: "If module foo is imported both lazily and eagerly in the same
    program, the eager import takes precedence and both bindings resolve to
    the same module object."
    """

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_eager_import_before_lazy_resolves_to_same_module(self):
        """Eager import before lazy should make lazy resolve to same module."""
        code = textwrap.dedent("""
            import sys
            import json # Eager import first
            lazy import json as lazy_json # Lazy import same module
            # lazy_json should resolve to the same object
            assert json is lazy_json, "Lazy and eager imports should resolve to same module"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_lazy_import_before_eager_resolves_to_same_module(self):
        """Lazy import followed by eager should both point to same module."""
        code = textwrap.dedent("""
            import sys
            lazy import json as lazy_json
            # Lazy not loaded yet
            assert 'json' not in sys.modules
            import json # Eager import triggers load
            # Both should be the same object
            assert json is lazy_json
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)
class RelativeImportTests(unittest.TestCase):
    """Tests for relative imports with lazy keyword."""

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_relative_lazy_import(self):
        """lazy from . import submodule should work."""
        from test.test_import.data.lazy_imports import relative_lazy
        # basic2 should not be loaded yet
        self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
        # Access triggers reification (return value itself is not asserted)
        result = relative_lazy.get_basic2()
        self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)

    def test_relative_lazy_from_import(self):
        """lazy from .module import name should work."""
        from test.test_import.data.lazy_imports import relative_lazy_from
        # basic2 should not be loaded yet
        self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
        # Access triggers reification
        result = relative_lazy_from.get_x()
        self.assertEqual(result, 42)
        self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class LazyModulesCompatibilityFromImportTests(unittest.TestCase):
    """Tests for __lazy_modules__ with from imports.

    PEP 810: "When a module is made lazy this way, from-imports using that
    module are also lazy"
    """

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_lazy_modules_makes_from_imports_lazy(self):
        """__lazy_modules__ should make from imports of listed modules lazy."""
        from test.test_import.data.lazy_imports import lazy_compat_from
        # basic2 should not be loaded yet because it's in __lazy_modules__
        self.assertNotIn("test.test_import.data.lazy_imports.basic2", sys.modules)
        # Access triggers reification
        result = lazy_compat_from.get_x()
        self.assertEqual(result, 42)
        self.assertIn("test.test_import.data.lazy_imports.basic2", sys.modules)
class ImportStateAtReificationTests(unittest.TestCase):
    """Tests for import system state at reification time.

    PEP 810: "Reification still calls __import__ to resolve the import, which uses
    the state of the import system (e.g. sys.path, sys.meta_path, sys.path_hooks
    and __import__) at reification time, not the state when the lazy import
    statement was evaluated."
    """

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_sys_path_at_reification_time_is_used(self):
        """sys.path changes after lazy import should affect reification."""
        code = textwrap.dedent("""
            import sys
            import tempfile
            import os
            # Create a temporary module
            with tempfile.TemporaryDirectory() as tmpdir:
                mod_path = os.path.join(tmpdir, "dynamic_test_module.py")
                with open(mod_path, "w") as f:
                    f.write("VALUE = 'from_temp_dir'\\n")
                # Lazy import before adding to path
                lazy import dynamic_test_module
                # Module cannot be found yet
                try:
                    _ = dynamic_test_module
                    print("FAIL - should not find module")
                except ModuleNotFoundError:
                    pass
                # Now add temp dir to path
                sys.path.insert(0, tmpdir)
                # Now reification should succeed using current sys.path
                assert dynamic_test_module.VALUE == 'from_temp_dir'
                print("OK")
                sys.path.remove(tmpdir)
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)
class ThreadSafetyTests(unittest.TestCase):
    """Tests for thread-safety of lazy imports."""

    def tearDown(self):
        # Purge fixture modules and restore default lazy-import state.
        for key in list(sys.modules.keys()):
            if key.startswith('test.test_import.data.lazy_imports'):
                del sys.modules[key]
        sys.set_lazy_imports_filter(None)
        sys.set_lazy_imports("normal")

    def test_concurrent_lazy_import_reification(self):
        """Multiple threads racing to reify the same lazy import should succeed."""
        from test.test_import.data.lazy_imports import basic_unused
        num_threads = 10
        results = [None] * num_threads
        errors = []
        # Barrier maximizes the chance that all threads hit reification
        # at the same moment.
        barrier = threading.Barrier(num_threads)

        def access_lazy_import(idx):
            try:
                barrier.wait()
                # Attribute chain into the fixture's lazy binding; the
                # first access reifies, the rest must see the same object.
                module = basic_unused.test.test_import.data.lazy_imports.basic2
                results[idx] = module
            except Exception as e:
                errors.append((idx, e))

        threads = [
            threading.Thread(target=access_lazy_import, args=(i,))
            for i in range(num_threads)
        ]
        for t in threads:
            t.start()
        for t in threads:
            t.join()
        self.assertEqual(errors, [], f"Errors occurred: {errors}")
        self.assertTrue(all(r is not None for r in results))
        # Every thread must have observed the same module object.
        first_module = results[0]
        for r in results[1:]:
            self.assertIs(r, first_module)

    def test_concurrent_reification_multiple_modules(self):
        """Multiple threads reifying different lazy imports concurrently."""
        code = textwrap.dedent("""
            import sys
            import threading
            sys.set_lazy_imports("all")
            lazy import json
            lazy import os
            lazy import io
            lazy import re
            num_threads = 8
            results = {}
            errors = []
            barrier = threading.Barrier(num_threads)
            def access_modules(idx):
                try:
                    barrier.wait()
                    mods = [json, os, io, re]
                    results[idx] = [type(m).__name__ for m in mods]
                except Exception as e:
                    errors.append((idx, e))
            threads = [
                threading.Thread(target=access_modules, args=(i,))
                for i in range(num_threads)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            assert not errors, f"Errors: {errors}"
            for idx, mods in results.items():
                assert all(m == 'module' for m in mods), f"Thread {idx} got: {mods}"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_concurrent_lazy_modules_set_updates(self):
        """Multiple threads creating lazy imports should safely update sys.lazy_modules."""
        code = textwrap.dedent("""
            import sys
            import threading
            num_threads = 16
            iterations = 50
            errors = []
            barrier = threading.Barrier(num_threads)
            def create_lazy_imports(idx):
                try:
                    barrier.wait()
                    for i in range(iterations):
                        exec(f"lazy import json as json_{idx}_{i}", globals())
                        exec(f"lazy import os as os_{idx}_{i}", globals())
                except Exception as e:
                    errors.append((idx, e))
            threads = [
                threading.Thread(target=create_lazy_imports, args=(i,))
                for i in range(num_threads)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            assert not errors, f"Errors: {errors}"
            assert isinstance(sys.lazy_modules, dict), "sys.lazy_modules is not a dict"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_concurrent_reification_same_module_high_contention(self):
        """High contention: many threads reifying the exact same lazy import."""
        code = textwrap.dedent("""
            import sys
            import threading
            import types
            sys.set_lazy_imports("all")
            lazy import json
            num_threads = 20
            results = [None] * num_threads
            errors = []
            barrier = threading.Barrier(num_threads)
            def access_json(idx):
                try:
                    barrier.wait()
                    for _ in range(100):
                        _ = json.dumps
                        _ = json.loads
                    results[idx] = json
                except Exception as e:
                    errors.append((idx, e))
            threads = [
                threading.Thread(target=access_json, args=(i,))
                for i in range(num_threads)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            assert not errors, f"Errors: {errors}"
            assert all(r is not None for r in results), "Some threads got None"
            first = results[0]
            assert all(r is first for r in results), "Inconsistent module objects"
            assert not isinstance(first, types.LazyImportType), "Got lazy import instead of module"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)

    def test_concurrent_reification_with_module_attribute_access(self):
        """Threads racing to reify and immediately access module attributes."""
        code = textwrap.dedent("""
            import sys
            import threading
            sys.set_lazy_imports("all")
            lazy import collections
            lazy import functools
            lazy import itertools
            num_threads = 12
            results = {}
            errors = []
            barrier = threading.Barrier(num_threads)
            def stress_lazy_imports(idx):
                try:
                    barrier.wait()
                    for _ in range(50):
                        _ = collections.OrderedDict
                        _ = functools.partial
                        _ = itertools.chain
                        _ = collections.defaultdict
                        _ = functools.lru_cache
                        _ = itertools.islice
                    results[idx] = (
                        type(collections).__name__,
                        type(functools).__name__,
                        type(itertools).__name__,
                    )
                except Exception as e:
                    errors.append((idx, e))
            threads = [
                threading.Thread(target=stress_lazy_imports, args=(i,))
                for i in range(num_threads)
            ]
            for t in threads:
                t.start()
            for t in threads:
                t.join()
            assert not errors, f"Errors: {errors}"
            for idx, types_tuple in results.items():
                assert all(t == 'module' for t in types_tuple), f"Thread {idx}: {types_tuple}"
            print("OK")
            """)
        result = subprocess.run(
            [sys.executable, "-c", code],
            capture_output=True,
            text=True
        )
        self.assertEqual(result.returncode, 0, f"stdout: {result.stdout}, stderr: {result.stderr}")
        self.assertIn("OK", result.stdout)
class LazyImportDisTests(unittest.TestCase):
def test_lazy_import_dis(self):
"""dis should properly show lazy import"""
code = compile("lazy import foo", "exec", "exec")
f = io.StringIO()
dis.dis(code, file=f)
self.assertIn("foo + lazy", f.getvalue())
def test_normal_import_dis(self):
"""non lazy imports should just show the name"""
code = compile("import foo", "exec", "exec")
f = io.StringIO()
dis.dis(code, file=f)
for line in f.getvalue().split('\n'):
if "IMPORT_NAME" in line:
self.assertIn("(foo)", line)
break
else:
self.assertFail("IMPORT_NAME not found")
@unittest.skipIf(_testcapi is None, 'need the _testcapi module')
class LazyCApiTests(unittest.TestCase):
    """Check the C-API accessors mirror the sys-module lazy-import state."""

    def tearDown(self):
        # Restore defaults touched by the mode/filter round-trips below.
        sys.set_lazy_imports("normal")
        sys.set_lazy_imports_filter(None)

    def test_set_matches_sys(self):
        # The C getter and sys getter must agree before and after each
        # mode change made through the C setter.
        self.assertEqual(_testcapi.PyImport_GetLazyImportsMode(), sys.get_lazy_imports())
        for mode in ("normal", "all", "none"):
            _testcapi.PyImport_SetLazyImportsMode(mode)
            self.assertEqual(_testcapi.PyImport_GetLazyImportsMode(), sys.get_lazy_imports())

    def test_filter_matches_sys(self):
        self.assertEqual(_testcapi.PyImport_GetLazyImportsFilter(), sys.get_lazy_imports_filter())
        def filter(*args):
            pass
        _testcapi.PyImport_SetLazyImportsFilter(filter)
        self.assertEqual(_testcapi.PyImport_GetLazyImportsFilter(), sys.get_lazy_imports_filter())

    def test_set_bad_filter(self):
        # Non-callable filters are rejected by the C API.
        self.assertRaises(ValueError, _testcapi.PyImport_SetLazyImportsFilter, 42)
if __name__ == '__main__':
unittest.main() | python | github | https://github.com/python/cpython | Lib/test/test_import/test_lazy_imports.py |
"""GUIMiner - graphical frontend to Bitcoin miners.
Currently supports:
- m0mchil's "poclbm"
- puddinpop's "rpcminer"
- jedi95's "Phoenix"
- ufasoft's "bitcoin-miner"
Copyright 2011 Chris MacLeod
This program is released under the GNU GPL. See LICENSE.txt for details.
"""
import sys, os, subprocess, errno, re, threading, logging, time, httplib, urllib
import wx
import json
import collections
try:
import win32api, win32con, win32process
except ImportError:
pass
from wx.lib.agw import flatnotebook as fnb
from wx.lib.agw import hyperlink
from wx.lib.newevent import NewEvent
__version__ = '2011-11-22'
def get_module_path():
    """Return the folder containing this script (or its .exe)."""
    if hasattr(sys, 'frozen'):
        # Frozen (py2exe-style) build: the executable is the module.
        source = sys.executable
    else:
        source = __file__
    return os.path.dirname(os.path.abspath(source))
# --mock substitutes simulated miner output (no real hardware needed).
USE_MOCK = '--mock' in sys.argv

# Set up localization; requires the app to be created
app = wx.PySimpleApp(0)
wx.InitAllImageHandlers()
# wx's translation lookup doubles as the gettext-style "_" function.
_ = wx.GetTranslation

# Display name -> wx language code for the languages we ship catalogs for.
LANGUAGES = {
    "Chinese Simplified": wx.LANGUAGE_CHINESE_SIMPLIFIED,
    "English": wx.LANGUAGE_ENGLISH,
    "French": wx.LANGUAGE_FRENCH,
    "German": wx.LANGUAGE_GERMAN,
    "Hungarian": wx.LANGUAGE_HUNGARIAN,
    "Italian": wx.LANGUAGE_ITALIAN,
    "Spanish": wx.LANGUAGE_SPANISH,
    "Russian": wx.LANGUAGE_RUSSIAN,
    "Dutch": wx.LANGUAGE_DUTCH,
}
# Reverse map (wx code -> display name) used when saving the selection.
LANGUAGES_REVERSE = dict((v, k) for (k, v) in LANGUAGES.items())

DONATION_ADDRESS = "1MDDh2h4cAZDafgc94mr9q95dhRYcJbNQo"
# Current wx.Locale and language code; populated by update_language().
locale = None
language = None
def update_language(new_language):
    """Switch the GUI locale to *new_language* (a wx.LANGUAGE_* code).

    On success, installs the "guiminer" translation catalog from the
    locale/ folder next to the script; otherwise leaves locale as None.
    """
    global locale, language
    language = new_language
    if locale:
        # Release the previous wx.Locale before installing a new one.
        del locale
    locale = wx.Locale(language)
    if locale.IsOk():
        locale.AddCatalogLookupPathPrefix(os.path.join(get_module_path(), "locale"))
        locale.AddCatalog("guiminer")
    else:
        locale = None
def load_language():
    """Read the saved UI language from default_language.ini and apply it.

    Missing or unknown settings fall back to English.
    """
    config_path = os.path.join(get_module_path(), 'default_language.ini')
    data = {}
    if os.path.exists(config_path):
        with open(config_path) as config_file:
            data.update(json.load(config_file))
    chosen = data.get('language', "English")
    update_language(LANGUAGES.get(chosen, wx.LANGUAGE_ENGLISH))
def save_language():
    """Persist the current UI language selection to default_language.ini."""
    config_path = os.path.join(get_module_path(), 'default_language.ini')
    name = LANGUAGES_REVERSE.get(language)
    with open(config_path, 'w') as config_file:
        json.dump(dict(language=name), config_file)
load_language()
ABOUT_TEXT = _(
"""GUIMiner
Version: %(version)s
GUI by Chris 'Kiv' MacLeod
Original poclbm miner by m0mchil
Original rpcminer by puddinpop
Get the source code or file issues at GitHub:
https://github.com/Kiv/poclbm
If you enjoyed this software, support its development
by donating to:
%(address)s
Even a single Bitcoin is appreciated and helps motivate
further work on this software.
""")
# Translatable strings that are used repeatedly
STR_NOT_STARTED = _("Not started")
STR_STARTING = _("Starting...")
STR_STOPPED = _("Stopped")
STR_PAUSED = _("Paused")
STR_START_MINING = _("Start mining!")
STR_STOP_MINING = _("Stop mining")
STR_REFRESH_BALANCE = _("Refresh balance")
STR_CONNECTION_ERROR = _("Connection error")
STR_USERNAME = _("Username:")
STR_PASSWORD = _("Password:")
STR_QUIT = _("Quit this program")
STR_ABOUT = _("Show about dialog")
# Alternate backends that we know how to call
SUPPORTED_BACKENDS = [
"rpcminer-4way.exe",
"rpcminer-cpu.exe",
"rpcminer-cuda.exe",
"rpcminer-opencl.exe",
"phoenix.py",
"phoenix.exe",
"bitcoin-miner.exe"
]
USER_AGENT = "guiminer/" + __version__
# Time constants
SAMPLE_TIME_SECS = 3600
REFRESH_RATE_MILLIS = 2000
# Layout constants
LBL_STYLE = wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL
BTN_STYLE = wx.ALIGN_CENTER_HORIZONTAL | wx.ALL
# Events sent from the worker threads
(UpdateHashRateEvent, EVT_UPDATE_HASHRATE) = NewEvent()
(UpdateAcceptedEvent, EVT_UPDATE_ACCEPTED) = NewEvent()
(UpdateSoloCheckEvent, EVT_UPDATE_SOLOCHECK) = NewEvent()
(UpdateStatusEvent, EVT_UPDATE_STATUS) = NewEvent()
# Utility functions
def merge_whitespace(s):
    r"""Collapse each run of whitespace in *s* into a single space.

    Bug fix: the previous pattern ``( +)|\t+`` matched runs of spaces OR
    runs of tabs separately, so a mixed run such as ``" \t "`` collapsed
    to three spaces instead of one, contradicting this docstring.
    ``\s+`` collapses any whitespace run in a single pass.
    """
    return re.sub(r"\s+", " ", s).strip()
def get_opencl_devices():
    """Return a list of available OpenCL devices.

    Raises ImportError if OpenCL is not found.
    Raises IOError if no OpenCL devices are found.
    """
    import pyopencl
    device_strings = []
    platforms = pyopencl.get_platforms() #@UndefinedVariable
    for i, platform in enumerate(platforms):
        devices = platform.get_devices()
        for j, device in enumerate(devices):
            # "[platform-device] name", name truncated to 25 chars so it
            # fits in the device selection combo box.
            device_strings.append('[%d-%d] %s' %
                                  (i, j, merge_whitespace(device.name)[:25]))
    if len(device_strings) == 0:
        raise IOError
    return device_strings
def get_icon_bundle():
    """Return the Bitcoin program icon bundle.

    Loads logo.ico from the folder containing the script/exe.
    """
    return wx.IconBundleFromFile(os.path.join(get_module_path(), "logo.ico"), wx.BITMAP_TYPE_ICO)
def get_taskbar_icon():
    """Return the taskbar icon.

    This works around Window's annoying behavior of ignoring the 16x16 image
    and using nearest neighbour downsampling on the 32x32 image instead."""
    ib = get_icon_bundle()
    # Explicitly pick the 16x16 icon out of the bundle.
    return ib.GetIcon((16, 16))
def mkdir_p(path):
    """If the directory 'path' doesn't exist, create it. Same as mkdir -p.

    Raises OSError when creation fails for any reason other than the
    directory already existing — including the previously-swallowed case
    where *path* exists but is a regular file (real ``mkdir -p`` fails
    there too).
    """
    try:
        os.makedirs(path)
    except OSError as exc:
        # EEXIST is only benign when the existing path is a directory.
        if exc.errno != errno.EEXIST or not os.path.isdir(path):
            raise
def add_tooltip(widget, text):
    """Attach a tooltip displaying *text* to *widget*."""
    widget.SetToolTip(wx.ToolTip(text))
def format_khash(rate):
    """Render a khash/s figure as a human-readable string.

    A rate of exactly 0 means the miner has just connected.
    """
    if rate > 10 ** 6:
        return _("%.3f Ghash/s") % (rate / 1000000.)
    elif rate > 10 ** 3:
        return _("%.1f Mhash/s") % (rate / 1000.)
    elif rate == 0:
        return _("Connecting...")
    return _("%d khash/s") % rate
def format_balance(amount):
    """Render an amount of Bitcoins as a fixed three-decimal BTC string."""
    value = float(amount)
    return "%.3f BTC" % value
def init_logger():
    """Set up and return the logging object and custom formatter.

    Logs everything at DEBUG and above to guiminer.log next to the
    script/exe; mode 'w' truncates the log on every launch.
    """
    logger = logging.getLogger("poclbm-gui")
    logger.setLevel(logging.DEBUG)
    file_handler = logging.FileHandler(
        os.path.join(get_module_path(), 'guiminer.log'), 'w')
    formatter = logging.Formatter("%(asctime)s: %(message)s",
                                  "%Y-%m-%d %H:%M:%S")
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    return logger, formatter

# Module-wide logger and formatter shared by the GUI and worker threads.
logger, formatter = init_logger()
def http_request(hostname, *args, **kwargs):
    """Do a HTTP request and return the response data.

    *args are forwarded to HTTPConnection.request(); pass use_https=True
    in kwargs to use an HTTPS connection. Returns (response, data).
    The connection is always closed, even on error.
    """
    conn_cls = httplib.HTTPSConnection if kwargs.get('use_https') else httplib.HTTPConnection
    conn = conn_cls(hostname)
    try:
        logger.debug(_("Requesting balance: %(request)s"), dict(request=args))
        conn.request(*args)
        response = conn.getresponse()
        data = response.read()
        logger.debug(_("Server replied: %(status)s, %(data)s"),
                     dict(status=str(response.status), data=data))
        return response, data
    finally:
        conn.close()
def get_process_affinity(pid):
    """Return the affinity mask for the specified process.

    Windows-only: relies on the optional win32 modules imported at the
    top of the file.
    """
    flags = win32con.PROCESS_QUERY_INFORMATION
    handle = win32api.OpenProcess(flags, 0, pid)
    # GetProcessAffinityMask returns (process mask, system mask); we
    # only want the process mask.
    return win32process.GetProcessAffinityMask(handle)[0]
def set_process_affinity(pid, mask):
    """Set the affinity for process to mask.

    Windows-only; requires both query and set rights on the process.
    """
    flags = win32con.PROCESS_QUERY_INFORMATION | win32con.PROCESS_SET_INFORMATION
    handle = win32api.OpenProcess(flags, 0, pid)
    win32process.SetProcessAffinityMask(handle, mask)
def find_nth(haystack, needle, n):
    """Return the index of the nth occurrence of needle in haystack, or -1."""
    index = haystack.find(needle)
    remaining = n
    # Skip forward occurrence by occurrence until the nth (or a miss).
    while index >= 0 and remaining > 1:
        index = haystack.find(needle, index + len(needle))
        remaining -= 1
    return index
class ConsolePanel(wx.Panel):
    """Panel that displays logging events.
    Uses with a StreamHandler to log events to a TextCtrl. Thread-safe.
    """
    def __init__(self, parent, n_max_lines):
        # n_max_lines: cap on displayed lines; older lines are trimmed
        # by append_text() once the limit is exceeded.
        wx.Panel.__init__(self, parent, -1)
        self.parent = parent
        self.n_max_lines = n_max_lines
        vbox = wx.BoxSizer(wx.VERTICAL)
        style = wx.TE_MULTILINE | wx.TE_READONLY | wx.HSCROLL
        self.text = wx.TextCtrl(self, -1, "", style=style)
        vbox.Add(self.text, 1, wx.EXPAND)
        self.SetSizer(vbox)
        # Register this panel as a stream target on the module logger so
        # log records are mirrored into the TextCtrl (see write()).
        self.handler = logging.StreamHandler(self)
        formatter = logging.Formatter("%(asctime)s: %(message)s",
                                      "%Y-%m-%d %H:%M:%S")
        self.handler.setFormatter(formatter)
        logger.addHandler(self.handler)
    def on_focus(self):
        """On focus, clear the status bar."""
        self.parent.statusbar.SetStatusText("", 0)
        self.parent.statusbar.SetStatusText("", 1)
    def on_close(self):
        """On closing, stop handling logging events."""
        logger.removeHandler(self.handler)
    def append_text(self, text):
        # Append and then drop the oldest lines beyond n_max_lines.
        self.text.AppendText(text)
        lines_to_cut = self.text.GetNumberOfLines() - self.n_max_lines
        if lines_to_cut > 0:
            contents = self.text.GetValue()
            position = find_nth(contents, '\n', lines_to_cut)
            self.text.ChangeValue(contents[position + 1:])
    def write(self, text):
        """Forward logging events to our TextCtrl."""
        # Called from arbitrary threads by StreamHandler; CallAfter hops
        # onto the GUI thread, which is what makes this thread-safe.
        wx.CallAfter(self.append_text, text)
class SummaryPanel(wx.Panel):
    """Panel that displays a summary of all miners."""
    def __init__(self, parent):
        wx.Panel.__init__(self, parent, -1)
        self.parent = parent
        # Periodically refresh the summary rows while this tab is visible.
        self.timer = wx.Timer(self)
        self.timer.Start(REFRESH_RATE_MILLIS)
        self.Bind(wx.EVT_TIMER, self.on_timer)
        flags = wx.ALIGN_CENTER_HORIZONTAL | wx.ALL
        border = 5
        # Header row; each entry is a (widget, proportion, flags, border)
        # tuple ready for FlexGridSizer.AddMany().
        self.column_headers = [
            (wx.StaticText(self, -1, _("Miner")), 0, flags, border),
            (wx.StaticText(self, -1, _("Speed")), 0, flags, border),
            (wx.StaticText(self, -1, _("Accepted")), 0, flags, border),
            (wx.StaticText(self, -1, _("Stale")), 0, flags, border),
            (wx.StaticText(self, -1, _("Start/Stop")), 0, flags, border),
            (wx.StaticText(self, -1, _("Autostart")), 0, flags, border),
        ]
        font = wx.SystemSettings_GetFont(wx.SYS_DEFAULT_GUI_FONT)
        font.SetUnderlined(True)
        for st in self.column_headers:
            st[0].SetFont(font)
        self.grid = wx.FlexGridSizer(0, len(self.column_headers), 2, 2)
        self.grid.AddMany(self.column_headers)
        self.add_miners_to_grid()
        self.grid.AddGrowableCol(0)
        self.grid.AddGrowableCol(1)
        self.grid.AddGrowableCol(2)
        self.grid.AddGrowableCol(3)
        self.SetSizer(self.grid)
    def add_miners_to_grid(self):
        """Add a summary row for each miner to the summary grid."""
        # Remove any existing widgets except the column headers.
        # Iterate in reverse so sizer indices stay valid while removing.
        for i in reversed(range(len(self.column_headers), len(self.grid.GetChildren()))):
            self.grid.Hide(i)
            self.grid.Remove(i)
        for p in self.parent.profile_panels:
            p.clear_summary_widgets()
            self.grid.AddMany(p.get_summary_widgets(self))
        self.grid.Layout()
    def on_close(self):
        # Stop the refresh timer so no callbacks fire after destruction.
        self.timer.Stop()
    def on_timer(self, event=None):
        """Whenever the timer goes off, refresh the summary data."""
        # Skip the work entirely when this tab is not the visible page.
        if self.parent.nb.GetSelection() != self.parent.nb.GetPageIndex(self):
            return
        for p in self.parent.profile_panels:
            p.update_summary()
        self.parent.statusbar.SetStatusText("", 0) # TODO: show something
        total_rate = sum(p.last_rate for p in self.parent.profile_panels
                         if p.is_mining)
        if any(p.is_mining for p in self.parent.profile_panels):
            self.parent.statusbar.SetStatusText(format_khash(total_rate), 1)
        else:
            self.parent.statusbar.SetStatusText("", 1)
    def on_focus(self):
        """On focus, show the statusbar text."""
        self.on_timer()
class GUIMinerTaskBarIcon(wx.TaskBarIcon):
    """Taskbar icon for the GUI.
    Shows status messages on hover and opens on click.
    """
    # Menu item ids for the right-click popup menu.
    # NOTE(review): TBMENU_CHANGE and TBMENU_REMOVE are allocated but not
    # bound or used in this class - possibly vestigial.
    TBMENU_RESTORE = wx.NewId()
    TBMENU_PAUSE = wx.NewId()
    TBMENU_CLOSE = wx.NewId()
    TBMENU_CHANGE = wx.NewId()
    TBMENU_REMOVE = wx.NewId()
    def __init__(self, frame):
        # frame: the main GUI frame this icon controls.
        wx.TaskBarIcon.__init__(self)
        self.frame = frame
        self.icon = get_taskbar_icon()
        # Refresh the hover tooltip periodically via on_timer().
        self.timer = wx.Timer(self)
        self.timer.Start(REFRESH_RATE_MILLIS)
        self.is_paused = False
        self.SetIcon(self.icon, "GUIMiner")
        self.imgidx = 1
        self.Bind(wx.EVT_TASKBAR_LEFT_DCLICK, self.on_taskbar_activate)
        self.Bind(wx.EVT_MENU, self.on_taskbar_activate, id=self.TBMENU_RESTORE)
        self.Bind(wx.EVT_MENU, self.on_taskbar_close, id=self.TBMENU_CLOSE)
        self.Bind(wx.EVT_MENU, self.on_pause, id=self.TBMENU_PAUSE)
        self.Bind(wx.EVT_TIMER, self.on_timer)
    def CreatePopupMenu(self):
        """Override from wx.TaskBarIcon. Creates the right-click menu."""
        menu = wx.Menu()
        menu.AppendCheckItem(self.TBMENU_PAUSE, _("Pause all"))
        menu.Check(self.TBMENU_PAUSE, self.is_paused)
        menu.Append(self.TBMENU_RESTORE, _("Restore"))
        menu.Append(self.TBMENU_CLOSE, _("Close"))
        return menu
    def on_taskbar_activate(self, evt):
        # Restore and raise the main frame on double-click or "Restore".
        if self.frame.IsIconized():
            self.frame.Iconize(False)
        if not self.frame.IsShown():
            self.frame.Show(True)
        self.frame.Raise()
    def on_taskbar_close(self, evt):
        wx.CallAfter(self.frame.Close, force=True)
    def on_timer(self, event):
        """Refresh the taskbar icon's status message."""
        objs = self.frame.profile_panels
        if objs:
            text = '\n'.join(p.get_taskbar_text() for p in objs)
            self.SetIcon(self.icon, text)
    def on_pause(self, event):
        """Pause or resume the currently running miners."""
        self.is_paused = event.Checked()
        for miner in self.frame.profile_panels:
            if self.is_paused:
                miner.pause()
            else:
                miner.resume()
class MinerListenerThread(threading.Thread):
    """Thread that parses a miner subprocess's stdout into wx events.

    LINES maps regex patterns to event factories. Patterns are tried in
    order and the first match wins, so the ignore-pattern must stay first;
    subclasses override LINES for backends with different output formats.
    """
    LINES = [
        (r"Target =|average rate|Sending to server|found hash|connected to|Setting server",
         lambda _: None), # Just ignore lines like these
        (r"accepted|\"result\":\s*true",
         lambda _: UpdateAcceptedEvent(accepted=True)),
        (r"invalid|stale", lambda _:
         UpdateAcceptedEvent(accepted=False)),
        (r"(\d+)\s*khash/s", lambda match:
         UpdateHashRateEvent(rate=int(match.group(1)))),
        (r"(\d+\.\d+)\s*MH/s", lambda match:
         UpdateHashRateEvent(rate=float(match.group(1)) * 1000)),
        (r"(\d+\.\d+)\s*Mhash/s", lambda match:
         UpdateHashRateEvent(rate=float(match.group(1)) * 1000)),
        (r"(\d+)\s*Mhash/s", lambda match:
         UpdateHashRateEvent(rate=int(match.group(1)) * 1000)),
        (r"checking (\d+)", lambda _:
         UpdateSoloCheckEvent()),
    ]
    def __init__(self, parent, miner):
        # parent: wx window that receives posted events.
        # miner: subprocess.Popen instance whose stdout we read.
        threading.Thread.__init__(self)
        self.shutdown_event = threading.Event()
        self.parent = parent
        self.parent_name = parent.name
        self.miner = miner
    def run(self):
        """Read stdout lines until shutdown_event is set, posting events."""
        logger.info(_('Listener for "%s" started') % self.parent_name)
        while not self.shutdown_event.is_set():
            line = self.miner.stdout.readline().strip()
            #logger.debug("Line: %s", line)
            if not line: continue
            for s, event_func in self.LINES: # Use self to allow subclassing
                match = re.search(s, line, flags=re.I)
                if match is not None:
                    event = event_func(match)
                    if event is not None:
                        wx.PostEvent(self.parent, event)
                    break
            else:
                # Possible error or new message, just pipe it through
                event = UpdateStatusEvent(text=line)
                logger.info(_('Listener for "%(name)s": %(line)s'),
                            dict(name=self.parent_name, line=line))
                wx.PostEvent(self.parent, event)
        logger.info(_('Listener for "%s" shutting down'), self.parent_name)
class PhoenixListenerThread(MinerListenerThread):
    """Listener for the phoenix miner's output format.

    Overrides LINES only; patterns are tried in order, first match wins.
    """
    LINES = [
        (r"Result: .* accepted",
         lambda _: UpdateAcceptedEvent(accepted=True)),
        (r"Result: .* rejected", lambda _:
         UpdateAcceptedEvent(accepted=False)),
        # Phoenix may print "123 Khash/sec" or "123.45 Khash/sec"; the
        # fractional part is optional, hence the two capture groups.
        (r"(\d+)\.?(\d*) Khash/sec", lambda match:
         UpdateHashRateEvent(rate=float(match.group(1) + '.' + match.group(2)))),
        (r"(\d+)\.?(\d*) Mhash/sec", lambda match:
         UpdateHashRateEvent(rate=float(match.group(1) + '.' + match.group(2)) * 1000)),
        (r"Currently on block",
         lambda _: None), # Just ignore lines like these
    ]
class CgListenerThread(MinerListenerThread):
    """Listener for cgminer's output format.

    Overrides LINES only; patterns are tried in order, first match wins.
    """
    LINES = [
        (r"Accepted .* GPU \d+ thread \d+",
         lambda _: UpdateAcceptedEvent(accepted=True)),
        (r"Rejected .* GPU \d+ thread \d+",
         lambda _: UpdateAcceptedEvent(accepted=False)),
        # e.g. "(5s):123.4 ... Mh/s" - rate reported in Mhash/s.
        (r"\(\d+s\):(\d+)\.?(\d*) .* Mh/s", lambda match:
         UpdateHashRateEvent(rate=float(match.group(1) + '.' + match.group(2)) * 1000)),
        (r"^GPU\s*\d+",
         lambda _: None), # Just ignore lines like these
    ]
class MinerTab(wx.Panel):
"""A tab in the GUI representing a miner instance.
Each MinerTab has these responsibilities:
- Persist its data to and from the config file
- Launch a backend subprocess and monitor its progress
by creating a MinerListenerThread.
- Post updates to the GUI's statusbar & summary panel; the format depends
whether the backend is working solo or in a pool.
"""
    def __init__(self, parent, id, devices, servers, defaults, statusbar, data):
        """Build the tab's widgets, load profile *data*, and wire events.

        devices: list of OpenCL device name strings for the combo box.
        servers: list of server configuration dicts.
        defaults: dict of default settings (default_server, username, ...).
        statusbar: the frame's status bar, shared between tabs.
        data: previously saved profile data; see get_data()/set_data().
        """
        wx.Panel.__init__(self, parent, id)
        self.parent = parent
        self.servers = servers
        self.defaults = defaults
        self.statusbar = statusbar
        # --- Mining state and statistics -------------------------------
        self.is_mining = False
        self.is_paused = False
        self.is_possible_error = False
        self.miner = None # subprocess.Popen instance when mining
        self.miner_listener = None # MinerListenerThread when mining
        self.solo_blocks_found = 0
        self.accepted_shares = 0 # shares for pool, diff1 hashes for solo
        self.accepted_times = collections.deque()
        self.invalid_shares = 0
        self.invalid_times = collections.deque()
        self.last_rate = 0 # units of khash/s
        self.autostart = False
        # NUMBER_OF_PROCESSORS is a Windows env var; default to 1 elsewhere.
        self.num_processors = int(os.getenv('NUMBER_OF_PROCESSORS', 1))
        self.affinity_mask = 0
        # --- Widget construction ---------------------------------------
        self.server_lbl = wx.StaticText(self, -1, _("Server:"))
        self.summary_panel = None # SummaryPanel instance if summary open
        self.server = wx.ComboBox(self, -1,
                                  choices=[s['name'] for s in servers],
                                  style=wx.CB_READONLY)
        self.website_lbl = wx.StaticText(self, -1, _("Website:"))
        self.website = hyperlink.HyperLinkCtrl(self, -1, "")
        self.external_lbl = wx.StaticText(self, -1, _("Ext. Path:"))
        self.txt_external = wx.TextCtrl(self, -1, "")
        self.host_lbl = wx.StaticText(self, -1, _("Host:"))
        self.txt_host = wx.TextCtrl(self, -1, "")
        self.port_lbl = wx.StaticText(self, -1, _("Port:"))
        self.txt_port = wx.TextCtrl(self, -1, "")
        self.user_lbl = wx.StaticText(self, -1, STR_USERNAME)
        self.txt_username = wx.TextCtrl(self, -1, "")
        self.pass_lbl = wx.StaticText(self, -1, STR_PASSWORD)
        self.txt_pass = wx.TextCtrl(self, -1, "", style=wx.TE_PASSWORD)
        self.device_lbl = wx.StaticText(self, -1, _("Device:"))
        self.device_listbox = wx.ComboBox(self, -1, choices=devices or [_("No OpenCL devices")], style=wx.CB_READONLY)
        self.flags_lbl = wx.StaticText(self, -1, _("Extra flags:"))
        self.txt_flags = wx.TextCtrl(self, -1, "")
        self.extra_info = wx.StaticText(self, -1, "")
        self.affinity_lbl = wx.StaticText(self, -1, _("CPU Affinity:"))
        self.affinity_chks = [wx.CheckBox(self, label='%d ' % i)
                              for i in range(self.num_processors)]
        self.balance_lbl = wx.StaticText(self, -1, _("Balance:"))
        self.balance_amt = wx.StaticText(self, -1, "0")
        self.balance_refresh = wx.Button(self, -1, STR_REFRESH_BALANCE)
        self.balance_refresh_timer = wx.Timer()
        self.withdraw = wx.Button(self, -1, _("Withdraw"))
        self.balance_cooldown_seconds = 0
        self.balance_auth_token = ""
        # Widget groups used by set_widgets_visible()/layout code.
        self.labels = [self.server_lbl, self.website_lbl,
                       self.host_lbl, self.port_lbl,
                       self.user_lbl, self.pass_lbl,
                       self.device_lbl, self.flags_lbl,
                       self.balance_lbl]
        self.txts = [self.txt_host, self.txt_port,
                     self.txt_username, self.txt_pass,
                     self.txt_flags]
        self.all_widgets = [self.server, self.website,
                            self.device_listbox,
                            self.balance_amt,
                            self.balance_refresh,
                            self.withdraw] + self.labels + self.txts + self.affinity_chks
        self.hidden_widgets = [self.extra_info,
                               self.txt_external,
                               self.external_lbl]
        self.start = wx.Button(self, -1, STR_START_MINING)
        # --- Initial values and event bindings -------------------------
        self.device_listbox.SetSelection(0)
        self.server.SetStringSelection(self.defaults.get('default_server'))
        self.set_data(data)
        for txt in self.txts:
            txt.Bind(wx.EVT_KEY_UP, self.check_if_modified)
        self.device_listbox.Bind(wx.EVT_COMBOBOX, self.check_if_modified)
        self.start.Bind(wx.EVT_BUTTON, self.toggle_mining)
        self.server.Bind(wx.EVT_COMBOBOX, self.on_select_server)
        self.balance_refresh_timer.Bind(wx.EVT_TIMER, self.on_balance_cooldown_tick)
        self.balance_refresh.Bind(wx.EVT_BUTTON, self.on_balance_refresh)
        self.withdraw.Bind(wx.EVT_BUTTON, self.on_withdraw)
        for chk in self.affinity_chks:
            chk.Bind(wx.EVT_CHECKBOX, self.on_affinity_check)
        # Custom events posted by the MinerListenerThread.
        self.Bind(EVT_UPDATE_HASHRATE, lambda event: self.update_khash(event.rate))
        self.Bind(EVT_UPDATE_ACCEPTED, lambda event: self.update_shares(event.accepted))
        self.Bind(EVT_UPDATE_STATUS, lambda event: self.update_status(event.text))
        self.Bind(EVT_UPDATE_SOLOCHECK, lambda event: self.update_solo())
        self.update_statusbar()
        self.clear_summary_widgets()
@property
def last_update_time(self):
"""Return the local time of the last accepted share."""
if self.accepted_times:
return time.localtime(self.accepted_times[-1])
return None
@property
def server_config(self):
hostname = self.txt_host.GetValue()
return self.get_server_by_field(hostname, 'host')
@property
def is_solo(self):
"""Return True if this miner is configured for solo mining."""
return self.server.GetStringSelection() == "solo"
@property
def is_modified(self):
"""Return True if this miner has unsaved changes pending."""
return self.last_data != self.get_data()
@property
def external_path(self):
"""Return the path to an external miner, or "" if none is present."""
return self.txt_external.GetValue()
@property
def is_external_miner(self):
"""Return True if this miner has an external path configured."""
return self.txt_external.GetValue() != ""
@property
def host_with_http_prefix(self):
"""Return the host address, with http:// prepended if needed."""
host = self.txt_host.GetValue()
if not host.startswith("http://"):
host = "http://" + host
return host
@property
def host_without_http_prefix(self):
"""Return the host address, with http:// stripped off if needed."""
host = self.txt_host.GetValue()
if host.startswith("http://"):
return host[len('http://'):]
return host
@property
def device_index(self):
"""Return the index of the currently selected OpenCL device."""
s = self.device_listbox.GetStringSelection()
match = re.search(r'\[(\d+)-(\d+)\]', s)
try: return int(match.group(2))
except: return 0
@property
def platform_index(self):
"""Return the index of the currently selected OpenCL platform."""
s = self.device_listbox.GetStringSelection()
match = re.search(r'\[(\d+)-(\d+)\]', s)
try: return int(match.group(1))
except: return 0
@property
def is_device_visible(self):
"""Return True if we are using a backend with device selection."""
NO_DEVICE_SELECTION = ['rpcminer', 'bitcoin-miner']
return not any(d in self.external_path for d in NO_DEVICE_SELECTION)
def on_affinity_check(self, event):
"""Set the affinity mask to the selected value."""
self.affinity_mask = 0
for i in range(self.num_processors):
is_checked = self.affinity_chks[i].GetValue()
self.affinity_mask += (is_checked << i)
if self.is_mining:
try:
set_process_affinity(self.miner.pid, self.affinity_mask)
except:
pass # TODO: test on Linux
def pause(self):
"""Pause the miner if we are mining, otherwise do nothing."""
if self.is_mining:
self.stop_mining()
self.is_paused = True
def resume(self):
"""Resume the miner if we are paused, otherwise do nothing."""
if self.is_paused:
self.start_mining()
self.is_paused = False
def get_data(self):
"""Return a dict of our profile data."""
return dict(name=self.name,
hostname=self.txt_host.GetValue(),
port=self.txt_port.GetValue(),
username=self.txt_username.GetValue(),
password=self.txt_pass.GetValue(),
device=self.device_listbox.GetSelection(),
flags=self.txt_flags.GetValue(),
autostart=self.autostart,
affinity_mask=self.affinity_mask,
balance_auth_token=self.balance_auth_token,
external_path=self.external_path)
    def set_data(self, data):
        """Set our profile data to the information in data. See get_data()."""
        self.last_data = data
        default_server_config = self.get_server_by_field(
            self.defaults['default_server'], 'name')
        self.name = (data.get('name') or _('Default'))
        # Backwards compatibility: hostname key used to be called server.
        # We only save out hostname now but accept server from old INI files.
        hostname = (data.get('hostname') or
                    data.get('server') or
                    default_server_config['host'])
        self.txt_host.SetValue(hostname)
        # server_config derives from txt_host, so it must be set first.
        self.server.SetStringSelection(self.server_config.get('name', "Other"))
        self.txt_username.SetValue(
            data.get('username') or
            self.defaults.get('default_username', ''))
        self.txt_pass.SetValue(
            data.get('password') or
            self.defaults.get('default_password', ''))
        self.txt_port.SetValue(str(
            data.get('port') or
            self.server_config.get('port', 8332)))
        self.txt_flags.SetValue(data.get('flags', ''))
        self.autostart = data.get('autostart', False)
        # Mask defaults to 1 (CPU 0 only); mirror it into the checkboxes.
        self.affinity_mask = data.get('affinity_mask', 1)
        for i in range(self.num_processors):
            self.affinity_chks[i].SetValue((self.affinity_mask >> i) & 1)
        self.txt_external.SetValue(data.get('external_path', ''))
        # Handle case where they removed devices since last run.
        device_index = data.get('device', None)
        if device_index is not None and device_index < self.device_listbox.GetCount():
            self.device_listbox.SetSelection(device_index)
        self.change_server(self.server_config)
        self.balance_auth_token = data.get('balance_auth_token', '')
def clear_summary_widgets(self):
"""Release all our summary widgets."""
self.summary_name = None
self.summary_status = None
self.summary_shares_accepted = None
self.summary_shares_stale = None
self.summary_start = None
self.summary_autostart = None
def get_start_stop_state(self):
"""Return appropriate text for the start/stop button."""
return _("Stop") if self.is_mining else _("Start")
def get_start_label(self):
return STR_STOP_MINING if self.is_mining else STR_START_MINING
    def update_summary(self):
        """Update our summary fields if possible."""
        # No-op unless the summary panel is open and holds our widgets.
        if not self.summary_panel:
            return
        self.summary_name.SetLabel(self.name)
        if self.is_paused:
            text = STR_PAUSED
        elif not self.is_mining:
            text = STR_STOPPED
        elif self.is_possible_error:
            text = _("Connection problems")
        else:
            text = format_khash(self.last_rate)
        self.summary_status.SetLabel(text)
        # "total (recent)" - recent counts come from the timestamp deques.
        self.summary_shares_accepted.SetLabel("%d (%d)" %
            (self.accepted_shares, len(self.accepted_times)))
        if self.is_solo:
            # Stale/invalid shares aren't meaningful in solo mode.
            self.summary_shares_invalid.SetLabel("-")
        else:
            self.summary_shares_invalid.SetLabel("%d (%d)" %
                (self.invalid_shares, len(self.invalid_times)))
        self.summary_start.SetLabel(self.get_start_stop_state())
        self.summary_autostart.SetValue(self.autostart)
        self.summary_panel.grid.Layout()
    def get_summary_widgets(self, summary_panel):
        """Return a list of summary widgets suitable for sizer.AddMany."""
        self.summary_panel = summary_panel
        self.summary_name = wx.StaticText(summary_panel, -1, self.name)
        # Clicking the miner's name jumps to its tab.
        self.summary_name.Bind(wx.EVT_LEFT_UP, self.show_this_panel)
        self.summary_status = wx.StaticText(summary_panel, -1, STR_STOPPED)
        self.summary_shares_accepted = wx.StaticText(summary_panel, -1, "0")
        self.summary_shares_invalid = wx.StaticText(summary_panel, -1, "0")
        self.summary_start = wx.Button(summary_panel, -1, self.get_start_stop_state(), style=wx.BU_EXACTFIT)
        self.summary_start.Bind(wx.EVT_BUTTON, self.toggle_mining)
        self.summary_autostart = wx.CheckBox(summary_panel, -1)
        self.summary_autostart.Bind(wx.EVT_CHECKBOX, self.toggle_autostart)
        self.summary_autostart.SetValue(self.autostart)
        # (widget, proportion, flags, border) tuples for FlexGridSizer.AddMany.
        return [
            (self.summary_name, 0, wx.ALIGN_CENTER_HORIZONTAL),
            (self.summary_status, 0, wx.ALIGN_CENTER_HORIZONTAL, 0),
            (self.summary_shares_accepted, 0, wx.ALIGN_CENTER_HORIZONTAL, 0),
            (self.summary_shares_invalid, 0, wx.ALIGN_CENTER_HORIZONTAL, 0),
            (self.summary_start, 0, wx.ALIGN_CENTER, 0),
            (self.summary_autostart, 0, wx.ALIGN_CENTER, 0)
        ]
def show_this_panel(self, event):
"""Set focus to this panel."""
self.parent.SetSelection(self.parent.GetPageIndex(self))
def toggle_autostart(self, event):
self.autostart = event.IsChecked()
def toggle_mining(self, event):
"""Stop or start the miner."""
if self.is_mining:
self.stop_mining()
else:
self.start_mining()
self.update_summary()
#############################
# Begin backend specific code
    def configure_subprocess_poclbm(self):
        """Set up the command line for poclbm.

        Returns (cmd, cwd) for subprocess.Popen. Under py2exe/py2app
        (sys.frozen) the bundled binary is used instead of the script.
        """
        folder = get_module_path()
        if USE_MOCK:
            # Test backend that fakes miner output.
            executable = "python mockBitcoinMiner.py"
        else:
            if hasattr(sys, 'frozen'):
                executable = "poclbm.app/Contents/MacOS/poclbm"
            else:
                executable = "python poclbm.py"
        cmd = "%s %s:%s@%s:%s --device=%d --platform=%d --verbose %s" % (
            executable,
            self.txt_username.GetValue(),
            self.txt_pass.GetValue(),
            self.txt_host.GetValue(),
            self.txt_port.GetValue(),
            self.device_index,
            self.platform_index,
            self.txt_flags.GetValue()
        )
        return cmd, folder
    def configure_subprocess_rpcminer(self):
        """Set up the command line for rpcminer.
        The hostname must start with http:// for these miners.

        Returns (cmd, cwd) for subprocess.Popen.
        """
        cmd = "%s -user=%s -password=%s -url=%s:%s %s" % (
            self.external_path,
            self.txt_username.GetValue(),
            self.txt_pass.GetValue(),
            self.host_with_http_prefix,
            self.txt_port.GetValue(),
            self.txt_flags.GetValue()
        )
        return cmd, os.path.dirname(self.external_path)
    def configure_subprocess_ufasoft(self):
        """Set up the command line for ufasoft's SSE2 miner.
        The hostname must start with http:// for these miners.

        Returns (cmd, cwd) for subprocess.Popen.
        """
        cmd = "%s -u %s -p %s -o %s:%s %s" % (
            self.external_path,
            self.txt_username.GetValue(),
            self.txt_pass.GetValue(),
            self.host_with_http_prefix,
            self.txt_port.GetValue(),
            self.txt_flags.GetValue())
        return cmd, os.path.dirname(self.external_path)
    def configure_subprocess_phoenix(self):
        """Set up the command line for phoenix miner.

        Returns (cmd, cwd) for subprocess.Popen. A .py path is run via
        the python interpreter.
        """
        path = self.external_path
        if path.endswith('.py'):
            path = "python " + path
        cmd = "%s -u http://%s:%s@%s:%s PLATFORM=%d DEVICE=%d %s" % (
            path,
            self.txt_username.GetValue(),
            self.txt_pass.GetValue(),
            self.host_without_http_prefix,
            self.txt_port.GetValue(),
            self.platform_index,
            self.device_index,
            self.txt_flags.GetValue())
        return cmd, os.path.dirname(self.external_path)
    def configure_subprocess_cgminer(self):
        """Set up the command line for cgminer.

        Returns (cmd, cwd) for subprocess.Popen. A .py path is run via
        the python interpreter.
        """
        path = self.external_path
        if path.endswith('.py'):
            path = "python " + path
        # Command line arguments for cgminer here:
        # -u <username>
        # -p <password>
        # -o <http://server.ip:port>
        # -d <device appear in pyopencl>
        # -l <log message period in second>
        # -T <disable curses interface and output to console (stdout)>
        cmd = "%s -u %s -p %s -o http://%s:%s -d %s -l 1 -T %s" % (
            path,
            self.txt_username.GetValue(),
            self.txt_pass.GetValue(),
            self.host_without_http_prefix,
            self.txt_port.GetValue(),
            self.device_index,
            self.txt_flags.GetValue())
        return cmd, os.path.dirname(self.external_path)
# End backend specific code
###########################
def start_mining(self):
"""Launch a miner subprocess and attach a MinerListenerThread."""
self.is_paused = False
# Avoid showing a console window when frozen
try: import win32process
except ImportError: flags = 0
else: flags = win32process.CREATE_NO_WINDOW
# Determine what command line arguments to use
listener_cls = MinerListenerThread
if not self.is_external_miner:
conf_func = self.configure_subprocess_poclbm
elif "rpcminer" in self.external_path:
conf_func = self.configure_subprocess_rpcminer
elif "bitcoin-miner" in self.external_path:
conf_func = self.configure_subprocess_ufasoft
elif "phoenix" in self.external_path:
conf_func = self.configure_subprocess_phoenix
listener_cls = PhoenixListenerThread
elif "cgminer" in self.external_path:
conf_func = self.configure_subprocess_cgminer
listener_cls = CgListenerThread
else:
raise ValueError # TODO: handle unrecognized miner
cmd, cwd = conf_func()
# for ufasoft:
# redirect stderr to stdout
# use universal_newlines to catch the \r output on Mhash/s lines
try:
logger.debug(_('Running command: ') + cmd)
# for cgminer:
# We need only the STDOUT for meaningful messages.
if conf_func == self.configure_subprocess_cgminer:
self.miner = subprocess.Popen(cmd, cwd=cwd,
stdout=subprocess.PIPE,
stderr=None,
universal_newlines=True,
creationflags=flags,
shell=(sys.platform != 'win32'))
else:
self.miner = subprocess.Popen(cmd, cwd=cwd,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=True,
creationflags=flags,
shell=(sys.platform != 'win32'))
except OSError:
raise #TODO: the folder or exe could not exist
self.miner_listener = listener_cls(self, self.miner)
self.miner_listener.daemon = True
self.miner_listener.start()
self.is_mining = True
self.set_status(STR_STARTING, 1)
self.start.SetLabel(self.get_start_label())
try:
set_process_affinity(self.miner.pid, self.affinity_mask)
except:
pass # TODO: test on Linux
def on_close(self):
"""Prepare to close gracefully."""
self.stop_mining()
self.balance_refresh_timer.Stop()
    def stop_mining(self):
        """Terminate the poclbm process if able and its associated listener."""
        if self.miner is not None:
            # returncode is None while the process is still running.
            if self.miner.returncode is None:
                # It didn't return yet so it's still running.
                try:
                    self.miner.terminate()
                except OSError:
                    pass # TODO: Guess it wasn't still running?
            self.miner = None
        if self.miner_listener is not None:
            # Signal the listener thread's run() loop to exit.
            self.miner_listener.shutdown_event.set()
            self.miner_listener = None
        self.is_mining = False
        self.is_paused = False
        self.set_status(STR_STOPPED, 1)
        self.start.SetLabel(self.get_start_label())
def update_khash(self, rate):
"""Update our rate according to a report from the listener thread.
If we are receiving rate messages then it means poclbm is no longer
reporting errors.
"""
self.last_rate = rate
self.set_status(format_khash(rate), 1)
if self.is_possible_error:
self.update_statusbar()
self.is_possible_error = False
    def update_statusbar(self):
        """Show the shares or equivalent on the statusbar."""
        if self.is_solo:
            # Solo mode: report difficulty-1 hashes and blocks found.
            text = _("Difficulty 1 hashes: %(nhashes)d %(update_time)s") % \
                dict(nhashes=self.accepted_shares,
                     update_time=self.format_last_update_time())
            if self.solo_blocks_found > 0:
                block_text = _("Blocks: %d, ") % self.solo_blocks_found
                text = block_text + text
        else:
            # Pool mode: report accepted and stale/invalid shares.
            text = _("Shares: %d accepted") % self.accepted_shares
            if self.invalid_shares > 0:
                text += _(", %d stale/invalid") % self.invalid_shares
            text += " %s" % self.format_last_update_time()
        self.set_status(text, 0)
def update_last_time(self, accepted):
"""Set the last update time to now (in local time)."""
now = time.time()
if accepted:
self.accepted_times.append(now)
while now - self.accepted_times[0] > SAMPLE_TIME_SECS:
self.accepted_times.popleft()
else:
self.invalid_times.append(now)
while now - self.invalid_times[0] > SAMPLE_TIME_SECS:
self.invalid_times.popleft()
def format_last_update_time(self):
"""Format last update time for display."""
time_fmt = '%I:%M:%S%p'
if self.last_update_time is None:
return ""
return _("- last at %s") % time.strftime(time_fmt, self.last_update_time)
def update_shares(self, accepted):
"""Update our shares with a report from the listener thread."""
if self.is_solo and accepted:
self.solo_blocks_found += 1
elif accepted:
self.accepted_shares += 1
else:
self.invalid_shares += 1
self.update_last_time(accepted)
self.update_statusbar()
def update_status(self, msg):
"""Update our status with a report from the listener thread.
If we receive a message from poclbm we don't know how to interpret,
it's probably some kind of error state - in this case the best
thing to do is just show it to the user on the status bar.
"""
self.set_status(msg)
self.is_possible_error = True
def set_status(self, msg, index=0):
"""Set the current statusbar text, but only if we have focus."""
if self.parent.GetSelection() == self.parent.GetPageIndex(self):
self.statusbar.SetStatusText(msg, index)
def on_focus(self):
"""When we receive focus, update our status.
This ensures that when switching tabs, the statusbar always
shows the current tab's status.
"""
self.update_statusbar()
if self.is_mining:
self.update_khash(self.last_rate)
else:
self.set_status(STR_STOPPED, 1)
def get_taskbar_text(self):
"""Return text for the hover state of the taskbar."""
rate = format_khash(self.last_rate) if self.is_mining else STR_STOPPED
return "%s: %s" % (self.name, rate)
def update_solo(self):
"""Update our easy hashes with a report from the listener thread."""
self.accepted_shares += 1
self.update_last_time(True)
self.update_statusbar()
def on_select_server(self, event):
"""Update our info in response to a new server choice."""
new_server_name = self.server.GetValue()
new_server = self.get_server_by_field(new_server_name, 'name')
self.change_server(new_server)
def get_server_by_field(self, target_val, field):
"""Return the first server dict with the specified val, or {}."""
for s in self.servers:
if s.get(field) == target_val:
return s
return {}
def set_widgets_visible(self, widgets, show=False):
"""Show or hide each widget in widgets according to the show flag."""
for w in widgets:
if show:
w.Show()
else:
w.Hide()
    def set_tooltips(self):
        """Attach help tooltips to all user-editable widgets on this tab."""
        add_tooltip(self.server, _("Server to connect to. Different servers have different fees and features.\nCheck their websites for full information."))
        add_tooltip(self.website, _("Website of the currently selected server. Click to visit."))
        add_tooltip(self.device_listbox, _("Available OpenCL devices on your system."))
        add_tooltip(self.txt_host, _("Host address, without http:// prefix."))
        add_tooltip(self.txt_port, _("Server port. This is usually 8332."))
        add_tooltip(self.txt_username, _("The miner's username.\nMay be different than your account username.\nExample: Kiv.GPU"))
        add_tooltip(self.txt_pass, _("The miner's password.\nMay be different than your account password."))
        add_tooltip(self.txt_flags, _("""Extra flags to pass to the miner.
For poclbm use -v -w 128 for dedicated mining, append -f 60 for desktop usage.
For cgminer use -I 8 or -I 9. Without any params for desktop usage."""))
        for chk in self.affinity_chks:
            add_tooltip(chk, _("CPU cores used for mining.\nUnchecking some cores can reduce high CPU usage in some systems."))
def reset_statistics(self):
"""Reset our share statistics to zero."""
self.solo_blocks_found = 0
self.accepted_shares = 0
self.accepted_times.clear()
self.invalid_shares = 0
self.invalid_times.clear()
self.update_statusbar()
def change_server(self, new_server):
"""Change the server to new_server, updating fields as needed."""
self.reset_statistics()
# Set defaults before we do server specific code
self.set_tooltips()
self.set_widgets_visible(self.all_widgets, True)
self.withdraw.Disable()
url = new_server.get('url', 'n/a')
self.website.SetLabel(url)
self.website.SetURL(url)
# Invalidate any previous auth token since it won't be valid for the
# new server.
self.balance_auth_token = ""
if 'host' in new_server:
self.txt_host.SetValue(new_server['host'])
if 'port' in new_server:
self.txt_port.SetValue(str(new_server['port']))
# Call server specific code.
host = new_server.get('host', "").lower()
if host == "api2.bitcoin.cz" or host == "mtred.com": self.layout_slush()
elif host == "bitpenny.dyndns.biz": self.layout_bitpenny()
elif host == "pit.deepbit.net": self.layout_deepbit()
elif host == "btcmine.com": self.layout_btcmine()
elif host == "rr.btcmp.com": self.layout_btcmp()
elif "btcguild.com" in host: self.layout_btcguild()
elif host == "bitcoin-server.de": self.layout_bitcoinserver
elif host == "pit.x8s.de": self.layout_x8s()
else: self.layout_default()
self.Layout()
self.update_tab_name()
def on_balance_cooldown_tick(self, event=None):
"""Each second, decrement the cooldown for refreshing balance."""
self.balance_cooldown_seconds -= 1
self.balance_refresh.SetLabel("%d..." % self.balance_cooldown_seconds)
if self.balance_cooldown_seconds <= 0:
self.balance_refresh_timer.Stop()
self.balance_refresh.Enable()
self.balance_refresh.SetLabel(STR_REFRESH_BALANCE)
    def require_auth_token(self):
        """Prompt the user for an auth token if they don't have one already.
        Set the result to self.balance_auth_token and return None.
        """
        if self.balance_auth_token:
            return
        url = self.server_config.get('balance_token_url')
        dialog = BalanceAuthRequest(self, url)
        dialog.txt_token.SetFocus()
        result = dialog.ShowModal()
        dialog.Destroy()
        if result == wx.ID_CANCEL:
            return
        # NOTE(review): get_value() is called after dialog.Destroy();
        # presumably it returns a cached value rather than reading the
        # destroyed widget - confirm against BalanceAuthRequest.
        self.balance_auth_token = dialog.get_value() # TODO: validate token?
def is_auth_token_rejected(self, response):
"""If the server rejected our token, reset auth_token and return True.
Otherwise, return False.
"""
if response.status in [401, 403]: # 401 Unauthorized or 403 Forbidden
# Token rejected by the server - reset their token so they'll be
# prompted again
self.balance_auth_token = ""
return True
return False
def request_balance_get(self, balance_auth_token, use_https=False):
"""Request our balance from the server via HTTP GET and auth token.
This method should be run in its own thread.
"""
response, data = http_request(
self.server_config['balance_host'],
"GET",
self.server_config["balance_url"] % balance_auth_token,
use_https=use_https
)
if self.is_auth_token_rejected(response):
data = _("Auth token rejected by server.")
elif not data:
data = STR_CONNECTION_ERROR
else:
try:
info = json.loads(data)
confirmed = (info.get('confirmed_reward') or
info.get('confirmed') or
info.get('balance') or
info.get('user', {}).get('confirmed_rewards') or
0)
unconfirmed = (info.get('unconfirmed_reward') or
info.get('unconfirmed') or
info.get('user', {}).get('unconfirmed_rewards') or
0)
if self.server_config.get('host') == "pit.deepbit.net":
ipa = info.get('ipa', False)
self.withdraw.Enable(ipa)
if self.server_config.get('host') == "rr.btcmp.com":
ipa = info.get('can_payout', False)
self.withdraw.Enable(ipa)
data = _("%s confirmed") % format_balance(confirmed)
if unconfirmed > 0:
data += _(", %s unconfirmed") % format_balance(unconfirmed)
except: # TODO: what exception here?
data = _("Bad response from server.")
wx.CallAfter(self.balance_amt.SetLabel, data)
def on_withdraw(self, event):
self.withdraw.Disable()
host = self.server_config.get('host')
if host == 'bitpenny.dyndns.biz':
self.withdraw_bitpenny()
elif host == 'pit.deepbit.net':
self.withdraw_deepbit()
elif host == 'rr.btcmp.com':
self.withdraw_btcmp()
def requires_auth_token(self, host):
"""Return True if the specified host requires an auth token for balance update."""
HOSTS_REQUIRING_AUTH_TOKEN = ["api2.bitcoin.cz",
"btcmine.com",
"pit.deepbit.net",
"pit.x8s.de",
"mtred.com",
"rr.btcmp.com",
"bitcoin-server.de"]
if host in HOSTS_REQUIRING_AUTH_TOKEN: return True
if "btcguild" in host: return True
return False
def requires_https(self, host):
"""Return True if the specified host requires HTTPs for balance update."""
return host == "mtred.com"
    def on_balance_refresh(self, event=None):
        """Refresh the miner's balance from the server.

        Spawns a worker thread for the HTTP request (different pools use
        different mechanisms), then starts a 10-second cooldown during
        which the refresh button is disabled.
        """
        host = self.server_config.get("host")
        if self.requires_auth_token(host):
            self.require_auth_token()
            if not self.balance_auth_token: # They cancelled the dialog
                return
            try:
                # The token is interpolated into a URL, so it must be ASCII.
                self.balance_auth_token.decode('ascii')
            except UnicodeDecodeError:
                return # Invalid characters in auth token
            self.http_thread = threading.Thread(
                target=self.request_balance_get,
                args=(self.balance_auth_token,),
                kwargs=dict(use_https=self.requires_https(host)))
            self.http_thread.start()
        elif host == 'bitpenny.dyndns.biz':
            # BitPenny reports balance through its payout endpoint
            # (withdraw=False means "just tell me the balance").
            self.http_thread = threading.Thread(
                target=self.request_payout_bitpenny, args=(False,))
            self.http_thread.start()
        # Throttle refreshes: disable the button and start the cooldown timer.
        self.balance_refresh.Disable()
        self.balance_cooldown_seconds = 10
        self.balance_refresh_timer.Start(1000)
#################################
# Begin server specific HTTP code
    def withdraw_btcmp(self):
        """Launch a thread to request payout from BTCMP.

        Prompts for an auth token first; aborts if the user declines.
        """
        self.require_auth_token()
        if not self.balance_auth_token: # User refused to provide token
            return
        self.http_thread = threading.Thread(
            target=self.request_payout_btcmp,
            args=(self.balance_auth_token,))
        self.http_thread.start()
def withdraw_deepbit(self):
"""Launch a thread to withdraw from deepbit."""
self.require_auth_token()
if not self.balance_auth_token: # User refused to provide token
return
self.http_thread = threading.Thread(
target=self.request_payout_deepbit,
args=(self.balance_auth_token,))
self.http_thread.start()
def withdraw_bitpenny(self):
self.http_thread = threading.Thread(
target=self.request_payout_bitpenny, args=(True,))
self.http_thread.start() # TODO: look at aliasing of this variable
    def request_payout_btcmp(self, balance_auth_token):
        """Request payout from btcmp's server via HTTP GET.

        (Docstring corrected: the request is a GET against payout_url,
        not a POST.) Should run in its own thread; the outcome string is
        posted back to the GUI thread with wx.CallAfter.
        """
        response, data = http_request(
            self.server_config['balance_host'],
            "GET",
            self.server_config["payout_url"] % balance_auth_token,
            use_https=False
        )
        if self.is_auth_token_rejected(response):
            data = _("Auth token rejected by server.")
        elif not data:
            data = STR_CONNECTION_ERROR
        else:
            data = _("Withdraw OK")
        wx.CallAfter(self.on_balance_received, data)
def request_payout_deepbit(self, balance_auth_token):
"""Request payout from deepbit's server via HTTP POST."""
post_params = dict(id=1,
method="request_payout")
response, data = http_request(
self.server_config['balance_host'],
"POST",
self.server_config['balance_url'] % balance_auth_token,
json.dumps(post_params),
{"Content-type": "application/json; charset=utf-8",
"User-Agent": USER_AGENT}
)
if self.is_auth_token_rejected(response):
data = _("Auth token rejected by server.")
elif not data:
data = STR_CONNECTION_ERROR
else:
data = _("Withdraw OK")
wx.CallAfter(self.on_balance_received, data)
def request_payout_bitpenny(self, withdraw):
"""Request our balance from BitPenny via HTTP POST.
If withdraw is True, also request a withdrawal.
"""
post_params = dict(a=self.txt_username.GetValue(), w=int(withdraw))
response, data = http_request(
self.server_config['balance_host'],
"POST",
self.server_config['balance_url'],
urllib.urlencode(post_params),
{"Content-type": "application/x-www-form-urlencoded"}
)
if self.is_auth_token_rejected(response):
data = _("Auth token rejected by server.")
elif not data:
data = STR_CONNECTION_ERROR
elif withdraw:
data = _("Withdraw OK")
wx.CallAfter(self.on_balance_received, data)
def on_balance_received(self, balance):
"""Set the balance in the GUI."""
try:
amt = float(balance)
except ValueError: # Response was some kind of error
self.balance_amt.SetLabel(balance)
else:
if amt > 0.1:
self.withdraw.Enable()
amt_str = format_balance(amt)
self.balance_amt.SetLabel(amt_str)
self.Layout()
# End server specific HTTP code
###############################
def set_name(self, name):
"""Set the label on this miner's tab to name."""
self.name = name
if self.summary_name:
self.summary_name.SetLabel(self.name)
self.update_tab_name()
def update_tab_name(self):
"""Update the tab name to reflect modified status."""
name = self.name
if self.is_modified:
name += "*"
page = self.parent.GetPageIndex(self)
if page != -1:
self.parent.SetPageText(page, name)
    def check_if_modified(self, event):
        """Update the title of the tab to have an asterisk if we are modified."""
        self.update_tab_name()
        # Let the triggering widget event continue to other handlers.
        event.Skip()
    def on_saved(self):
        """Update our last data after a save.

        Records the current widget data as the saved baseline and
        refreshes the tab title.
        """
        self.last_data = self.get_data()
        self.update_tab_name()
def layout_init(self):
"""Create the sizers for this frame and set up the external text.
Return the lowest row that is available.
"""
self.frame_sizer = wx.BoxSizer(wx.VERTICAL)
self.frame_sizer.Add((20, 10), 0, wx.EXPAND, 0)
self.inner_sizer = wx.GridBagSizer(10, 5)
self.button_sizer = wx.BoxSizer(wx.HORIZONTAL)
row = 0
if self.is_external_miner:
self.inner_sizer.Add(self.external_lbl, (row, 0), flag=LBL_STYLE)
self.inner_sizer.Add(self.txt_external, (row, 1), span=(1, 3), flag=wx.EXPAND)
row += 1
return row
def layout_server_and_website(self, row):
"""Lay out the server and website widgets in the specified row."""
self.inner_sizer.Add(self.server_lbl, (row, 0), flag=LBL_STYLE)
self.inner_sizer.Add(self.server, (row, 1), flag=wx.EXPAND)
self.inner_sizer.Add(self.website_lbl, (row, 2), flag=LBL_STYLE)
self.inner_sizer.Add(self.website, (row, 3), flag=wx.ALIGN_CENTER_VERTICAL)
def layout_host_and_port(self, row):
"""Lay out the host and port widgets in the specified row."""
self.inner_sizer.Add(self.host_lbl, (row, 0), flag=LBL_STYLE)
self.inner_sizer.Add(self.txt_host, (row, 1), flag=wx.EXPAND)
self.inner_sizer.Add(self.port_lbl, (row, 2), flag=LBL_STYLE)
self.inner_sizer.Add(self.txt_port, (row, 3), flag=wx.EXPAND)
def layout_user_and_pass(self, row):
"""Lay out the user and pass widgets in the specified row."""
self.inner_sizer.Add(self.user_lbl, (row, 0), flag=LBL_STYLE)
self.inner_sizer.Add(self.txt_username, (row, 1), flag=wx.EXPAND)
self.inner_sizer.Add(self.pass_lbl, (row, 2), flag=LBL_STYLE)
self.inner_sizer.Add(self.txt_pass, (row, 3), flag=wx.EXPAND)
def layout_device_and_flags(self, row):
"""Lay out the device and flags widgets in the specified row.
Hide the device dropdown if RPCMiner is present since it doesn't use it.
"""
device_visible = self.is_device_visible
self.set_widgets_visible([self.device_lbl, self.device_listbox], device_visible)
if device_visible:
self.inner_sizer.Add(self.device_lbl, (row, 0), flag=LBL_STYLE)
self.inner_sizer.Add(self.device_listbox, (row, 1), flag=wx.EXPAND)
col = 2 * (device_visible)
self.inner_sizer.Add(self.flags_lbl, (row, col), flag=LBL_STYLE)
span = (1, 1) if device_visible else (1, 4)
self.inner_sizer.Add(self.txt_flags, (row, col + 1), span=span, flag=wx.EXPAND)
def layout_affinity(self, row):
"""Lay out the affinity checkboxes in the specified row."""
self.inner_sizer.Add(self.affinity_lbl, (row, 0))
affinity_sizer = wx.BoxSizer(wx.HORIZONTAL)
for chk in self.affinity_chks:
affinity_sizer.Add(chk)
self.inner_sizer.Add(affinity_sizer, (row, 1))
def layout_balance(self, row):
"""Lay out the balance widgets in the specified row."""
self.inner_sizer.Add(self.balance_lbl, (row, 0), flag=LBL_STYLE)
self.inner_sizer.Add(self.balance_amt, (row, 1))
def layout_finish(self):
"""Lay out the buttons and fit the sizer to the window."""
self.frame_sizer.Add(self.inner_sizer, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 10)
self.frame_sizer.Add(self.button_sizer, 0, wx.ALIGN_CENTER_HORIZONTAL)
self.inner_sizer.AddGrowableCol(1)
self.inner_sizer.AddGrowableCol(3)
for btn in [self.start, self.balance_refresh, self.withdraw]:
self.button_sizer.Add(btn, 0, BTN_STYLE, 5)
self.set_widgets_visible([self.external_lbl, self.txt_external],
self.is_external_miner)
self.SetSizerAndFit(self.frame_sizer)
def layout_default(self):
"""Lay out a default miner with no custom changes."""
self.user_lbl.SetLabel(STR_USERNAME)
self.set_widgets_visible(self.hidden_widgets, False)
self.set_widgets_visible([self.balance_lbl,
self.balance_amt,
self.balance_refresh,
self.withdraw], False)
row = self.layout_init()
self.layout_server_and_website(row=row)
customs = ["other", "solo"]
is_custom = self.server.GetStringSelection().lower() in customs
if is_custom:
self.layout_host_and_port(row=row + 1)
else:
self.set_widgets_visible([self.host_lbl, self.txt_host,
self.port_lbl, self.txt_port], False)
self.layout_user_and_pass(row=row + 1 + int(is_custom))
self.layout_device_and_flags(row=row + 2 + int(is_custom))
self.layout_affinity(row=row + 3 + int(is_custom))
self.layout_finish()
############################
# Begin server specific code
    def layout_bitpenny(self):
        """BitPenny doesn't require registration or a password.
        The username is just their receiving address.
        """
        invisible = [self.txt_pass, self.txt_host, self.txt_port,
                     self.pass_lbl, self.host_lbl, self.port_lbl]
        self.set_widgets_visible(invisible, False)
        self.set_widgets_visible([self.extra_info], True)
        row = self.layout_init()
        self.layout_server_and_website(row=row)
        # Address field spans the full width since there's no password column.
        self.inner_sizer.Add(self.user_lbl, (row + 1, 0), flag=LBL_STYLE)
        self.inner_sizer.Add(self.txt_username, (row + 1, 1), span=(1, 3), flag=wx.EXPAND)
        self.layout_device_and_flags(row=row + 2)
        self.layout_affinity(row=row + 3)
        self.layout_balance(row=row + 4)
        self.inner_sizer.Add(self.extra_info, (row + 5, 0), span=(1, 4), flag=wx.ALIGN_CENTER_HORIZONTAL)
        self.layout_finish()
        self.extra_info.SetLabel(_("No registration is required - just enter an address and press Start."))
        # The hidden password field still gets a fixed placeholder value.
        self.txt_pass.SetValue('poclbm-gui')
        self.user_lbl.SetLabel(_("Address:"))
        add_tooltip(self.txt_username,
            _("Your receiving address for Bitcoins.\nE.g.: 1A94cjRpaPBMV9ZNWFihB5rTFEeihBALgc"))
def layout_slush(self):
"""Slush's pool uses a separate username for each miner."""
self.set_widgets_visible([self.host_lbl, self.txt_host,
self.port_lbl, self.txt_port,
self.withdraw, self.extra_info], False)
row = self.layout_init()
self.layout_server_and_website(row=row)
self.layout_user_and_pass(row=row + 1)
self.layout_device_and_flags(row=row + 2)
self.layout_affinity(row=row + 3)
self.layout_balance(row=row + 4)
self.layout_finish()
add_tooltip(self.txt_username,
_("Your miner username (not your account username).\nExample: Kiv.GPU"))
add_tooltip(self.txt_pass,
_("Your miner password (not your account password)."))
def layout_btcguild(self):
"""BTC Guild has the same layout as slush for now."""
self.layout_slush()
def layout_bitcoinserver(self):
"""Bitcoin-Server.de has the same layout as slush for now."""
self.layout_slush()
def layout_btcmine(self):
self.set_widgets_visible([self.host_lbl, self.txt_host,
self.port_lbl, self.txt_port,
self.withdraw, self.extra_info], False)
row = self.layout_init()
self.layout_server_and_website(row=row)
self.layout_user_and_pass(row=row + 1)
self.layout_device_and_flags(row=row + 2)
self.layout_affinity(row=row + 3)
self.layout_balance(row=row + 4)
self.layout_finish()
add_tooltip(self.txt_username,
_("Your miner username. \nExample: kiv123@kiv123"))
add_tooltip(self.txt_pass,
_("Your miner password (not your account password)."))
def layout_deepbit(self):
"""Deepbit uses an email address for a username."""
self.set_widgets_visible([self.host_lbl, self.txt_host,
self.port_lbl, self.txt_port,
self.extra_info], False)
row = self.layout_init()
self.layout_server_and_website(row=row)
self.layout_user_and_pass(row=row + 1)
self.layout_device_and_flags(row=row + 2)
self.layout_affinity(row=row + 3)
self.layout_balance(row=row + 4)
self.layout_finish()
add_tooltip(self.txt_username,
_("The e-mail address you registered with."))
self.user_lbl.SetLabel(_("Email:"))
    def layout_btcmp(self):
        """BTCMP uses a worker name ("username.workername") for a username."""
        self.set_widgets_visible([self.host_lbl, self.txt_host,
                                  self.port_lbl, self.txt_port,
                                  self.extra_info], False)
        row = self.layout_init()
        self.layout_server_and_website(row=row)
        self.layout_user_and_pass(row=row + 1)
        self.layout_device_and_flags(row=row + 2)
        self.layout_affinity(row=row + 3)
        self.layout_balance(row=row + 4)
        self.layout_finish()
        add_tooltip(self.txt_username,
            _("Your worker name. Is something in the form of username.workername"))
        self.user_lbl.SetLabel(_("Workername:"))
def layout_x8s(self):
"""x8s has the same layout as slush for now."""
self.layout_slush()
# End server specific code
##########################
class GUIMiner(wx.Frame):
def __init__(self, *args, **kwds):
wx.Frame.__init__(self, *args, **kwds)
style = fnb.FNB_X_ON_TAB | fnb.FNB_FF2 | fnb.FNB_HIDE_ON_SINGLE_TAB
self.nb = fnb.FlatNotebook(self, -1, style=style)
# Set up notebook context menu
notebook_menu = wx.Menu()
ID_RENAME, ID_DUPLICATE = wx.NewId(), wx.NewId()
notebook_menu.Append(ID_RENAME, _("&Rename..."), _("Rename this miner"))
notebook_menu.Append(ID_DUPLICATE, _("&Duplicate...", _("Duplicate this miner")))
self.nb.SetRightClickMenu(notebook_menu)
self.Bind(wx.EVT_MENU, self.rename_miner, id=ID_RENAME)
self.Bind(wx.EVT_MENU, self.duplicate_miner, id=ID_DUPLICATE)
self.console_panel = None
self.summary_panel = None
# Servers and defaults are required, it's a fatal error not to have
# them.
server_config_path = os.path.join(get_module_path(), 'servers.ini')
with open(server_config_path) as f:
data = json.load(f)
self.servers = data.get('servers')
defaults_config_path = os.path.join(get_module_path(), 'defaults.ini')
with open(defaults_config_path) as f:
self.defaults = json.load(f)
self.parse_config()
self.do_show_opencl_warning = self.config_data.get('show_opencl_warning', True)
self.console_max_lines = self.config_data.get('console_max_lines', 5000)
ID_NEW_EXTERNAL, ID_NEW_PHOENIX, ID_NEW_CGMINER, ID_NEW_CUDA, ID_NEW_UFASOFT = wx.NewId(), wx.NewId(), wx.NewId(), wx.NewId(), wx.NewId()
self.menubar = wx.MenuBar()
file_menu = wx.Menu()
new_menu = wx.Menu()
new_menu.Append(wx.ID_NEW, _("&New OpenCL miner..."), _("Create a new OpenCL miner (default for ATI cards)"), wx.ITEM_NORMAL)
new_menu.Append(ID_NEW_PHOENIX, _("New Phoenix miner..."), _("Create a new Phoenix miner (for some ATI cards)"), wx.ITEM_NORMAL)
new_menu.Append(ID_NEW_CGMINER, _("New CG miner..."), _("Create a new CGMiner (for some ATI cards)"), wx.ITEM_NORMAL)
new_menu.Append(ID_NEW_CUDA, _("New CUDA miner..."), _("Create a new CUDA miner (for NVIDIA cards)"), wx.ITEM_NORMAL)
new_menu.Append(ID_NEW_UFASOFT, _("New Ufasoft CPU miner..."), _("Create a new Ufasoft miner (for CPUs)"), wx.ITEM_NORMAL)
new_menu.Append(ID_NEW_EXTERNAL, _("New &other miner..."), _("Create a new custom miner (requires external program)"), wx.ITEM_NORMAL)
file_menu.AppendMenu(wx.NewId(), _('&New miner'), new_menu)
file_menu.Append(wx.ID_SAVE, _("&Save settings"), _("Save your settings"), wx.ITEM_NORMAL)
file_menu.Append(wx.ID_OPEN, _("&Load settings"), _("Load stored settings"), wx.ITEM_NORMAL)
file_menu.Append(wx.ID_EXIT, _("Quit"), STR_QUIT, wx.ITEM_NORMAL)
self.menubar.Append(file_menu, _("&File"))
ID_SUMMARY, ID_CONSOLE = wx.NewId(), wx.NewId()
view_menu = wx.Menu()
view_menu.Append(ID_SUMMARY, _("Show summary"), _("Show summary of all miners"), wx.ITEM_NORMAL)
view_menu.Append(ID_CONSOLE, _("Show console"), _("Show console logs"), wx.ITEM_NORMAL)
self.menubar.Append(view_menu, _("&View"))
ID_SOLO, ID_PATHS, ID_LAUNCH = wx.NewId(), wx.NewId(), wx.NewId()
solo_menu = wx.Menu()
solo_menu.Append(ID_SOLO, _("&Create solo password..."), _("Configure a user/pass for solo mining"), wx.ITEM_NORMAL)
solo_menu.Append(ID_PATHS, _("&Set Bitcoin client path..."), _("Set the location of the official Bitcoin client"), wx.ITEM_NORMAL)
solo_menu.Append(ID_LAUNCH, _("&Launch Bitcoin client as server"), _("Launch the official Bitcoin client as a server for solo mining"), wx.ITEM_NORMAL)
self.menubar.Append(solo_menu, _("&Solo utilities"))
ID_START_MINIMIZED = wx.NewId()
self.options_menu = wx.Menu()
self.start_minimized_chk = self.options_menu.Append(ID_START_MINIMIZED, _("Start &minimized"), _("Start the GUI minimized to the tray."), wx.ITEM_CHECK)
self.options_menu.Check(ID_START_MINIMIZED, self.config_data.get('start_minimized', False))
self.menubar.Append(self.options_menu, _("&Options"))
ID_CHANGE_LANGUAGE = wx.NewId()
lang_menu = wx.Menu()
lang_menu.Append(ID_CHANGE_LANGUAGE, _("&Change language..."), "", wx.ITEM_NORMAL)
self.menubar.Append(lang_menu, _("Language"))
ID_DONATE_SMALL = wx.NewId()
donate_menu = wx.Menu()
donate_menu.Append(ID_DONATE_SMALL, _("&Donate..."), _("Donate Bitcoins to support GUIMiner development"))
self.menubar.Append(donate_menu, _("&Donate"))
help_menu = wx.Menu()
help_menu.Append(wx.ID_ABOUT, _("&About..."), STR_ABOUT, wx.ITEM_NORMAL)
self.menubar.Append(help_menu, _("&Help"))
self.SetMenuBar(self.menubar)
self.statusbar = self.CreateStatusBar(2, 0)
try:
self.bitcoin_executable = os.path.join(os.getenv("PROGRAMFILES"), "Bitcoin", "bitcoin.exe")
except:
self.bitcoin_executable = "" # TODO: where would Bitcoin probably be on Linux/Mac?
try:
self.tbicon = GUIMinerTaskBarIcon(self)
except:
logging.error(_("Failed to load taskbar icon; continuing."))
self.set_properties()
try:
self.devices = get_opencl_devices()
except:
self.devices = []
file_menu.Enable(wx.ID_NEW, False)
file_menu.SetHelpString(wx.ID_NEW, _("OpenCL not found - can't add a OpenCL miner"))
if self.do_show_opencl_warning:
dialog = OpenCLWarningDialog(self)
dialog.ShowModal()
self.do_show_opencl_warning = not dialog.is_box_checked()
self.Bind(wx.EVT_MENU, self.name_new_profile, id=wx.ID_NEW)
self.Bind(wx.EVT_MENU, self.new_phoenix_profile, id=ID_NEW_PHOENIX)
self.Bind(wx.EVT_MENU, self.new_cgminer_profile, id=ID_NEW_CGMINER)
self.Bind(wx.EVT_MENU, self.new_ufasoft_profile, id=ID_NEW_UFASOFT)
self.Bind(wx.EVT_MENU, self.new_cuda_profile, id=ID_NEW_CUDA)
self.Bind(wx.EVT_MENU, self.new_external_profile, id=ID_NEW_EXTERNAL)
self.Bind(wx.EVT_MENU, self.save_config, id=wx.ID_SAVE)
self.Bind(wx.EVT_MENU, self.load_config, id=wx.ID_OPEN)
self.Bind(wx.EVT_MENU, self.on_menu_exit, id=wx.ID_EXIT)
self.Bind(wx.EVT_MENU, self.set_official_client_path, id=ID_PATHS)
self.Bind(wx.EVT_MENU, self.show_console, id=ID_CONSOLE)
self.Bind(wx.EVT_MENU, self.show_summary, id=ID_SUMMARY)
self.Bind(wx.EVT_MENU, self.show_about_dialog, id=wx.ID_ABOUT)
self.Bind(wx.EVT_MENU, self.create_solo_password, id=ID_SOLO)
self.Bind(wx.EVT_MENU, self.launch_solo_server, id=ID_LAUNCH)
self.Bind(wx.EVT_MENU, self.on_change_language, id=ID_CHANGE_LANGUAGE)
self.Bind(wx.EVT_MENU, self.on_donate, id=ID_DONATE_SMALL)
self.Bind(wx.EVT_CLOSE, self.on_close)
self.Bind(wx.EVT_ICONIZE, self.on_iconize)
self.Bind(fnb.EVT_FLATNOTEBOOK_PAGE_CLOSING, self.on_page_closing)
self.Bind(fnb.EVT_FLATNOTEBOOK_PAGE_CLOSED, self.on_page_closed)
self.Bind(fnb.EVT_FLATNOTEBOOK_PAGE_CHANGED, self.on_page_changed)
self.load_config()
self.do_layout()
if not self.start_minimized_chk.IsChecked():
self.Show()
def on_iconize(self, event):
if event.Iconized() and sys.platform == 'win32':
self.Hide() # On minimize, hide from taskbar.
else:
self.Show()
def set_properties(self):
self.SetIcons(get_icon_bundle())
self.SetTitle(_("GUIMiner - v%s") % __version__)
self.statusbar.SetStatusWidths([-1, 125])
statusbar_fields = ["", STR_NOT_STARTED]
for i in range(len(statusbar_fields)):
self.statusbar.SetStatusText(statusbar_fields[i], i)
def do_layout(self):
self.vertical_sizer = wx.BoxSizer(wx.VERTICAL)
self.vertical_sizer.Add(self.nb, 1, wx.EXPAND, 20)
self.SetSizer(self.vertical_sizer)
self.vertical_sizer.SetSizeHints(self)
self.SetSizerAndFit(self.vertical_sizer)
self.Layout()
@property
def profile_panels(self):
"""Return a list of currently available MinerTab."""
pages = [self.nb.GetPage(i) for i in range(self.nb.GetPageCount())]
return [p for p in pages if
p != self.console_panel and p != self.summary_panel]
def add_profile(self, data={}):
"""Add a new MinerTab to the list of tabs."""
panel = MinerTab(self.nb, -1, self.devices, self.servers,
self.defaults, self.statusbar, data)
self.nb.AddPage(panel, panel.name)
# The newly created profile should have focus.
self.nb.EnsureVisible(self.nb.GetPageCount() - 1)
if self.summary_panel is not None:
self.summary_panel.add_miners_to_grid() # Show new entry on summary
return panel
def message(self, *args, **kwargs):
"""Utility method to show a message dialog and return their choice."""
dialog = wx.MessageDialog(self, *args, **kwargs)
retval = dialog.ShowModal()
dialog.Destroy()
return retval
def name_new_profile(self, event=None, extra_profile_data={}):
"""Prompt for the new miner's name."""
dialog = wx.TextEntryDialog(self, _("Name this miner:"), _("New miner"))
if dialog.ShowModal() == wx.ID_OK:
name = dialog.GetValue().strip()
if not name: name = _("Untitled")
data = extra_profile_data.copy()
data['name'] = name
self.add_profile(data)
def new_external_profile(self, event):
"""Prompt for an external miner path, then create a miner.
On Windows we validate against legal miners; on Linux they can pick
whatever they want.
"""
wildcard = _('External miner (*.exe)|*.exe|(*.py)|*.py') if sys.platform == 'win32' else '*.*'
dialog = wx.FileDialog(self,
_("Select external miner:"),
defaultDir=os.path.join(get_module_path(), 'miners'),
defaultFile="",
wildcard=wildcard,
style=wx.OPEN)
if dialog.ShowModal() != wx.ID_OK:
return
if sys.platform == 'win32' and dialog.GetFilename() not in SUPPORTED_BACKENDS:
self.message(
_("Unsupported external miner %(filename)s. Supported are: %(supported)s") % \
dict(filename=dialog.GetFilename(), supported='\n'.join(SUPPORTED_BACKENDS)),
_("Miner not supported"), wx.OK | wx.ICON_ERROR)
return
path = os.path.join(dialog.GetDirectory(), dialog.GetFilename())
dialog.Destroy()
self.name_new_profile(extra_profile_data=dict(external_path=path))
def new_phoenix_profile(self, event):
"""Create a new miner using the Phoenix OpenCL miner backend."""
path = os.path.join(get_module_path(), 'phoenix.exe')
self.name_new_profile(extra_profile_data=dict(external_path=path))
def new_cgminer_profile(self, event):
"""Create a new miner using the Cgminer OpenCL miner backend."""
path = os.path.join(get_module_path(), 'cgminer.exe')
self.name_new_profile(extra_profile_data=dict(external_path=path))
def new_ufasoft_profile(self, event):
"""Create a new miner using the Ufasoft CPU miner backend."""
path = os.path.join(get_module_path(), 'miners', 'ufasoft', 'bitcoin-miner.exe')
self.name_new_profile(extra_profile_data=dict(external_path=path))
def new_cuda_profile(self, event):
"""Create a new miner using the CUDA GPU miner backend."""
path = os.path.join(get_module_path(), 'miners', 'puddinpop', 'rpcminer-cuda.exe')
self.name_new_profile(extra_profile_data=dict(external_path=path))
def get_storage_location(self):
"""Get the folder and filename to store our JSON config."""
if sys.platform == 'win32':
folder = os.path.join(os.environ['AppData'], 'poclbm')
config_filename = os.path.join(folder, 'poclbm.ini')
else: # Assume linux? TODO test
folder = os.environ['HOME']
config_filename = os.path.join(folder, '.poclbm')
return folder, config_filename
def on_close(self, event):
"""Minimize to tray if they click "close" but exit otherwise.
On closing, stop any miners that are currently working.
"""
if event.CanVeto():
self.Hide()
event.Veto()
else:
if any(p.is_modified for p in self.profile_panels):
dialog = wx.MessageDialog(self, _('Do you want to save changes?'), _('Save'),
wx.YES_NO | wx.YES_DEFAULT | wx.ICON_QUESTION)
retval = dialog.ShowModal()
dialog.Destroy()
if retval == wx.ID_YES:
self.save_config()
if self.console_panel is not None:
self.console_panel.on_close()
if self.summary_panel is not None:
self.summary_panel.on_close()
for p in self.profile_panels:
p.on_close()
if self.tbicon is not None:
self.tbicon.RemoveIcon()
self.tbicon.timer.Stop()
self.tbicon.Destroy()
event.Skip()
def save_config(self, event=None):
"""Save the current miner profiles to our config file in JSON format."""
folder, config_filename = self.get_storage_location()
mkdir_p(folder)
profile_data = [p.get_data() for p in self.profile_panels]
config_data = dict(show_console=self.is_console_visible(),
show_summary=self.is_summary_visible(),
profiles=profile_data,
bitcoin_executable=self.bitcoin_executable,
show_opencl_warning=self.do_show_opencl_warning,
start_minimized=self.start_minimized_chk.IsChecked(),
console_max_lines=self.console_max_lines,
window_position=list(self.GetRect()))
logger.debug(_('Saving: ') + json.dumps(config_data))
try:
with open(config_filename, 'w') as f:
json.dump(config_data, f, indent=4)
except IOError:
self.message(
_("Couldn't write save file %s.\nCheck the location is writable.") % config_filename,
_("Save unsuccessful"), wx.OK | wx.ICON_ERROR)
else:
self.message(_("Profiles saved OK to %s.") % config_filename,
_("Save successful"), wx.OK | wx.ICON_INFORMATION)
for p in self.profile_panels:
p.on_saved()
def parse_config(self):
"""Set self.config_data to a dictionary of config values."""
self.config_data = {}
try:
config_filename = self.get_storage_location()[1]
if os.path.exists(config_filename):
with open(config_filename) as f:
self.config_data.update(json.load(f))
logger.debug(_('Loaded: %s') % json.dumps(self.config_data))
except ValueError:
self.message(
_("Your settings saved at:\n %s\nare corrupt or could not be read.\nDeleting this file or saving over it may solve the problem." % config_filename),
_("Error"), wx.ICON_ERROR)
def load_config(self, event=None):
"""Load JSON profile info from the config file."""
self.parse_config()
config_data = self.config_data
executable = config_data.get('bitcoin_executable', None)
if executable is not None:
self.bitcoin_executable = executable
# Shut down any existing miners before they get clobbered
if(any(p.is_mining for p in self.profile_panels)):
result = self.message(
_("Loading profiles will stop any currently running miners. Continue?"),
_("Load profile"), wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
if result == wx.ID_NO:
return
for p in reversed(self.profile_panels):
p.on_close()
self.nb.DeletePage(self.nb.GetPageIndex(p))
# If present, summary should be the leftmost tab on startup.
if config_data.get('show_summary', False):
self.show_summary()
profile_data = config_data.get('profiles', [])
for d in profile_data:
self.add_profile(d)
if not any(profile_data):
self.add_profile() # Create a default one using defaults.ini
if config_data.get('show_console', False):
self.show_console()
window_position = config_data.get('window_position')
if window_position:
self.SetRect(window_position)
for p in self.profile_panels:
if p.autostart:
p.start_mining()
def set_official_client_path(self, event):
"""Set the path to the official Bitcoin client."""
wildcard = "bitcoin.exe" if sys.platform == 'win32' else '*.*'
dialog = wx.FileDialog(self,
_("Select path to Bitcoin.exe"),
defaultFile="bitcoin.exe",
wildcard=wildcard,
style=wx.OPEN)
if dialog.ShowModal() == wx.ID_OK:
path = os.path.join(dialog.GetDirectory(), dialog.GetFilename())
if os.path.exists(path):
self.bitcoin_executable = path
dialog.Destroy()
def show_about_dialog(self, event):
"""Show the 'about' dialog."""
dialog = AboutGuiminer(self, -1, _('About'))
dialog.ShowModal()
dialog.Destroy()
def on_page_closing(self, event):
"""Handle a tab closing event.
If they are closing a special panel, we have to shut it down.
If the tab has a miner running in it, we have to stop the miner
before letting the tab be removed.
"""
p = self.nb.GetPage(event.GetSelection())
if p == self.console_panel:
self.console_panel.on_close()
self.console_panel = None
event.Skip()
return
if p == self.summary_panel:
self.summary_panel.on_close()
self.summary_panel = None
event.Skip()
return
if p.is_mining:
result = self.message(
_("Closing this miner will stop it. Continue?"),
_("Close miner"),
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
if result == wx.ID_NO:
event.Veto()
return
p.on_close()
event.Skip() # OK to close the tab now
def on_page_closed(self, event):
if self.summary_panel is not None:
self.summary_panel.add_miners_to_grid() # Remove miner summary
def on_page_changed(self, event):
"""Handle a tab change event.
Ensures the status bar shows the status of the tab that has focus.
"""
p = self.nb.GetPage(event.GetSelection())
p.on_focus()
def launch_solo_server(self, event):
"""Launch the official bitcoin client in server mode.
This allows poclbm to connect to it for mining solo.
"""
try:
subprocess.Popen(self.bitcoin_executable + " -server")
except OSError:
self.message(
_("Couldn't find Bitcoin at %s. Is your path set correctly?") % self.bitcoin_executable,
_("Launch failed"), wx.ICON_ERROR | wx.OK)
return
self.message(
_("The Bitcoin client will now launch in server mode.\nOnce it connects to the network and downloads the block chain, you can start a miner in 'solo' mode."),
_("Launched ok."),
wx.OK)
def create_solo_password(self, event):
"""Prompt the user for login credentials to the bitcoin client.
These are required to connect to the client over JSON-RPC and are
stored in 'bitcoin.conf'.
"""
if sys.platform == 'win32':
filename = os.path.join(os.getenv("APPDATA"), "Bitcoin", "bitcoin.conf")
else: # Assume Linux for now TODO test
filename = os.path.join(os.getenv('HOME'), ".bitcoin")
if os.path.exists(filename):
result = self.message(
_("%s already exists. Overwrite?") % filename,
_("bitcoin.conf already exists."),
wx.YES_NO | wx.NO_DEFAULT | wx.ICON_INFORMATION)
if result == wx.ID_NO:
return
dialog = SoloPasswordRequest(self, _('Enter password'))
result = dialog.ShowModal()
dialog.Destroy()
if result == wx.ID_CANCEL:
return
with open(filename, "w") as f:
f.write('\nrpcuser=%s\nrpcpassword=%s\nrpcallowip=*' % dialog.get_value())
f.close()
self.message(_("Wrote bitcoin config ok."), _("Success"), wx.OK)
def is_console_visible(self):
"""Return True if the console is visible."""
return self.nb.GetPageIndex(self.console_panel) != -1
def show_console(self, event=None):
"""Show the console log in its own tab."""
if self.is_console_visible():
return # Console already shown
self.console_panel = ConsolePanel(self, self.console_max_lines)
self.nb.AddPage(self.console_panel, _("Console"))
self.nb.EnsureVisible(self.nb.GetPageCount() - 1)
def is_summary_visible(self):
"""Return True if the summary is visible."""
return self.nb.GetPageIndex(self.summary_panel) != -1
def show_summary(self, event=None):
    """Show the summary window in its own tab."""
    if self.is_summary_visible():
        return
    panel = SummaryPanel(self)
    self.summary_panel = panel
    self.nb.AddPage(panel, _("Summary"))
    self.nb.SetSelection(self.nb.GetPageIndex(panel))
def on_menu_exit(self, event):
    # Force-close the frame, bypassing any veto from close handlers.
    self.Close(force=True)
def rename_miner(self, event):
    """Change the name of a miner as displayed on the tab."""
    p = self.nb.GetPage(self.nb.GetSelection())
    if p not in self.profile_panels:
        return
    dialog = wx.TextEntryDialog(self, _("Rename to:"), _("Rename miner"))
    if dialog.ShowModal() == wx.ID_OK:
        p.set_name(dialog.GetValue().strip())
    # Bug fix: destroy the native dialog resource afterwards; the original
    # leaked it (on_change_language and on_donate already do this).
    dialog.Destroy()
def duplicate_miner(self, event):
    """Duplicate the current miner to another miner."""
    selected = self.nb.GetPage(self.nb.GetSelection())
    if selected in self.profile_panels:
        self.name_new_profile(event=None, extra_profile_data=selected.get_data())
def on_change_language(self, event):
    """Let the user pick a UI language and persist the choice."""
    dialog = ChangeLanguageDialog(self, _('Change language'), language)
    try:
        if dialog.ShowModal() == wx.ID_CANCEL:
            return
        chosen = dialog.get_value()
    finally:
        dialog.Destroy()
    update_language(LANGUAGES[chosen])
    save_language()
def on_donate(self, event):
    """Pop up the modal donation dialog."""
    dlg = DonateDialog(self, -1, _('Donate'))
    dlg.ShowModal()
    dlg.Destroy()
class DonateDialog(wx.Dialog):
    """About dialog for the app with a donation address."""

    DONATE_TEXT = "If this software helped you, please consider contributing to its development." \
                  "\nSend donations to: %(address)s"

    def __init__(self, parent, id, title):
        wx.Dialog.__init__(self, parent, id, title)
        sizer = wx.BoxSizer(wx.VERTICAL)
        message = DonateDialog.DONATE_TEXT % dict(address=DONATION_ADDRESS)
        self.about_text = wx.StaticText(self, -1, message)
        self.copy_btn = wx.Button(self, -1, _("Copy address to clipboard"))
        sizer.Add(self.about_text, 0, wx.ALL, 10)
        sizer.Add(self.copy_btn, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 10)
        self.SetSizerAndFit(sizer)
        self.copy_btn.Bind(wx.EVT_BUTTON, self.on_copy)

    def on_copy(self, event):
        """Copy the donation address to the clipboard."""
        if not wx.TheClipboard.Open():
            return
        payload = wx.TextDataObject()
        payload.SetText(DONATION_ADDRESS)
        wx.TheClipboard.SetData(payload)
        wx.TheClipboard.Close()
class ChangeLanguageDialog(wx.Dialog):
    """Dialog prompting the user to change languages."""

    def __init__(self, parent, title, current_language):
        wx.Dialog.__init__(self, parent, -1, title, style=wx.DEFAULT_DIALOG_STYLE)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.lbl = wx.StaticText(self, -1,
            _("Choose language (requires restart to take full effect)"))
        sizer.Add(self.lbl, 0, wx.ALL, 10)
        # Read-only combo pre-selected to the language currently in use.
        self.language_choices = wx.ComboBox(self, -1,
                                            choices=sorted(LANGUAGES.keys()),
                                            style=wx.CB_READONLY)
        self.language_choices.SetStringSelection(LANGUAGES_REVERSE[current_language])
        sizer.Add(self.language_choices, 0, wx.ALL, 10)
        sizer.Add(self.CreateButtonSizer(wx.OK | wx.CANCEL),
                  0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 10)
        self.SetSizerAndFit(sizer)

    def get_value(self):
        """Return the language name selected in the combo box."""
        return self.language_choices.GetStringSelection()
class SoloPasswordRequest(wx.Dialog):
    """Dialog prompting user for login credentials for solo mining."""

    def __init__(self, parent, title):
        """Build a two-column grid of username/password fields plus OK/Cancel."""
        style = wx.DEFAULT_DIALOG_STYLE
        vbox = wx.BoxSizer(wx.VERTICAL)
        wx.Dialog.__init__(self, parent, -1, title, style=style)
        self.user_lbl = wx.StaticText(self, -1, STR_USERNAME)
        self.txt_username = wx.TextCtrl(self, -1, "")
        self.pass_lbl = wx.StaticText(self, -1, STR_PASSWORD)
        self.txt_pass = wx.TextCtrl(self, -1, "", style=wx.TE_PASSWORD)
        grid_sizer_1 = wx.FlexGridSizer(2, 2, 5, 5)
        grid_sizer_1.Add(self.user_lbl, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 0)
        grid_sizer_1.Add(self.txt_username, 0, wx.EXPAND, 0)
        grid_sizer_1.Add(self.pass_lbl, 0, wx.ALIGN_RIGHT | wx.ALIGN_CENTER_VERTICAL, 0)
        grid_sizer_1.Add(self.txt_pass, 0, wx.EXPAND, 0)
        buttons = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        # Bug fix: Sizer.Add's signature is (item, proportion, flag, border);
        # the flags were previously passed as 'proportion' so EXPAND|ALL and
        # the 10px border were silently ignored.
        vbox.Add(grid_sizer_1, 0, wx.EXPAND | wx.ALL, 10)
        vbox.Add(buttons)
        self.SetSizerAndFit(vbox)

    def get_value(self):
        """Return the (username, password) supplied by the user."""
        return self.txt_username.GetValue(), self.txt_pass.GetValue()
class BalanceAuthRequest(wx.Dialog):
    """Dialog prompting user for an auth token to refresh their balance."""

    instructions = \
        _("""Click the link below to log in to the pool and get a special token.
This token lets you securely check your balance.
To remember this token for the future, save your miner settings.""")

    def __init__(self, parent, url):
        wx.Dialog.__init__(self, parent, -1, STR_REFRESH_BALANCE,
                           style=wx.DEFAULT_DIALOG_STYLE)
        sizer = wx.BoxSizer(wx.VERTICAL)
        self.instructions = wx.StaticText(self, -1, BalanceAuthRequest.instructions)
        self.website = hyperlink.HyperLinkCtrl(self, -1, url)
        self.txt_token = wx.TextCtrl(self, -1, _("(Paste token here)"))
        buttons = self.CreateButtonSizer(wx.OK | wx.CANCEL)
        sizer.Add(self.instructions, 0, wx.ALL, 10)
        sizer.Add(self.website, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 10)
        sizer.Add(self.txt_token, 0, wx.EXPAND | wx.ALIGN_CENTER_HORIZONTAL, 10)
        sizer.Add(buttons, 0, wx.ALL | wx.ALIGN_CENTER_HORIZONTAL, 10)
        self.SetSizerAndFit(sizer)

    def get_value(self):
        """Return the auth token supplied by the user."""
        return self.txt_token.GetValue()
class AboutGuiminer(wx.Dialog):
    """About dialog for the app with a donation address."""

    def __init__(self, parent, id, title):
        wx.Dialog.__init__(self, parent, id, title)
        sizer = wx.BoxSizer(wx.VERTICAL)
        body = ABOUT_TEXT % dict(version=__version__, address=DONATION_ADDRESS)
        self.about_text = wx.StaticText(self, -1, body)
        self.copy_btn = wx.Button(self, -1, _("Copy address to clipboard"))
        sizer.Add(self.about_text)
        sizer.Add(self.copy_btn, 0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
        self.SetSizerAndFit(sizer)
        self.copy_btn.Bind(wx.EVT_BUTTON, self.on_copy)

    def on_copy(self, event):
        """Copy the donation address to the clipboard."""
        if not wx.TheClipboard.Open():
            return
        data = wx.TextDataObject()
        data.SetText(DONATION_ADDRESS)
        wx.TheClipboard.SetData(data)
        wx.TheClipboard.Close()
class OpenCLWarningDialog(wx.Dialog):
    """Warning dialog when a user does not have OpenCL installed."""

    def __init__(self, parent):
        wx.Dialog.__init__(self, parent, -1, _("No OpenCL devices found."))
        outer = wx.BoxSizer(wx.VERTICAL)
        self.message = wx.StaticText(self, -1,
            _("""No OpenCL devices were found.
If you only want to mine using CPU or CUDA, you can ignore this message.
If you want to mine on ATI graphics cards, you may need to install the ATI Stream
SDK, or your GPU may not support OpenCL."""))
        outer.Add(self.message, 0, wx.ALL, 10)
        # Row holding the "don't show again" checkbox and its label.
        row = wx.BoxSizer(wx.HORIZONTAL)
        self.no_show_chk = wx.CheckBox(self, -1)
        row.Add(self.no_show_chk)
        self.no_show_txt = wx.StaticText(self, -1, _("Don't show this message again"))
        row.Add((5, 0))
        row.Add(self.no_show_txt)
        outer.Add(row, 0, wx.ALL, 10)
        outer.Add(self.CreateButtonSizer(wx.OK),
                  0, wx.ALIGN_BOTTOM | wx.ALIGN_CENTER_HORIZONTAL, 0)
        self.SetSizerAndFit(outer)

    def is_box_checked(self):
        """Return True if the "don't show again" box is ticked."""
        return self.no_show_chk.GetValue()
def run():
    """Create the main frame and enter the wx main loop."""
    try:
        # 'app' is the module-level wx application object created earlier.
        frame_1 = GUIMiner(None, -1, "")
        app.SetTopWindow(frame_1)
        app.MainLoop()
    except:
        # Record the full traceback, then re-raise so the process still fails.
        logging.exception("Exception:")
        raise

if __name__ == "__main__":
    run()
"""
Test all things related to the ``jedi.cache`` module.
"""
import time
import pytest
import jedi
from jedi import settings, cache
from jedi.cache import ParserCacheItem, ParserPickling
# Work on a private ParserPickling instance so these tests do not disturb
# the module-level singleton shared by the rest of the suite.
ParserPicklingCls = type(ParserPickling)
ParserPickling = ParserPicklingCls()
def test_modulepickling_change_cache_dir(monkeypatch, tmpdir):
    """
    ParserPickling should not save old cache when cache_directory is changed.

    See: `#168 <https://github.com/davidhalter/jedi/pull/168>`_
    """
    first_dir = str(tmpdir.mkdir('first'))
    second_dir = str(tmpdir.mkdir('second'))

    item_1 = ParserCacheItem('fake parser 1')
    item_2 = ParserCacheItem('fake parser 2')
    path_1 = 'fake path 1'
    path_2 = 'fake path 2'

    # Save under the first cache directory and verify it loads back.
    monkeypatch.setattr(settings, 'cache_directory', first_dir)
    ParserPickling.save_parser(path_1, item_1)
    assert load_stored_item(ParserPickling, path_1, item_1) == item_1.parser

    # After switching to a fresh directory, path_1 must no longer resolve.
    monkeypatch.setattr(settings, 'cache_directory', second_dir)
    ParserPickling.save_parser(path_2, item_2)
    assert load_stored_item(ParserPickling, path_1, item_1) is None
def load_stored_item(cache, path, item):
    """Load `item` stored at `path` in `cache`."""
    # NOTE(review): the `cache` parameter shadows the imported jedi.cache
    # module inside this function; kept as-is since all callers pass it
    # positionally.  `change_time - 1` makes the stored entry look newer
    # than the request, so a hit is returned iff the entry exists.
    return cache.load_parser(path, item.change_time - 1)
@pytest.mark.usefixtures("isolated_jedi_cache")
def test_modulepickling_delete_incompatible_cache():
    """A pickle written by one cache version must be invisible to another."""
    item = ParserCacheItem('fake parser')
    path = 'fake path'

    writer = ParserPicklingCls()
    writer.version = 1
    writer.save_parser(path, item)
    assert load_stored_item(writer, path, item) == item.parser

    reader = ParserPicklingCls()
    reader.version = 2
    assert load_stored_item(reader, path, item) is None
# Bug fix: pytest.mark.skipif takes `reason=`, not `message=`; the old
# keyword is rejected by pytest and made collection fail.
@pytest.mark.skipif('True', reason='Currently the star import cache is not enabled.')
def test_star_import_cache_duration():
    """Entries in the star import cache expire after the validity window."""
    new = 0.01
    old, jedi.settings.star_import_cache_validity = \
        jedi.settings.star_import_cache_validity, new

    dct = cache._time_caches['star_import_cache_validity']
    old_dct = dict(dct)
    dct.clear()  # first empty...
    # path needs to be not-None (otherwise caching effects are not visible)
    jedi.Script('', 1, 0, '').completions()
    time.sleep(2 * new)
    jedi.Script('', 1, 0, '').completions()

    # reset values
    jedi.settings.star_import_cache_validity = old
    # Only the second completion run's entry should survive the expiry.
    assert len(dct) == 1
    dct = old_dct
    cache._star_import_cache = {}
def test_cache_call_signatures():
    """
    See github issue #390.
    """
    def check(column, call_name, path=None):
        signatures = jedi.Script(s, 1, column, path).call_signatures()
        assert signatures[0].name == call_name

    s = 'str(int())'

    # Can keep doing these calls and always get the right result.
    for _ in range(3):
        check(8, 'int')
        check(4, 'str')

    # Now lets specify a source_path of boo and alternate these calls, it
    # should still work.
    for _ in range(3):
        check(8, 'int', 'boo')
        check(4, 'str', 'boo')
def test_cache_line_split_issues():
    """Should still work even if there's a newline."""
    signatures = jedi.Script('int(\n').call_signatures()
    assert signatures[0].name == 'int'
<!-- This file is generated by scripts/process-messages/index.js. Do not edit! -->
### experimental_async_required
```
Cannot use `%name%(...)` unless the `experimental.async` compiler option is `true`
```
### invalid_default_snippet
```
Cannot use `{@render children(...)}` if the parent component uses `let:` directives. Consider using a named snippet instead
```
This error would be thrown in a setup like this:
```svelte
<!--- file: Parent.svelte --->
<List {items} let:entry>
<span>{entry}</span>
</List>
```
```svelte
<!--- file: List.svelte --->
<script>
let { items, children } = $props();
</script>
<ul>
{#each items as item}
<li>{@render children(item)}</li>
{/each}
</ul>
```
Here, `List.svelte` is using `{@render children(item)}` which means it expects `Parent.svelte` to use snippets. Instead, `Parent.svelte` uses the deprecated `let:` directive. This combination of APIs is incompatible, hence the error.
### invalid_snippet_arguments
```
A snippet function was passed invalid arguments. Snippets should only be instantiated via `{@render ...}`
```
### lifecycle_outside_component
```
`%name%(...)` can only be used during component initialisation
```
Certain lifecycle methods can only be used during component initialisation. To fix this, make sure you're invoking the method inside the _top level of the instance script_ of your component.
```svelte
<script>
import { onMount } from 'svelte';
function handleClick() {
// This is wrong
onMount(() => {})
}
// This is correct
onMount(() => {})
</script>
<button onclick={handleClick}>click me</button>
```
### missing_context
```
Context was not set in a parent component
```
The [`createContext()`](svelte#createContext) utility returns a `[get, set]` pair of functions. `get` will throw an error if `set` was not used to set the context in a parent component.
### snippet_without_render_tag
```
Attempted to render a snippet without a `{@render}` block. This would cause the snippet code to be stringified instead of its content being rendered to the DOM. To fix this, change `{snippet}` to `{@render snippet()}`.
```
A component throwing this error will look something like this (`children` is not being rendered):
```svelte
<script>
let { children } = $props();
</script>
{children}
```
...or like this (a parent component is passing a snippet where a non-snippet value is expected):
```svelte
<!--- file: Parent.svelte --->
<ChildComponent>
{#snippet label()}
<span>Hi!</span>
{/snippet}
</ChildComponent>
```
```svelte
<!--- file: Child.svelte --->
<script>
let { label } = $props();
</script>
<!-- This component doesn't expect a snippet, but the parent provided one -->
<p>{label}</p>
```
### store_invalid_shape
```
`%name%` is not a store with a `subscribe` method
```
### svelte_element_invalid_this_value
```
The `this` prop on `<svelte:element>` must be a string, if defined
``` | unknown | github | https://github.com/sveltejs/svelte | documentation/docs/98-reference/.generated/shared-errors.md |
# Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/AttackGymResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data.Battle import BattleLog_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattleLog__pb2
from POGOProtos.Data.Battle import BattlePokemonInfo_pb2 as POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/AttackGymResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n7POGOProtos/Networking/Responses/AttackGymResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a&POGOProtos/Data/Battle/BattleLog.proto\x1a.POGOProtos/Data/Battle/BattlePokemonInfo.proto\"\x8c\x03\n\x11\x41ttackGymResponse\x12I\n\x06result\x18\x01 \x01(\x0e\x32\x39.POGOProtos.Networking.Responses.AttackGymResponse.Result\x12\x35\n\nbattle_log\x18\x02 \x01(\x0b\x32!.POGOProtos.Data.Battle.BattleLog\x12\x11\n\tbattle_id\x18\x03 \x01(\t\x12\x42\n\x0f\x61\x63tive_defender\x18\x04 \x01(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfo\x12\x42\n\x0f\x61\x63tive_attacker\x18\x05 \x01(\x0b\x32).POGOProtos.Data.Battle.BattlePokemonInfo\"Z\n\x06Result\x12\t\n\x05UNSET\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12 \n\x1c\x45RROR_INVALID_ATTACK_ACTIONS\x10\x02\x12\x16\n\x12\x45RROR_NOT_IN_RANGE\x10\x03\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_Battle_dot_BattleLog__pb2.DESCRIPTOR,POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Descriptor for the nested Result enum.  Every index/number pair and the
# serialized_start/end offsets are machine-generated by protoc from the
# .proto file -- do not edit them by hand.
_ATTACKGYMRESPONSE_RESULT = _descriptor.EnumDescriptor(
  name='Result',
  full_name='POGOProtos.Networking.Responses.AttackGymResponse.Result',
  filename=None,
  file=DESCRIPTOR,
  values=[
    _descriptor.EnumValueDescriptor(
      name='UNSET', index=0, number=0,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='SUCCESS', index=1, number=1,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_INVALID_ATTACK_ACTIONS', index=2, number=2,
      options=None,
      type=None),
    _descriptor.EnumValueDescriptor(
      name='ERROR_NOT_IN_RANGE', index=3, number=3,
      options=None,
      type=None),
  ],
  containing_type=None,
  options=None,
  serialized_start=487,
  serialized_end=577,
)
_sym_db.RegisterEnumDescriptor(_ATTACKGYMRESPONSE_RESULT)
# Descriptor for the AttackGymResponse message.  Field numbers, wire types
# and serialized offsets are machine-generated by protoc -- do not edit.
_ATTACKGYMRESPONSE = _descriptor.Descriptor(
  name='AttackGymResponse',
  full_name='POGOProtos.Networking.Responses.AttackGymResponse',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='result', full_name='POGOProtos.Networking.Responses.AttackGymResponse.result', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='battle_log', full_name='POGOProtos.Networking.Responses.AttackGymResponse.battle_log', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='battle_id', full_name='POGOProtos.Networking.Responses.AttackGymResponse.battle_id', index=2,
      number=3, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='active_defender', full_name='POGOProtos.Networking.Responses.AttackGymResponse.active_defender', index=3,
      number=4, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='active_attacker', full_name='POGOProtos.Networking.Responses.AttackGymResponse.active_attacker', index=4,
      number=5, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
    _ATTACKGYMRESPONSE_RESULT,
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=181,
  serialized_end=577,
)
# Wire up the cross-references the inline declarations above could not
# express, then register the generated message class with the symbol
# database.  Machine-generated -- do not edit.
_ATTACKGYMRESPONSE.fields_by_name['result'].enum_type = _ATTACKGYMRESPONSE_RESULT
_ATTACKGYMRESPONSE.fields_by_name['battle_log'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattleLog__pb2._BATTLELOG
_ATTACKGYMRESPONSE.fields_by_name['active_defender'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
_ATTACKGYMRESPONSE.fields_by_name['active_attacker'].message_type = POGOProtos_dot_Data_dot_Battle_dot_BattlePokemonInfo__pb2._BATTLEPOKEMONINFO
_ATTACKGYMRESPONSE_RESULT.containing_type = _ATTACKGYMRESPONSE
DESCRIPTOR.message_types_by_name['AttackGymResponse'] = _ATTACKGYMRESPONSE

AttackGymResponse = _reflection.GeneratedProtocolMessageType('AttackGymResponse', (_message.Message,), dict(
  DESCRIPTOR = _ATTACKGYMRESPONSE,
  __module__ = 'POGOProtos.Networking.Responses.AttackGymResponse_pb2'
  # @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.AttackGymResponse)
  ))
_sym_db.RegisterMessage(AttackGymResponse)

# @@protoc_insertion_point(module_scope)
#!/usr/bin/env python
# (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of the Ansible Documentation
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
__docformat__ = 'restructuredtext'
import os
import sys
import traceback
# Sphinx is required to build the docs; fail fast with a readable banner
# instead of a bare ImportError traceback.  (This file is Python 2.)
try:
    from sphinx.application import Sphinx
except ImportError:
    print "#################################"
    print "Dependency missing: Python Sphinx"
    print "#################################"
    sys.exit(1)
import os
class SphinxBuilder(object):
"""
Creates HTML documentation using Sphinx.
"""
def __init__(self):
"""
Run the DocCommand.
"""
print "Creating html documentation ..."
try:
buildername = 'html'
outdir = os.path.abspath(os.path.join(os.getcwd(), "htmlout"))
# Create the output directory if it doesn't exist
if not os.access(outdir, os.F_OK):
os.mkdir(outdir)
doctreedir = os.path.join('./', '.doctrees')
confdir = os.path.abspath('./')
srcdir = os.path.abspath('rst')
freshenv = True
# Create the builder
app = Sphinx(srcdir,
confdir,
outdir,
doctreedir,
buildername,
{},
sys.stdout,
sys.stderr,
freshenv)
app.builder.build_all()
except ImportError, ie:
traceback.print_exc()
except Exception, ex:
print >> sys.stderr, "FAIL! exiting ... (%s)" % ex
def build_docs(self):
self.app.builder.build_all()
def build_rst_docs():
    """Build the HTML docs; the builder does all its work in the constructor."""
    # The instance was previously bound to an unused local; just construct it.
    SphinxBuilder()
if __name__ == '__main__':
    # Minimal CLI: -h/--help prints usage; passing "view" additionally opens
    # the generated docs in the default web browser.
    if '-h' in sys.argv or '--help' in sys.argv:
        print "This script builds the html documentation from rst/asciidoc sources.\n"
        print " Run 'make docs' to build everything."
        print " Run 'make viewdocs' to build and then preview in a web browser."
        sys.exit(0)

    build_rst_docs()

    if "view" in sys.argv:
        import webbrowser
        if not webbrowser.open('htmlout/index.html'):
            print >> sys.stderr, "Could not open on your webbrowser."
#!/usr/bin/python
# emon logger
# elha 20141220
# version 1.0
#
# inspired by OpenEnergyMonitor.org
import math
import time
import requests
import Queue
import threading
# const
NUMBER_OF_SAMPLES = 850 # take 850 Samples per Measurement (takes approx 1.2 secs)
ICAL = 1000 / 33 # CT 1000:1 / Burden 33 Ohms
VOLT_PER_TICK = 1.8 / 4096 # VDC BBB / 12 bit ADC Resolution
VOLT_AC = 230 # fixed value
INTERVAL = 60 # measure every 60 secs

# globals
# scratch list reused by CalcPower for raw ADC samples
buffer = [0 for i in range(NUMBER_OF_SAMPLES)]
logfile = "/var/log/emon.log"
# ADC channel numbers exposed under /sys/bus/iio
pins = ["0", "1", "2", "3", "4", "5", "6"]
# emoncms.org input API; <EMONAPIKEY> must be replaced with a real API key
url = "http://emoncms.org/input/post.json?node=1&apikey=<EMONAPIKEY>"
paramts = "&time="
paramcsv = "&csv="
# read ADC
def Read(pin):
    """Return one raw ADC sample for the given IIO channel number (string)."""
    path = "/sys/bus/iio/devices/iio:device0/in_voltage" + pin + "_raw"
    with open(path, "r") as analog:
        return int(analog.readline())
# calc RMS power for single pin
def CalcPower(pin):
    """Return the RMS power (watts) measured on one ADC channel.

    Takes NUMBER_OF_SAMPLES raw readings, uses the median as the zero
    offset, suppresses noise-only readings, and scales the RMS current
    by the fixed mains voltage.
    """
    # Sample into a local list (idiomatic comprehension instead of a manual
    # while loop mutating the module-level scratch buffer, which nothing
    # else reads).
    samples = [Read(pin) for _ in range(NUMBER_OF_SAMPLES)]

    # sort and median (explicit floor division for the middle index)
    ordered = sorted(samples)
    median = ordered[NUMBER_OF_SAMPLES // 2]

    # suppress zero power:
    # only report power if third (99.x quantile) smallest value is more
    # than 8 ticks away from median
    if median - ordered[3] < 9:
        return 0

    # calc RMS (sum squares -> average -> squareroot)
    sum_sq = 0.0
    for value in samples:
        sum_sq += (value - median) ** 2
    return VOLT_AC * ICAL * VOLT_PER_TICK * math.sqrt(sum_sq / NUMBER_OF_SAMPLES)
# calc power for each pin and return csv-data
def Calc():
    """Return comma-separated power readings, one per configured pin."""
    return ",".join("%1.1f" % CalcPower(pin) for pin in pins)
# log to logfile
def log(msg):
    """Append one line to the local logfile."""
    with open(logfile, "a") as handle:
        handle.write(msg + '\n')
# send to emoncms.org
def sendworker():
    """Daemon-thread loop: drain the backlog queue to emoncms, retrying forever.

    A failed HTTP request keeps the current item and retries it on the next
    pass, so readings survive temporary network outages.
    """
    item = ""
    while 1:
        if item == "":
            item = backlog.get()
        try:
            requests.get(item)
            backlog.task_done()
            item = ""
        except Exception:
            # Bug fix: the bare 'except:' also swallowed SystemExit and
            # KeyboardInterrupt; only real errors should trigger a retry.
            pass
        time.sleep(1)
# main, init
try:
print("emon logger")
print("logging to " + logfile)
log("-----------------------------------------------")
log("start %10d" % int(time.time()))
backlog = Queue.Queue()
sender = threading.Thread(target=sendworker)
sender.daemon = True
sender.start()
# main, run loop
while 1:
# wait until next query
time.sleep(INTERVAL - (int(time.time()) % INTERVAL))
# query data
csv = Calc()
ts = "%10d" % int(time.time())
# report in backlog
backlog.put(url + paramts + ts + paramcsv + csv)
# log to logfile (if something goes wrong)
log(ts + ',' + csv)
except:
print("shutdown.") | unknown | codeparrot/codeparrot-clean | ||
#!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Escaping/unescaping methods for HTML, JSON, URLs, and others.
Also includes a few other miscellaneous string manipulation functions that
have crept in over time.
"""
from __future__ import absolute_import, division, print_function, with_statement
import re
import sys
from tornado.util import unicode_type, basestring_type, u
try:
from urllib.parse import parse_qs as _parse_qs # py3
except ImportError:
from urlparse import parse_qs as _parse_qs # Python 2.6+
try:
import htmlentitydefs # py2
except ImportError:
import html.entities as htmlentitydefs # py3
try:
import urllib.parse as urllib_parse # py3
except ImportError:
import urllib as urllib_parse # py2
import json
try:
unichr
except NameError:
unichr = chr
_XHTML_ESCAPE_RE = re.compile('[&<>"\']')
_XHTML_ESCAPE_DICT = {'&': '&', '<': '<', '>': '>', '"': '"',
'\'': '''}
def xhtml_escape(value):
    """Escapes a string so it is valid within HTML or XML.

    Escapes the characters ``<``, ``>``, ``"``, ``'``, and ``&``.
    When used in attribute values the escaped strings must be enclosed
    in quotes.

    .. versionchanged:: 3.2
       Added the single quote to the list of escaped characters.
    """
    def replace(match):
        return _XHTML_ESCAPE_DICT[match.group(0)]

    return _XHTML_ESCAPE_RE.sub(replace, to_basestring(value))
def xhtml_unescape(value):
    """Un-escapes an XML-escaped string."""
    # Matches both named (&lt;) and numeric (&#39;) entities; _convert_entity
    # (defined later in this module) maps each match back to its character.
    return re.sub(r"&(#?)(\w+?);", _convert_entity, _unicode(value))
# The fact that json_encode wraps json.dumps is an implementation detail.
# Please see https://github.com/tornadoweb/tornado/pull/706
# before sending a pull request that adds **kwargs to this function.
def json_encode(value):
    """JSON-encodes the given Python object."""
    # JSON permits but does not require forward slashes to be escaped.
    # This is useful when json data is emitted in a <script> tag
    # in HTML, as it prevents </script> tags from prematurely terminating
    # the javascript.  Some json libraries do this escaping by default,
    # although python's standard library does not, so we do it here.
    # http://stackoverflow.com/questions/1580647/json-why-are-forward-slashes-escaped
    encoded = json.dumps(value)
    return encoded.replace("</", "<\\/")
def json_decode(value):
    """Returns Python objects for the given JSON string."""
    # Accepts either byte or unicode input; to_basestring normalizes first.
    return json.loads(to_basestring(value))
def squeeze(value):
    """Replace all sequences of whitespace chars with a single space."""
    collapsed = re.sub(r"[\x00-\x20]+", " ", value)
    return collapsed.strip()
def url_escape(value, plus=True):
    """Returns a URL-encoded version of the given value.

    If ``plus`` is true (the default), spaces will be represented
    as "+" instead of "%20".  This is appropriate for query strings
    but not for the path component of a URL.  Note that this default
    is the reverse of Python's urllib module.

    .. versionadded:: 3.1
        The ``plus`` argument
    """
    if plus:
        quote = urllib_parse.quote_plus
    else:
        quote = urllib_parse.quote
    return quote(utf8(value))
# python 3 changed things around enough that we need two separate
# implementations of url_unescape. We also need our own implementation
# of parse_qs since python 3's version insists on decoding everything.
if sys.version_info[0] < 3:
    # Python 2 branch: unquote operates on byte strings natively.
    def url_unescape(value, encoding='utf-8', plus=True):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.

        If ``plus`` is true (the default), plus signs will be interpreted
        as spaces (literal plus signs must be represented as "%2B").  This
        is appropriate for query strings and form-encoded values but not
        for the path component of a URL.  Note that this default is the
        reverse of Python's urllib module.

        .. versionadded:: 3.1
           The ``plus`` argument
        """
        unquote = (urllib_parse.unquote_plus if plus else urllib_parse.unquote)
        if encoding is None:
            return unquote(utf8(value))
        else:
            return unicode_type(unquote(utf8(value)), encoding)

    # py2's parse_qs already returns byte-string values, so reuse it as-is.
    parse_qs_bytes = _parse_qs
else:
    # Python 3 branch: must route around the stdlib's insistence on str.
    def url_unescape(value, encoding='utf-8', plus=True):
        """Decodes the given value from a URL.

        The argument may be either a byte or unicode string.

        If encoding is None, the result will be a byte string.  Otherwise,
        the result is a unicode string in the specified encoding.

        If ``plus`` is true (the default), plus signs will be interpreted
        as spaces (literal plus signs must be represented as "%2B").  This
        is appropriate for query strings and form-encoded values but not
        for the path component of a URL.  Note that this default is the
        reverse of Python's urllib module.

        .. versionadded:: 3.1
           The ``plus`` argument
        """
        if encoding is None:
            if plus:
                # unquote_to_bytes doesn't have a _plus variant
                value = to_basestring(value).replace('+', ' ')
            return urllib_parse.unquote_to_bytes(value)
        else:
            unquote = (urllib_parse.unquote_plus if plus
                       else urllib_parse.unquote)
            return unquote(to_basestring(value), encoding=encoding)

    def parse_qs_bytes(qs, keep_blank_values=False, strict_parsing=False):
        """Parses a query string like urlparse.parse_qs, but returns the
        values as byte strings.

        Keys still become type str (interpreted as latin1 in python3!)
        because it's too painful to keep them as byte strings in
        python3 and in practice they're nearly always ascii anyway.
        """
        # This is gross, but python3 doesn't give us another way.
        # Latin1 is the universal donor of character encodings.
        result = _parse_qs(qs, keep_blank_values, strict_parsing,
                           encoding='latin1', errors='strict')
        encoded = {}
        for k, v in result.items():
            encoded[k] = [i.encode('latin1') for i in v]
        return encoded
_UTF8_TYPES = (bytes, type(None))


def utf8(value):
    """Converts a string argument to a byte string.

    If the argument is already a byte string or None, it is returned unchanged.
    Otherwise it must be a unicode string and is encoded as utf8.
    """
    if isinstance(value, _UTF8_TYPES):
        return value
    if isinstance(value, unicode_type):
        return value.encode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )
_TO_UNICODE_TYPES = (unicode_type, type(None))


def to_unicode(value):
    """Converts a string argument to a unicode string.

    If the argument is already a unicode string or None, it is returned
    unchanged.  Otherwise it must be a byte string and is decoded as utf8.
    """
    if isinstance(value, _TO_UNICODE_TYPES):
        return value
    if isinstance(value, bytes):
        return value.decode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )

# to_unicode was previously named _unicode not because it was private,
# but to avoid conflicts with the built-in unicode() function/type
_unicode = to_unicode
# When dealing with the standard library across python 2 and 3 it is
# sometimes useful to have a direct conversion to the native string type
if str is unicode_type:
    # Python 3: the native str type is unicode.
    native_str = to_unicode
else:
    # Python 2: the native str type is bytes.
    native_str = utf8
_BASESTRING_TYPES = (basestring_type, type(None))


def to_basestring(value):
    """Converts a string argument to a subclass of basestring.

    In python2, byte and unicode strings are mostly interchangeable,
    so functions that deal with a user-supplied argument in combination
    with ascii string constants can use either and should return the type
    the user supplied.  In python3, the two types are not interchangeable,
    so this method is needed to convert byte strings to unicode.
    """
    if isinstance(value, _BASESTRING_TYPES):
        return value
    if isinstance(value, bytes):
        return value.decode("utf-8")
    raise TypeError(
        "Expected bytes, unicode, or None; got %r" % type(value)
    )
def recursive_unicode(obj):
    """Walks a simple data structure, converting byte strings to unicode.

    Supports lists, tuples, and dictionaries (recursing into keys,
    values and elements); any other value is returned untouched.
    """
    if isinstance(obj, bytes):
        return to_unicode(obj)
    if isinstance(obj, dict):
        # dict((k, v) generator) rather than a dict comprehension keeps
        # the file's conservative (old-interpreter friendly) style.
        return dict((recursive_unicode(k), recursive_unicode(v))
                    for (k, v) in obj.items())
    if isinstance(obj, list):
        return [recursive_unicode(i) for i in obj]
    if isinstance(obj, tuple):
        return tuple(recursive_unicode(i) for i in obj)
    return obj
# I originally used the regex from
# http://daringfireball.net/2010/07/improved_regex_for_matching_urls
# but it gets all exponential on certain patterns (such as too many trailing
# dots), causing the regex matcher to never return.
# This regex should avoid those problems.
# Use to_unicode instead of tornado.util.u - we don't want backslashes getting
# processed as escapes.
# Match groups (relied upon by linkify's make_link below):
#   group 1 = the whole url, group 2 = the protocol if present,
#   group 3 = the slashes following the protocol.
_URL_RE = re.compile(to_unicode(r"""\b((?:([\w-]+):(/{1,3})|www[.])(?:(?:(?:[^\s&()]|&|")*(?:[^!"#$%&'()*+,.:;<=>?@\[\]^`{|}~\s]))|(?:\((?:[^\s&()]|&|")*\)))+)"""))
def linkify(text, shorten=False, extra_params="",
            require_protocol=False, permitted_protocols=["http", "https"]):
    """Converts plain text into HTML with links.

    For example: ``linkify("Hello http://tornadoweb.org!")`` would return
    ``Hello <a href="http://tornadoweb.org">http://tornadoweb.org</a>!``

    Parameters:

    * ``shorten``: Long urls will be shortened for display.

    * ``extra_params``: Extra text to include in the link tag, or a callable
      taking the link as an argument and returning the extra text
      e.g. ``linkify(text, extra_params='rel="nofollow" class="external"')``,
      or::

          def extra_params_cb(url):
              if url.startswith("http://example.com"):
                  return 'class="internal"'
              else:
                  return 'class="external" rel="nofollow"'
          linkify(text, extra_params=extra_params_cb)

    * ``require_protocol``: Only linkify urls which include a protocol. If
      this is False, urls such as www.facebook.com will also be linkified.

    * ``permitted_protocols``: List (or set) of protocols which should be
      linkified, e.g. ``linkify(text, permitted_protocols=["http", "ftp",
      "mailto"])``. It is very unsafe to include protocols such as
      ``javascript``.
    """
    # The mutable default for permitted_protocols is safe here: it is only
    # read (membership tests below), never mutated.
    if extra_params and not callable(extra_params):
        extra_params = " " + extra_params.strip()

    def make_link(m):
        # m comes from _URL_RE: group 1 = whole url, group 2 = protocol
        # (if any), group 3 = the slashes after the protocol.
        url = m.group(1)
        proto = m.group(2)
        if require_protocol and not proto:
            return url  # not protocol, no linkify

        if proto and proto not in permitted_protocols:
            return url  # bad protocol, no linkify

        href = m.group(1)
        if not proto:
            href = "http://" + href  # no proto specified, use http

        if callable(extra_params):
            params = " " + extra_params(href).strip()
        else:
            params = extra_params

        # clip long urls. max_len is just an approximation
        max_len = 30
        if shorten and len(url) > max_len:
            before_clip = url
            if proto:
                proto_len = len(proto) + 1 + len(m.group(3) or "")  # +1 for :
            else:
                proto_len = 0

            parts = url[proto_len:].split("/")
            if len(parts) > 1:
                # Grab the whole host part plus the first bit of the path
                # The path is usually not that interesting once shortened
                # (no more slug, etc), so it really just provides a little
                # extra indication of shortening.
                url = url[:proto_len] + parts[0] + "/" + \
                    parts[1][:8].split('?')[0].split('.')[0]

            if len(url) > max_len * 1.5:  # still too long
                url = url[:max_len]

            if url != before_clip:
                amp = url.rfind('&')
                # avoid splitting html char entities
                if amp > max_len - 5:
                    url = url[:amp]
                url += "..."

                if len(url) >= len(before_clip):
                    # shortening gained nothing; show the full url
                    url = before_clip
                else:
                    # full url is visible on mouse-over (for those who don't
                    # have a status bar, such as Safari by default)
                    params += ' title="%s"' % href

        return u('<a href="%s"%s>%s</a>') % (href, params, url)

    # First HTML-escape so that our strings are all safe.
    # The regex is modified to avoid character entities other than & so
    # that we won't pick up ", etc.
    text = _unicode(xhtml_escape(text))
    return _URL_RE.sub(make_link, text)
def _convert_entity(m):
    """re.sub callback: replace one matched HTML entity with its character.

    Numeric entities (decimal or ``x``-prefixed hex) are converted via
    unichr; named entities are looked up in _HTML_UNICODE_MAP.  Anything
    unparseable/unknown is passed through in its original ``&...;`` form.
    """
    body = m.group(2)
    if m.group(1) == "#":
        try:
            if body[:1].lower() == 'x':
                return unichr(int(body[1:], 16))
            return unichr(int(body))
        except ValueError:
            # not a valid code point; emit the entity text unchanged
            return "&#%s;" % body
    try:
        return _HTML_UNICODE_MAP[body]
    except KeyError:
        return "&%s;" % body
def _build_unicode_map():
    """Build the name -> unicode character table for named HTML entities."""
    # dict((k, v) generator) rather than a dict comprehension matches the
    # file's conservative python 2 style.
    return dict((name, unichr(value))
                for name, value in htmlentitydefs.name2codepoint.items())


_HTML_UNICODE_MAP = _build_unicode_map()
import cPickle
import ann
import theano
import numpy
from createData import plot_all, plot
from numpy.random import choice
import matplotlib.pyplot as plt
import theano.tensor as T
def load_data(num = None):
    """Load (data, y) from 'data.dat', shuffle, split 70/30 train/test and
    wrap both splits in theano shared variables.

    num, when truthy, truncates each split to its first ``num`` rows.
    Returns [(train_x, train_y), (test_x, test_y)].
    """
    data, y = cPickle.load(open('data.dat', 'rb'))
    # Optional class-rebalancing pass, disabled by this hard-coded flag:
    # keeps at most ``less`` samples per class (size of the rarest class).
    less_xx = 0
    if less_xx:
        less = min([(y==i).sum() for i in set(y)])
        count = dict(zip(set(y), [0] * len(set(y))))
        new_data, new_y = [], []
        for ele, eley in zip(data, y):
            if count[eley] >= less: continue
            count[eley] += 1
            new_data.append(ele)
            new_y.append(eley)
        data, y = numpy.array(new_data), numpy.array(new_y)
    # Shuffle, then take the first 70% as the training split.
    ind = numpy.random.permutation(data.shape[0])
    data = data[ind]
    y = y[ind]
    ind = int(len(y) * 0.7)
    train_set, test_set = (data[:ind], y[:ind]), (data[ind:], y[ind:])
    # NOTE(review): plotting + raw_input look like debugging leftovers and
    # block until the user presses enter -- confirm they are wanted here.
    plot(*test_set)
    raw_input('pause')
    def shared_dataset(data_xy, borrow=True, num = None):
        # Wrap numpy arrays in theano shared variables; labels are cast
        # to int32 as required for indexing.
        data_x, data_y = data_xy
        if num:
            data_x = data_x[:num]
            data_y = data_y[:num]
        print data_x.shape, data_y.shape
        shared_x = theano.shared(numpy.asarray(data_x,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        shared_y = theano.shared(numpy.asarray(data_y,
                                               dtype=theano.config.floatX),
                                 borrow=borrow)
        return shared_x, T.cast(shared_y, 'int32')
    # valid_set_x, valid_set_y = shared_dataset(valid_set, num = num)
    train_set_x, train_set_y = shared_dataset(train_set, num = num)
    test_set_x, test_set_y = shared_dataset(test_set, num = num)
    rval = [(train_set_x, train_set_y), #(valid_set_x, valid_set_y),
            (test_set_x, test_set_y)]
    return rval
def plot_in_f2(self):
    """Clear the current figure and redraw the model's predictions for
    its own stored inputs (used as a fit() plotting callback)."""
    plt.clf()
    predicted = self.pred()
    plot(self.x.get_value(), predicted)
    plt.draw()
def plot_in_f(self):
    """Clear the current figure and delegate redrawing to plot_all
    (second argument 10; its meaning is defined by plot_all)."""
    plt.clf()
    plot_all(self, 10)
    plt.draw()
def test():
    """Train a small ANN on the pickled dataset with interactive plotting
    enabled, then print its mean negative log-likelihood."""
    plt.ion()
    plt.show()
    datasets = load_data()
    cl = ann.ANN(2, 4, hiddens=[4], lmbd = 0.)
    cl.fit(datasets, lr = 0.01, batch_size = 100, n_epochs = 1000)
    # NOTE(review): 'data' and 'y' are not defined in this function; this
    # line only works if the __main__ block below has already set them as
    # module globals, otherwise it raises NameError -- confirm intent.
    print cl.get_neg_log(data, T.cast(y, 'int32')).mean()
if __name__ == '__main__':
    theano.config.exception_verbosity='high'
    plt.ion()
    plt.show()
    # test()

    # Load the full dataset; labels as int32 for indexing.
    data, y = cPickle.load(open('data.dat', 'rb'))
    y = numpy.asarray(y, dtype = 'int32')
    total = len(y)
    size = int(total * 0.05)
    # data, y = theano.shared(data, borrow = True), T.cast(theano.shared(y, borrow = True), 'int32')

    # Permanent memory: one fitness slot per sample, later used as
    # sampling weights for the next round's subsample.
    memory = numpy.zeros((total,))
    # Random initial training subsample (5% of the data).
    # NOTE(review): numpy.random.choice samples WITH replacement by
    # default, so the subsample may contain duplicates -- confirm intent.
    ind = choice(total, size)
    max_iteration = 10
    iteration = 0
    while iteration < max_iteration:
        train_set = (theano.shared(data[ind]), theano.shared(y[ind]))

        def plot_in_f2(self):
            # Redraw predictions for the current training subsample.
            plt.clf()
            pred = self.pred(train_set[0])
            plot(train_set[0].get_value(), pred)
            plt.draw()

        cl = ann.ANN(2, 4, hiddens=[4], lmbd = 0.)
        haha = 2000
        cl.fit((train_set, train_set), lr = 0.01, batch_size = 100, n_epochs = haha,
               plot = plot_in_f2, plot_interval = haha)
        # Per-sample negative log-likelihood over the FULL dataset.
        fitness = cl.get_neg_log(data, y)
        # Update the memory (decayed accumulation currently disabled).
        memory = fitness #+ memory * 0.71
        p = memory/memory.sum()
        # Resample, biased towards samples the model currently gets wrong.
        ind = choice(total, size, p = p)
        # BUGFIX: the counter was never advanced, so the loop ran forever
        # instead of stopping after max_iteration rounds.
        iteration += 1
#!/bin/env python
#Copyright ReportLab Europe Ltd. 2000-2012
#see license.txt for license details
#history http://www.reportlab.co.uk/cgi-bin/viewcvs.cgi/public/reportlab/trunk/reportlab/lib/randomtext.py
__version__=''' $Id$ '''
###############################################################################
# generates so-called 'Greek Text' for use in filling documents.
###############################################################################
__doc__="""Like Lorem Ipsum, but more fun and extensible.
This module exposes a function randomText() which generates paragraphs.
These can be used when testing out document templates and stylesheets.
A number of 'themes' are provided - please contribute more!
We need some real Greek text too.
There are currently six themes provided:
STARTUP (words suitable for a business plan - or not as the case may be),
COMPUTERS (names of programming languages and operating systems etc),
BLAH (variations on the word 'blah'),
BUZZWORD (buzzword bingo),
STARTREK (Star Trek),
PRINTING (print-related terms)
PYTHON (snippets and quotes from Monty Python)
CHOMSKY (random linguistic nonsense)
EXAMPLE USAGE:
from reportlab.lib import randomtext
print randomtext.randomText(randomtext.PYTHON, 10)
This prints a random number of random sentences (up to a limit
of ten) using the theme 'PYTHON'.
"""
#theme one :-) -- words suitable for a business plan (see module docstring)
# NOTE(review): 'venture capital' appears twice, which doubles its pick
# probability in randomText() -- possibly intentional.
STARTUP = ['strategic', 'direction', 'proactive', 'venture capital',
    'reengineering', 'forecast', 'resources', 'SWOT analysis',
    'forward-thinking', 'profit', 'growth', 'doubletalk', 'B2B', 'B2C',
    'venture capital', 'IPO', "NASDAQ meltdown - we're all doomed!"]
#theme two - computery things (languages, OSes, vendors, hacker culture).
COMPUTERS = ['Python', 'Perl', 'Pascal', 'Java', 'Javascript',
    'VB', 'Basic', 'LISP', 'Fortran', 'ADA', 'APL', 'C', 'C++',
    'assembler', 'Larry Wall', 'Guido van Rossum', 'XML', 'HTML',
    'cgi', 'cgi-bin', 'Amiga', 'Macintosh', 'Dell', 'Microsoft',
    'firewall', 'server', 'Linux', 'Unix', 'MacOS', 'BeOS', 'AS/400',
    'sendmail', 'TCP/IP', 'SMTP', 'RFC822-compliant', 'dynamic',
    'Internet', 'A/UX', 'Amiga OS', 'BIOS', 'boot managers', 'CP/M',
    'DOS', 'file system', 'FreeBSD', 'Freeware', 'GEOS', 'GNU',
    'Hurd', 'Linux', 'Mach', 'Macintosh OS', 'mailing lists', 'Minix',
    'Multics', 'NetWare', 'NextStep', 'OS/2', 'Plan 9', 'Realtime',
    'UNIX', 'VMS', 'Windows', 'X Windows', 'Xinu', 'security', 'Intel',
    'encryption', 'PGP' , 'software', 'ActiveX', 'AppleScript', 'awk',
    'BETA', 'COBOL', 'Delphi', 'Dylan', 'Eiffel', 'extreme programming',
    'Forth', 'Fortran', 'functional languages', 'Guile', 'format your hard drive',
    'Icon', 'IDL', 'Infer', 'Intercal', 'J', 'Java', 'JavaScript', 'CD-ROM',
    'JCL', 'Lisp', '"literate programming"', 'Logo', 'MUMPS', 'C: drive',
    'Modula-2', 'Modula-3', 'Oberon', 'Occam', 'OpenGL', 'parallel languages',
    'Pascal', 'Perl', 'PL/I', 'PostScript', 'Prolog', 'hardware', 'Blue Screen of Death',
    'Rexx', 'RPG', 'Scheme', 'scripting languages', 'Smalltalk', 'crash!', 'disc crash',
    'Spanner', 'SQL', 'Tcl/Tk', 'TeX', 'TOM', 'Visual', 'Visual Basic', '4GL',
    'VRML', 'Virtual Reality Modeling Language', 'difference engine', '...went into "yo-yo mode"',
    'Sun', 'Sun Microsystems', 'Hewlett Packard', 'output device',
    'CPU', 'memory', 'registers', 'monitor', 'TFT display', 'plasma screen',
    'bug report', '"mis-feature"', '...millions of bugs!', 'pizza',
    '"illiterate programming"','...lots of pizza!', 'pepperoni pizza',
    'coffee', 'Jolt Cola[TM]', 'beer', 'BEER!']
#theme three - 'blah' - for when you want to be subtle. :-)
BLAH = ['Blah', 'BLAH', 'blahblah', 'blahblahblah', 'blah-blah',
    'blah!', '"Blah Blah Blah"', 'blah-de-blah', 'blah?', 'blah!!!',
    'blah...', 'Blah.', 'blah;', 'blah, Blah, BLAH!', 'Blah!!!']
#theme four - 'buzzword bingo' time!
# NOTE(review): several entries are deliberately (?) duplicated (e.g.
# 'networking', 'hardball', 'best of breed'), which raises their pick
# probability -- confirm this is intended weighting, not a paste error.
BUZZWORD = ['intellectual capital', 'market segment', 'flattening',
    'regroup', 'platform', 'client-based', 'long-term', 'proactive',
    'quality vector', 'out of the loop', 'implement',
    'streamline', 'cost-centered', 'phase', 'synergy',
    'synergize', 'interactive', 'facilitate',
    'appropriate', 'goal-setting', 'empowering', 'low-risk high-yield',
    'peel the onion', 'goal', 'downsize', 'result-driven',
    'conceptualize', 'multidisciplinary', 'gap analysis', 'dysfunctional',
    'networking', 'knowledge management', 'goal-setting',
    'mastery learning', 'communication', 'real-estate', 'quarterly',
    'scalable', 'Total Quality Management', 'best of breed',
    'nimble', 'monetize', 'benchmark', 'hardball',
    'client-centered', 'vision statement', 'empowerment',
    'lean & mean', 'credibility', 'synergistic',
    'backward-compatible', 'hardball', 'stretch the envelope',
    'bleeding edge', 'networking', 'motivation', 'best practice',
    'best of breed', 'implementation', 'Total Quality Management',
    'undefined', 'disintermediate', 'mindset', 'architect',
    'gap analysis', 'morale', 'objective', 'projection',
    'contribution', 'proactive', 'go the extra mile', 'dynamic',
    'world class', 'real estate', 'quality vector', 'credibility',
    'appropriate', 'platform', 'projection', 'mastery learning',
    'recognition', 'quality', 'scenario', 'performance based',
    'solutioning', 'go the extra mile', 'downsize', 'phase',
    'networking', 'experiencing slippage', 'knowledge management',
    'high priority', 'process', 'ethical', 'value-added', 'implement',
    're-factoring', 're-branding', 'embracing change']
#theme five - Star Trek
STARTREK = ['Starfleet', 'Klingon', 'Romulan', 'Cardassian', 'Vulcan',
    'Benzite', 'IKV Pagh', 'emergency transponder', 'United Federation of Planets',
    'Bolian', "K'Vort Class Bird-of-Prey", 'USS Enterprise', 'USS Intrepid',
    'USS Reliant', 'USS Voyager', 'Starfleet Academy', 'Captain Picard',
    'Captain Janeway', 'Tom Paris', 'Harry Kim', 'Counsellor Troi',
    'Lieutenant Worf', 'Lieutenant Commander Data', 'Dr. Beverly Crusher',
    'Admiral Nakamura', 'Irumodic Syndrome', 'Devron system', 'Admiral Pressman',
    'asteroid field', 'sensor readings', 'Binars', 'distress signal', 'shuttlecraft',
    'cloaking device', 'shuttle bay 2', 'Dr. Pulaski', 'Lwaxana Troi', 'Pacifica',
    'William Riker', "Chief O'Brian", 'Soyuz class science vessel', 'Wolf-359',
    'Galaxy class vessel', 'Utopia Planitia yards', 'photon torpedo', 'Archer IV',
    'quantum flux', 'spacedock', 'Risa', 'Deep Space Nine', 'blood wine',
    'quantum torpedoes', 'holodeck', 'Romulan Warbird', 'Betazoid', 'turbolift', 'battle bridge',
    'Memory Alpha', '...with a phaser!', 'Romulan ale', 'Ferrengi', 'Klingon opera',
    'Quark', 'wormhole', 'Bajoran', 'cruiser', 'warship', 'battlecruiser', '"Intruder alert!"',
    'scout ship', 'science vessel', '"Borg Invasion imminent!" ', '"Abandon ship!"',
    'Red Alert!', 'warp-core breech', '"All hands abandon ship! This is not a drill!"']
#theme six - print-related terms (typography and prepress jargon)
PRINTING = ['points', 'picas', 'leading', 'kerning', 'CMYK', 'offset litho',
    'type', 'font family', 'typography', 'type designer',
    'baseline', 'white-out type', 'WOB', 'bicameral', 'bitmap',
    'blockletter', 'bleed', 'margin', 'body', 'widow', 'orphan',
    'cicero', 'cursive', 'letterform', 'sidehead', 'dingbat', 'leader',
    'DPI', 'drop-cap', 'paragraph', 'En', 'Em', 'flush left', 'left justified',
    'right justified', 'centered', 'italic', 'Latin letterform', 'ligature',
    'uppercase', 'lowercase', 'serif', 'sans-serif', 'weight', 'type foundry',
    'fleuron', 'folio', 'gutter', 'whitespace', 'humanist letterform', 'caption',
    'page', 'frame', 'ragged setting', 'flush-right', 'rule', 'drop shadows',
    'prepress', 'spot-colour', 'duotones', 'colour separations', 'four-colour printing',
    'Pantone[TM]', 'service bureau', 'imagesetter']
#it had to be done!...
#theme seven - the "full Monty"!
# BUGFIX: a missing comma after "No Mungo... never kill a customer." used
# to merge that entry with the following line via implicit string
# concatenation; the comma is restored so they are separate phrases.
PYTHON = ['Good evening ladies and Bruces','I want to buy some cheese', 'You do have some cheese, do you?',
    "Of course sir, it's a cheese shop sir, we've got...",'discipline?... naked? ... With a melon!?',
    'The Church Police!!' , "There's a dead bishop on the landing", 'Would you like a twist of lemming sir?',
    '"Conquistador Coffee brings a new meaning to the word vomit"','Your lupins please',
    'Crelm Toothpaste, with the miracle ingredient Fraudulin',
    "Well there's the first result and the Silly Party has held Leicester.",
    'Hello, I would like to buy a fish license please', "Look, it's people like you what cause unrest!",
    "When we got home, our Dad would thrash us to sleep with his belt!", 'Luxury', "Gumby Brain Specialist",
    "My brain hurts!!!", "My brain hurts too.", "How not to be seen",
    "In this picture there are 47 people. None of them can be seen",
    "Mrs Smegma, will you stand up please?",
    "Mr. Nesbitt has learned the first lesson of 'Not Being Seen', not to stand up.",
    "My hovercraft is full of eels", "Ah. You have beautiful thighs.", "My nipples explode with delight",
    "Drop your panties Sir William, I cannot wait 'til lunchtime",
    "I'm a completely self-taught idiot.", "I always wanted to be a lumberjack!!!",
    "Told you so!! Oh, coitus!!", "",
    "Nudge nudge?", "Know what I mean!", "Nudge nudge, nudge nudge?", "Say no more!!",
    "Hello, well it's just after 8 o'clock, and time for the penguin on top of your television set to explode",
    "Oh, intercourse the penguin!!", "Funny that penguin being there, isn't it?",
    "I wish to register a complaint.", "Now that's what I call a dead parrot", "Pining for the fjords???",
    "No, that's not dead, it's ,uhhhh, resting", "This is an ex-parrot!!",
    "That parrot is definitely deceased.", "No, no, no - it's spelt Raymond Luxury Yach-t, but it's pronounced 'Throatwobbler Mangrove'.",
    "You're a very silly man and I'm not going to interview you.", "No Mungo... never kill a customer.",
    "And I'd like to conclude by putting my finger up my nose",
    "egg and Spam", "egg bacon and Spam", "egg bacon sausage and Spam", "Spam bacon sausage and Spam",
    "Spam egg Spam Spam bacon and Spam", "Spam sausage Spam Spam Spam bacon Spam tomato and Spam",
    "Spam Spam Spam egg and Spam", "Spam Spam Spam Spam Spam Spam baked beans Spam Spam Spam",
    "Spam!!", "I don't like Spam!!!", "You can't have egg, bacon, Spam and sausage without the Spam!",
    "I'll have your Spam. I Love it!",
    "I'm having Spam Spam Spam Spam Spam Spam Spam baked beans Spam Spam Spam and Spam",
    "Have you got anything without Spam?", "There's Spam egg sausage and Spam, that's not got much Spam in it.",
    "No one expects the Spanish Inquisition!!", "Our weapon is surprise, surprise and fear!",
    "Get the comfy chair!", "Amongst our weaponry are such diverse elements as: fear, surprise, ruthless efficiency, an almost fanatical devotion to the Pope, and nice red uniforms - Oh damn!",
    "Nobody expects the... Oh bugger!", "What swims in the sea and gets caught in nets? Henri Bergson?",
    "Goats. Underwater goats with snorkels and flippers?", "A buffalo with an aqualung?",
    "Dinsdale was a looney, but he was a happy looney.", "Dinsdale!!",
    "The 127th Upper-Class Twit of the Year Show", "What a great Twit!",
    "thought by many to be this year's outstanding twit",
    "...and there's a big crowd here today to see these prize idiots in action.",
    "And now for something completely different.", "Stop that, it's silly",
    "We interrupt this program to annoy you and make things generally irritating",
    "This depraved and degrading spectacle is going to stop right now, do you hear me?",
    "Stop right there!", "This is absolutely disgusting and I'm not going to stand for it",
    "I object to all this sex on the television. I mean, I keep falling off",
    "Right! Stop that, it's silly. Very silly indeed", "Very silly indeed", "Lemon curry?",
    "And now for something completely different, a man with 3 buttocks",
    "I've heard of unisex, but I've never had it", "That's the end, stop the program! Stop it!"]
# Sentence openers for chomsky(): each generated sentence is built from
# one leadin, one subject, one verb and one object, in that order.
leadins=[
    "To characterize a linguistic level L,",
    "On the other hand,",
    "This suggests that",
    "It appears that",
    "Furthermore,",
    "We will bring evidence in favor of the following thesis: ",
    "To provide a constituent structure for T(Z,K),",
    "From C1, it follows that",
    "For any transformation which is sufficiently diversified in application to be of any interest,",
    "Analogously,",
    "Clearly,",
    "Note that",
    "Of course,",
    "Suppose, for instance, that",
    "Thus",
    "With this clarification,",
    "Conversely,",
    "We have already seen that",
    "By combining adjunctions and certain deformations,",
    "I suggested that these results would follow from the assumption that",
    "If the position of the trace in (99c) were only relatively inaccessible to movement,",
    "However, this assumption is not correct, since",
    "Comparing these examples with their parasitic gap counterparts in (96) and (97), we see that",
    "In the discussion of resumptive pronouns following (81),",
    "So far,",
    "Nevertheless,",
    "For one thing,",
    "Summarizing, then, we assume that",
    "A consequence of the approach just outlined is that",
    "Presumably,",
    "On our assumptions,",
    "It may be, then, that",
    "It must be emphasized, once again, that",
    "Let us continue to suppose that",
    "Notice, incidentally, that",
    "A majority of informed linguistic specialists agree that",
    "There is also a different approach to the [unification] problem,",
    "This approach divorces the cognitive sciences from a biological setting,",
    "The approach relies on the \"Turing Test,\" devised by mathematician Alan Turing,",
    "Adopting this approach,",
    "There is no fact, no meaningful question to be answered,",
    "Another superficial similarity is the interest in simulation of behavior,",
    "A lot of sophistication has been developed about the utilization of machines for complex purposes,",
    ]
# Noun phrases used as sentence subjects by chomsky().
subjects = [
    "the notion of level of grammaticalness",
    "a case of semigrammaticalness of a different sort",
    "most of the methodological work in modern linguistics",
    "a subset of English sentences interesting on quite independent grounds",
    "the natural general principle that will subsume this case",
    "an important property of these three types of EC",
    "any associated supporting element",
    "the appearance of parasitic gaps in domains relatively inaccessible to ordinary extraction",
    "the speaker-hearer's linguistic intuition",
    "the descriptive power of the base component",
    "the earlier discussion of deviance",
    "this analysis of a formative as a pair of sets of features",
    "this selectionally introduced contextual feature",
    "a descriptively adequate grammar",
    "the fundamental error of regarding functional notions as categorial",
    "relational information",
    "the systematic use of complex symbols",
    "the theory of syntactic features developed earlier",
    ]
# Verb phrases used by chomsky() (each connects a subject to an object).
verbs= [
    "can be defined in such a way as to impose",
    "delimits",
    "suffices to account for",
    "cannot be arbitrary in",
    "is not subject to",
    "does not readily tolerate",
    "raises serious doubts about",
    "is not quite equivalent to",
    "does not affect the structure of",
    "may remedy and, at the same time, eliminate",
    "is not to be considered in determining",
    "is to be regarded as",
    "is unspecified with respect to",
    "is, apparently, determined by",
    "is necessary to impose an interpretation on",
    "appears to correlate rather closely with",
    "is rather different from",
    ]
# Object phrases used by chomsky(); these terminate each sentence
# (note the trailing periods).
objects = [
    "problems of phonemic and morphological analysis.",
    "a corpus of utterance tokens upon which conformity has been defined by the paired utterance test.",
    "the traditional practice of grammarians.",
    "the levels of acceptability from fairly high (e.g. (99a)) to virtual gibberish (e.g. (98d)).",
    "a stipulation to place the constructions into these various categories.",
    "a descriptive fact.",
    "a parasitic gap construction.",
    "the extended c-command discussed in connection with (34).",
    "the ultimate standard that determines the accuracy of any proposed grammar.",
    "the system of base rules exclusive of the lexicon.",
    "irrelevant intervening contexts in selectional rules.",
    "nondistinctness in the sense of distinctive feature theory.",
    "a general convention regarding the forms of the grammar.",
    "an abstract underlying order.",
    "an important distinction in language use.",
    "the requirement that branching is not tolerated within the dominance scope of a complex symbol.",
    "the strong generative capacity of the theory.",
    ]
def format_wisdom(text, line_length=72):
    """Word-wrap *text* to at most *line_length* columns.

    Formatting is deliberately best-effort: if textwrap is unavailable or
    wrapping fails, the text is returned unchanged rather than raising.
    """
    try:
        import textwrap
        return textwrap.fill(text, line_length)
    except Exception:
        # Narrowed from a bare except so that KeyboardInterrupt and
        # SystemExit are no longer swallowed.
        return text
def chomsky(times = 1):
    """Generate *times* sentences of Chomsky-flavoured random nonsense.

    Each sentence draws one random leadin, subject, verb and object,
    avoiding any part used by the immediately preceding sentence.  If
    *times* is not an int, the module docstring is returned instead
    (original behaviour, kept for backward compatibility).
    """
    if not isinstance(times, int):
        return format_wisdom(__doc__)
    import random
    prevparts = []
    output = []
    # range/while True instead of xrange/while 1: identical behaviour on
    # python 2 and also works on python 3.
    for _ in range(times):
        newparts = []
        for partlist in (leadins, subjects, verbs, objects):
            while True:
                part = random.choice(partlist)
                if part not in prevparts:
                    break
            newparts.append(part)
        output.append(' '.join(newparts))
        prevparts = newparts
    return format_wisdom(' '.join(output))
from reportlab import rl_config
if rl_config.invariant:
    # In "invariant" mode seed the RNG once with a fixed value so that
    # generated text is reproducible across runs; rl_config._random guards
    # against re-seeding on repeated imports.
    if not getattr(rl_config,'_random',None):
        rl_config._random = 1
        import random
        random.seed(2342471922L)
        del random
del rl_config
def randomText(theme=STARTUP, sentences=5):
    """Return between 1 and *sentences* random sentences of filler text.

    *theme* is either one of the word lists defined in this module or the
    name of a theme as a string ('chomsky' dispatches to chomsky()).
    Each sentence starts with 'Blah'; roughly one word in five is drawn
    from the theme, the rest are filler ('blah').

    Raises ValueError for an unknown theme name.
    """
    #this may or may not be appropriate in your company
    if isinstance(theme, str):  # isinstance instead of type(...)==type('')
        if theme.lower() == 'chomsky':
            return chomsky(sentences)
        elif theme.upper() in ('STARTUP', 'COMPUTERS', 'BLAH', 'BUZZWORD',
                               'STARTREK', 'PRINTING', 'PYTHON'):
            theme = globals()[theme.upper()]
        else:
            raise ValueError('Unknown theme "%s"' % theme)
    from random import randint, choice
    RANDOMWORDS = theme
    # Accumulate pieces in a list and join once; the previous repeated
    # string concatenation was quadratic in the output length.
    pieces = []
    for sentenceno in range(randint(1, sentences)):
        words = ['Blah']
        for wordno in range(randint(10, 25)):
            if randint(0, 4) == 0:
                words.append(choice(RANDOMWORDS))
            else:
                words.append('blah')
        pieces.append(' '.join(words) + '. ')
    return ''.join(pieces)
if __name__=='__main__':
    # Tiny CLI: randomtext.py [theme [#sentences]]; with no arguments,
    # print five sentences of chomsky() output.
    import sys
    argv = sys.argv[1:]
    if argv:
        theme = argv.pop(0)
        if argv:
            sentences = int(argv.pop(0))
        else:
            sentences = 5
        try:
            print randomText(theme,sentences)
        except:
            # On any failure show usage on stderr, then re-raise so the
            # traceback remains visible.
            print>>sys.stderr,"Usage: randomtext.py [theme [#sentences]]"
            print>>sys.stderr," theme in chomsky|STARTUP|COMPUTERS|BLAH|BUZZWORD|STARTREK|PRINTING|PYTHON"
            raise
    else:
        print chomsky(5)
/* crc32_fold.c -- crc32 folding interface
* Copyright (C) 2021 Nathan Moinvaziri
* For conditions of distribution and use, see copyright notice in zlib.h
*/
#include "zbuild.h"
#include "zutil.h"
#include "functable.h"
#include "crc32.h"
/* Reset the folding state to the canonical CRC-32 seed value and return
 * that fresh value so callers can use it directly. */
Z_INTERNAL uint32_t crc32_fold_reset_c(crc32_fold *crc) {
    uint32_t seed = CRC32_INITIAL_VALUE;
    crc->value = seed;
    return seed;
}
/* Fold [src, src+len) into the running CRC and copy those bytes to dst.
 * NOTE(review): dst/src go straight to memcpy, so they are assumed not to
 * overlap -- confirm at call sites. */
Z_INTERNAL void crc32_fold_copy_c(crc32_fold *crc, uint8_t *dst, const uint8_t *src, size_t len) {
    crc->value = FUNCTABLE_CALL(crc32)(crc->value, src, len);
    memcpy(dst, src, len);
}
/* Fold [src, src+len) into the running CRC without copying the data. */
Z_INTERNAL void crc32_fold_c(crc32_fold *crc, const uint8_t *src, size_t len, uint32_t init_crc) {
    /* Note: while this is basically the same thing as the vanilla CRC function, we still need
     * a functable entry for it so that we can generically dispatch to this function with the
     * same arguments for the versions that _do_ do a folding CRC but we don't want a copy. The
     * init_crc is an unused argument in this context */
    Z_UNUSED(init_crc);
    crc->value = FUNCTABLE_CALL(crc32)(crc->value, src, len);
}
/* Finalize: in the generic implementation the folded state already holds
 * the final CRC-32 value, so just return it. */
Z_INTERNAL uint32_t crc32_fold_final_c(crc32_fold *crc) {
    return crc->value;
}
/**
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.hadoop.fs;
import java.io.*;
import java.util.ArrayList;
import java.util.regex.Pattern;
import org.apache.hadoop.test.GenericTestUtils;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.test.LambdaTestUtils;
import org.apache.hadoop.util.Shell;
import org.junit.jupiter.api.AfterEach;
import org.junit.jupiter.api.BeforeEach;
import org.junit.jupiter.api.Test;
import static org.apache.hadoop.fs.FileContextTestHelper.*;
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertFalse;
import static org.junit.jupiter.api.Assertions.assertNotNull;
import static org.junit.jupiter.api.Assertions.assertTrue;
import static org.junit.jupiter.api.Assertions.fail;
/**
* <p>
* A collection of tests for the {@link FileContext} to test path names passed
* as URIs. This test should be used for testing an instance of FileContext that
* has been initialized to a specific default FileSystem such a LocalFileSystem,
* HDFS,S3, etc, and where path names are passed that are URIs in a different
* FileSystem.
* </p>
*
* <p>
* To test a given {@link FileSystem} implementation create a subclass of this
* test and override {@link #setUp()} to initialize the <code>fc1</code> and
* <code>fc2</code>
*
* The tests will do operations on fc1 that use a URI in fc2
*
* {@link FileContext} instance variable.
* </p>
*/
public abstract class FileContextURIBase {
  /** Root (under the test temp dir) for everything these tests create. */
  private static final String basePath =
      GenericTestUtils.getTempPath("testContextURI");
  private static final Path BASE = new Path(basePath);

  // Matches anything containing <, >, :, ", |, ?, *, or anything that ends with
  // space or dot.
  private static final Pattern WIN_INVALID_FILE_NAME_PATTERN = Pattern.compile(
      "^(.*?[<>\\:\"\\|\\?\\*].*?)|(.*?[ \\.])$");

  // Both contexts point at the same underlying store (see tearDown);
  // subclasses assign them in setUp().
  protected FileContext fc1;
  protected FileContext fc2;
//Helper method to make path qualified
protected Path qualifiedPath(String path, FileContext fc) {
return fc.makeQualified(new Path(BASE, path));
}
  /** Hook for subclasses: initialize {@link #fc1} and {@link #fc2} here. */
  @BeforeEach
  public void setUp() throws Exception { }
  /** Delete everything under BASE after each test. */
  @AfterEach
  public void tearDown() throws Exception {
    // Clean up after test completion
    // No need to clean fc1 as fc1 and fc2 points same location
    if (fc2 != null) {
      fc2.delete(BASE, true);
    }
  }
@Test
public void testCreateFile() throws IOException {
String fileNames[] = {
"testFile", "test File",
"test*File", "test#File",
"test1234", "1234Test",
"test)File", "test_File",
"()&^%$#@!~_+}{><?",
" ", "^ " };
for (String f : fileNames) {
if (!isTestableFileNameOnPlatform(f)) {
continue;
}
// Create a file on fc2's file system using fc1
Path testPath = qualifiedPath(f, fc2);
// Ensure file does not exist
assertFalse(exists(fc2, testPath));
// Now create file
createFile(fc1, testPath);
// Ensure fc2 has the created file
assertTrue(exists(fc2, testPath));
}
}
@Test
public void testCreateFileWithNullName() throws IOException {
String fileName = null;
try {
Path testPath = qualifiedPath(fileName, fc2);
// Ensure file does not exist
assertFalse(exists(fc2, testPath));
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
fail("Create file with null name should throw IllegalArgumentException.");
} catch (IllegalArgumentException e) {
// expected
}
}
  /** Creating a path that already exists must fail with an IOException. */
  @Test
  public void testCreateExistingFile() throws Exception {
    String fileName = "testCreateExistingFile";
    Path testPath = qualifiedPath(fileName, fc2);

    // Ensure file does not exist
    assertFalse(exists(fc2, testPath));

    // Create a file on fc2's file system using fc1
    createFile(fc1, testPath);

    // Creating the same file again (this time via fc2) must fail
    LambdaTestUtils.intercept(IOException.class, () ->
        createFile(fc2, testPath));

    // Ensure fc2 has the created file
    fc2.getFileStatus(testPath);
  }
  /** create() must implicitly create all missing parent directories. */
  @Test
  public void testCreateFileInNonExistingDirectory() throws IOException {
    String fileName = "testCreateFileInNonExistingDirectory/testFile";
    Path testPath = qualifiedPath(fileName, fc2);

    // Ensure file does not exist
    assertFalse(exists(fc2, testPath));

    // Create a file on fc2's file system using fc1
    createFile(fc1, testPath);

    // Ensure using fc2 that file is created
    assertTrue(isDir(fc2, testPath.getParent()));
    assertEquals("testCreateFileInNonExistingDirectory",
        testPath.getParent().getName());
    fc2.getFileStatus(testPath);
  }
  /**
   * mkdir() via fc1 must be visible through fc2, behave like mkdir -p
   * (idempotent, creates parents), and cope with awkward directory names.
   */
  @Test
  public void testCreateDirectory() throws IOException {

    Path path = qualifiedPath("test/hadoop", fc2);
    Path falsePath = qualifiedPath("path/doesnot.exist", fc2);
    Path subDirPath = qualifiedPath("dir0", fc2);

    // Ensure that testPath does not exist in fc1
    assertFalse(exists(fc1, path));
    assertFalse(isFile(fc1, path));
    assertFalse(isDir(fc1, path));

    // Create a directory on fc2's file system using fc1
    fc1.mkdir(path, FsPermission.getDefault(), true);

    // Ensure fc2 has directory
    assertTrue(isDir(fc2, path));
    assertTrue(exists(fc2, path));
    assertFalse(isFile(fc2, path));

    // Test to create same dir twice, (HDFS mkdir is similar to mkdir -p )
    fc1.mkdir(subDirPath, FsPermission.getDefault(), true);
    // This should not throw exception
    fc1.mkdir(subDirPath, FsPermission.getDefault(), true);
    // Create Sub Dirs
    fc1.mkdir(subDirPath, FsPermission.getDefault(), true);

    // Check parent dir
    Path parentDir = path.getParent();
    assertTrue(exists(fc2, parentDir));
    assertFalse(isFile(fc2, parentDir));

    // Check parent parent dir
    Path grandparentDir = parentDir.getParent();
    assertTrue(exists(fc2, grandparentDir));
    assertFalse(isFile(fc2, grandparentDir));

    // Negative test cases
    assertFalse(exists(fc2, falsePath));
    assertFalse(isDir(fc2, falsePath));

    // TestCase - Create multiple directories with awkward names
    String dirNames[] = {
        "createTest/testDir", "createTest/test Dir",
        "createTest/test*Dir", "createTest/test#Dir",
        "createTest/test1234", "createTest/test_DIr",
        "createTest/1234Test", "createTest/test)Dir",
        "createTest/()&^%$#@!~_+}{><?",
        "createTest/  ", "createTest/^ " };

    for (String f : dirNames) {
      if (!isTestableFileNameOnPlatform(f)) {
        continue;
      }
      // Create a file on fc2's file system using fc1
      Path testPath = qualifiedPath(f, fc2);
      // Ensure file does not exist
      assertFalse(exists(fc2, testPath));

      // Now create directory
      fc1.mkdir(testPath, FsPermission.getDefault(), true);
      // Ensure fc2 has the created directory
      assertTrue(exists(fc2, testPath));
      assertTrue(isDir(fc2, testPath));
    }

    // delete the parent directory and verify that the dir no longer exists
    final Path parent = qualifiedPath("createTest", fc2);
    fc2.delete(parent, true);
    assertFalse(exists(fc2, parent));
  }
  /**
   * Verifies that mkdir fails with IOException when any path component
   * is an existing file, both for a direct child of the file and for a
   * deeper descendant path.
   */
  @Test
  public void testMkdirsFailsForSubdirectoryOfExistingFile() throws Exception {
    Path testDir = qualifiedPath("test/hadoop", fc2);
    assertFalse(exists(fc2, testDir));
    fc2.mkdir(testDir, FsPermission.getDefault(), true);
    assertTrue(exists(fc2, testDir));
    // Create a file (via fc1) underneath the directory
    createFile(fc1, qualifiedPath("test/hadoop/file", fc2));
    Path testSubDir = qualifiedPath("test/hadoop/file/subdir", fc2);
    try {
      fc1.mkdir(testSubDir, FsPermission.getDefault(), true);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // expected: cannot create a directory directly under a file
    }
    assertFalse(exists(fc1, testSubDir));
    Path testDeepSubDir = qualifiedPath("test/hadoop/file/deep/sub/dir", fc1);
    try {
      fc2.mkdir(testDeepSubDir, FsPermission.getDefault(), true);
      fail("Should throw IOException.");
    } catch (IOException e) {
      // expected: an intermediate path component is a file
    }
    assertFalse(exists(fc1, testDeepSubDir));
  }
@Test
public void testIsDirectory() throws IOException {
String dirName = "dirTest";
String invalidDir = "nonExistantDir";
String rootDir = "/";
Path existingPath = qualifiedPath(dirName, fc2);
Path nonExistingPath = qualifiedPath(invalidDir, fc2);
Path pathToRootDir = qualifiedPath(rootDir, fc2);
// Create a directory on fc2's file system using fc1
fc1.mkdir(existingPath, FsPermission.getDefault(), true);
// Ensure fc2 has directory
assertTrue(isDir(fc2, existingPath));
assertTrue(isDir(fc2, pathToRootDir));
// Negative test case
assertFalse(isDir(fc2, nonExistingPath));
}
@Test
public void testDeleteFile() throws IOException {
Path testPath = qualifiedPath("testDeleteFile", fc2);
// Ensure file does not exist
assertFalse(exists(fc2, testPath));
// First create a file on file system using fc1
createFile(fc1, testPath);
// Ensure file exist
assertTrue(exists(fc2, testPath));
// Delete file using fc2
fc2.delete(testPath, false);
// Ensure fc2 does not have deleted file
assertFalse(exists(fc2, testPath));
}
  /**
   * Verifies delete() return values for a plain file: false when the file
   * never existed, true when deleting an existing file, and false again
   * on a repeated delete.
   */
  @Test
  public void testDeleteNonExistingFile() throws IOException {
    String testFileName = "testDeleteNonExistingFile";
    Path testPath = qualifiedPath(testFileName, fc2);
    // TestCase1 : Test delete on file never existed
    // Ensure file does not exist
    assertFalse(exists(fc2, testPath));
    // Delete on non existing file should return false
    assertFalse(fc2.delete(testPath, false));
    // TestCase2 : Create , Delete , Delete file
    // Create a file on fc2's file system using fc1
    createFile(fc1, testPath);
    // Ensure file exist
    assertTrue(exists(fc2, testPath));
    // Delete test file, deleting existing file should return true
    assertTrue(fc2.delete(testPath, false));
    // Ensure file does not exist
    assertFalse(exists(fc2, testPath));
    // Delete on non existing file should return false
    assertFalse(fc2.delete(testPath, false));
  }
  /**
   * Same contract as {@link #testDeleteNonExistingFile()}, but for a file
   * nested inside directories that are implicitly created by createFile().
   */
  @Test
  public void testDeleteNonExistingFileInDir() throws IOException {
    String testFileInDir = "testDeleteNonExistingFileInDir/testDir/TestFile";
    Path testPath = qualifiedPath(testFileInDir, fc2);
    // TestCase1 : Test delete on file never existed
    // Ensure file does not exist
    assertFalse(exists(fc2, testPath));
    // Delete on non existing file should return false
    assertFalse(fc2.delete(testPath, false));
    // TestCase2 : Create , Delete , Delete file
    // Create a file on fc2's file system using fc1
    createFile(fc1, testPath);
    // Ensure file exist
    assertTrue(exists(fc2, testPath));
    // Delete test file, deleting existing file should return true
    assertTrue(fc2.delete(testPath, false));
    // Ensure file does not exist
    assertFalse(exists(fc2, testPath));
    // Delete on non existing file should return false
    assertFalse(fc2.delete(testPath, false));
  }
  /**
   * Creates and recursively deletes directories through alternating
   * contexts, including a batch of unusual directory names (Windows-invalid
   * names are filtered out by isTestableFileNameOnPlatform()).
   */
  @Test
  public void testDeleteDirectory() throws IOException {
    String dirName = "dirTest";
    Path testDirPath = qualifiedPath(dirName, fc2);
    // Ensure directory does not exist
    assertFalse(exists(fc2, testDirPath));
    // Create a directory on fc2's file system using fc1
    fc1.mkdir(testDirPath, FsPermission.getDefault(), true);
    // Ensure dir is created
    assertTrue(exists(fc2, testDirPath));
    assertTrue(isDir(fc2, testDirPath));
    fc2.delete(testDirPath, true);
    // Ensure that directory is deleted
    assertFalse(isDir(fc2, testDirPath));
    // TestCase - Create and delete multiple directories
    String dirNames[] = {
        "deleteTest/testDir", "deleteTest/test Dir",
        "deleteTest/test*Dir", "deleteTest/test#Dir",
        "deleteTest/test1234", "deleteTest/1234Test",
        "deleteTest/test)Dir", "deleteTest/test_DIr",
        "deleteTest/()&^%$#@!~_+}{><?",
        "deleteTest/ ",
        "deleteTest/^ " };
    for (String f : dirNames) {
      if (!isTestableFileNameOnPlatform(f)) {
        continue;
      }
      // Create a directory on fc2's file system using fc1
      Path testPath = qualifiedPath(f, fc2);
      // Ensure it does not exist yet
      assertFalse(exists(fc2, testPath));
      // Now create directory
      fc1.mkdir(testPath, FsPermission.getDefault(), true);
      // Ensure fc2 has the created directory
      assertTrue(exists(fc2, testPath));
      assertTrue(isDir(fc2, testPath));
      // Delete dir recursively
      assertTrue(fc2.delete(testPath, true));
      // verify if directory is deleted
      assertFalse(exists(fc2, testPath));
      assertFalse(isDir(fc2, testPath));
    }
  }
  /**
   * Verifies delete() return values for a directory: false when it never
   * existed, true when deleting an existing (empty) directory, and false
   * again on a repeated delete.
   */
  @Test
  public void testDeleteNonExistingDirectory() throws IOException {
    String testDirName = "testDeleteNonExistingDirectory";
    Path testPath = qualifiedPath(testDirName, fc2);
    // TestCase1 : Test delete on directory never existed
    // Ensure directory does not exist
    assertFalse(exists(fc2, testPath));
    // Delete on non existing directory should return false
    assertFalse(fc2.delete(testPath, false));
    // TestCase2 : Create dir, Delete dir, Delete dir
    // Create a directory on fc2's file system using fc1
    fc1.mkdir(testPath, FsPermission.getDefault(), true);
    // Ensure dir exist
    assertTrue(exists(fc2, testPath));
    // Deleting an existing (empty) directory should return true
    assertTrue(fc2.delete(testPath, false));
    // Ensure dir does not exist
    assertFalse(exists(fc2, testPath));
    // Delete on non existing dir should return false
    assertFalse(fc2.delete(testPath, false));
  }
@Test
public void testModificationTime() throws IOException {
String testFile = "testModificationTime";
long fc2ModificationTime, fc1ModificationTime;
Path testPath = qualifiedPath(testFile, fc2);
// Create a file on fc2's file system using fc1
createFile(fc1, testPath);
// Get modification time using fc2 and fc1
fc1ModificationTime = fc1.getFileStatus(testPath).getModificationTime();
fc2ModificationTime = fc2.getFileStatus(testPath).getModificationTime();
// Ensure fc1 and fc2 reports same modification time
assertEquals(fc1ModificationTime, fc2ModificationTime);
}
@Test
public void testFileStatus() throws IOException {
String fileName = "testModificationTime";
Path path2 = fc2.makeQualified(new Path(BASE, fileName));
// Create a file on fc2's file system using fc1
createFile(fc1, path2);
FsStatus fc2Status = fc2.getFsStatus(path2);
// FsStatus , used, free and capacity are non-negative longs
assertNotNull(fc2Status);
assertTrue(fc2Status.getCapacity() > 0);
assertTrue(fc2Status.getRemaining() > 0);
assertTrue(fc2Status.getUsed() > 0);
}
@Test
public void testGetFileStatusThrowsExceptionForNonExistentFile()
throws Exception {
String testFile = "test/hadoop/fileDoesNotExist";
Path testPath = qualifiedPath(testFile, fc2);
try {
fc1.getFileStatus(testPath);
fail("Should throw FileNotFoundException");
} catch (FileNotFoundException e) {
// expected
}
}
@Test
public void testListStatusThrowsExceptionForNonExistentFile()
throws Exception {
String testFile = "test/hadoop/file";
Path testPath = qualifiedPath(testFile, fc2);
try {
fc1.listStatus(testPath);
fail("Should throw FileNotFoundException");
} catch (FileNotFoundException fnfe) {
// expected
}
}
  /**
   * Exercises both listStatus() flavors (array-returning via util() and
   * the RemoteIterator form) over a set of sibling directories, verifying
   * the listing of a parent, of the directory containing the siblings,
   * and of an empty directory.
   */
  @Test
  public void testListStatus() throws Exception {
    final String hPrefix = "test/hadoop";
    final String[] dirs = {
        hPrefix + "/a",
        hPrefix + "/b",
        hPrefix + "/c",
        hPrefix + "/1",
        hPrefix + "/#@#@",
        hPrefix + "/&*#$#$@234"};
    ArrayList<Path> testDirs = new ArrayList<Path>();
    // Skip names that are not legal on the current platform (Windows)
    for (String d : dirs) {
      if (!isTestableFileNameOnPlatform(d)) {
        continue;
      }
      testDirs.add(qualifiedPath(d, fc2));
    }
    assertFalse(exists(fc1, testDirs.get(0)));
    for (Path path : testDirs) {
      fc1.mkdir(path, FsPermission.getDefault(), true);
    }
    // test listStatus that returns an array of FileStatus
    FileStatus[] paths = fc1.util().listStatus(qualifiedPath("test", fc1));
    // "test" contains exactly one entry: "test/hadoop"
    assertEquals(1, paths.length);
    assertEquals(qualifiedPath(hPrefix, fc1), paths[0].getPath());
    paths = fc1.util().listStatus(qualifiedPath(hPrefix, fc1));
    assertEquals(testDirs.size(), paths.length);
    // Every created directory must appear in the listing (order unspecified)
    for (int i = 0; i < testDirs.size(); i++) {
      boolean found = false;
      for (int j = 0; j < paths.length; j++) {
        if (qualifiedPath(testDirs.get(i).toString(), fc1).equals(
            paths[j].getPath())) {
          found = true;
        }
      }
      assertTrue(found, testDirs.get(i) + " not found");
    }
    // An empty directory lists as a zero-length array
    paths = fc1.util().listStatus(qualifiedPath(dirs[0], fc1));
    assertEquals(0, paths.length);
    // test listStatus that returns an iterator of FileStatus
    RemoteIterator<FileStatus> pathsItor =
        fc1.listStatus(qualifiedPath("test", fc1));
    assertEquals(qualifiedPath(hPrefix, fc1), pathsItor.next().getPath());
    assertFalse(pathsItor.hasNext());
    pathsItor = fc1.listStatus(qualifiedPath(hPrefix, fc1));
    int dirLen = 0;
    // Count entries while checking membership against the full dirs list
    for (; pathsItor.hasNext(); dirLen++) {
      boolean found = false;
      FileStatus stat = pathsItor.next();
      for (int j = 0; j < dirs.length; j++) {
        if (qualifiedPath(dirs[j],fc1).equals(stat.getPath())) {
          found = true;
          break;
        }
      }
      assertTrue(found, stat.getPath() + " not found");
    }
    assertEquals(testDirs.size(), dirLen);
    // An empty directory yields an empty iterator
    pathsItor = fc1.listStatus(qualifiedPath(dirs[0], fc1));
    assertFalse(pathsItor.hasNext());
  }
/**
* Returns true if the argument is a file name that is testable on the platform
* currently running the test. This is intended for use by tests so that they
* can skip checking file names that aren't supported by the underlying
* platform. The current implementation specifically checks for patterns that
* are not valid file names on Windows when the tests are running on Windows.
*
* @param fileName String file name to check
* @return boolean true if the argument is valid as a file name
*/
private static boolean isTestableFileNameOnPlatform(String fileName) {
boolean valid = true;
if (Shell.WINDOWS) {
// Disallow reserved characters: <, >, :, ", |, ?, *.
// Disallow trailing space or period.
// See http://msdn.microsoft.com/en-us/library/windows/desktop/aa365247(v=vs.85).aspx
valid = !WIN_INVALID_FILE_NAME_PATTERN.matcher(fileName).matches();
}
return valid;
}
} | java | github | https://github.com/apache/hadoop | hadoop-common-project/hadoop-common/src/test/java/org/apache/hadoop/fs/FileContextURIBase.java |
# -*- coding: utf-8 -*-
# This file is part of Casia - CAS server based on Django
# Copyright (C) 2013 Mateusz Małek
# Casia is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
# You should have received a copy of the GNU Affero General Public License
# along with Casia. If not, see <http://www.gnu.org/licenses/>.
from django.conf.urls import patterns, url
# URLconf for the CAS protocol endpoints.
# NOTE(review): patterns() with string view references is the pre-Django-1.8
# style (removed in Django 1.10); this module targets an old Django release.
urlpatterns = patterns('',
    # CAS 1.0 plain-text ticket validation
    url(r'^validate$', 'casia.cas.views.validate', name='cas_validate'),
    # CAS 2.0 service-ticket validation (XML response)
    url(r'^serviceValidate$', 'casia.cas.views.service_validate',
        name='cas_service_validate'),
    url(r'^login$', 'casia.cas.views.login', name='cas_login'),
    # Issues a ticket for a previously stored ticket request
    url(r'^issue/(?P<ticket_request_id>.*?)$', 'casia.cas.views.issue',
        name='cas_issue'),
    url(r'^logout$', 'casia.webapp.views.logout', name='cas_logout'),
    # proxyValidate reuses service_validate but does not require a
    # service ticket (require_st=False), so proxy tickets are accepted
    url(r'^proxyValidate$', 'casia.cas.views.service_validate',
        {'require_st': False}, name='cas_proxy_validate'),
    url(r'^proxy$', 'casia.cas.views.proxy', name='cas_proxy'),
) | unknown | codeparrot/codeparrot-clean | ||
// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: BUSL-1.1
package blocktoattr
import (
"testing"
"github.com/hashicorp/hcl/v2"
"github.com/hashicorp/hcl/v2/ext/dynblock"
"github.com/hashicorp/hcl/v2/hcldec"
"github.com/hashicorp/hcl/v2/hclsyntax"
hcljson "github.com/hashicorp/hcl/v2/json"
"github.com/hashicorp/terraform/internal/configs/configschema"
"github.com/zclconf/go-cty/cty"
)
// TestFixUpBlockAttrs exercises FixUpBlockAttrs over both HCL native and
// JSON syntax: attribute syntax that needs no fixup, block syntax that must
// be fixed up into list-of-object attributes, interaction with dynblock
// expansion, nested fixups, and schemas where fixup is not permitted.
func TestFixUpBlockAttrs(t *testing.T) {
	// Schema with a single list-of-object attribute "foo"; block syntax
	// for "foo" must be fixed up into this attribute.
	fooSchema := &configschema.Block{
		Attributes: map[string]*configschema.Attribute{
			"foo": {
				Type: cty.List(cty.Object(map[string]cty.Type{
					"bar": cty.String,
				})),
				Optional: true,
			},
		},
	}

	// Table of cases; when wantErrs is true, `want` is not compared
	// (the subtest returns after checking diagnostics).
	tests := map[string]struct {
		src      string
		json     bool
		schema   *configschema.Block
		want     cty.Value
		wantErrs bool
	}{
		"empty": {
			src:    ``,
			schema: &configschema.Block{},
			want:   cty.EmptyObjectVal,
		},
		"empty JSON": {
			src:    `{}`,
			json:   true,
			schema: &configschema.Block{},
			want:   cty.EmptyObjectVal,
		},
		"unset": {
			src:    ``,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(fooSchema.Attributes["foo"].Type),
			}),
		},
		"unset JSON": {
			src:    `{}`,
			json:   true,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(fooSchema.Attributes["foo"].Type),
			}),
		},
		"no fixup required, with one value": {
			src: `
foo = [
{
bar = "baz"
},
]
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz"),
					}),
				}),
			}),
		},
		"no fixup required, with two values": {
			src: `
foo = [
{
bar = "baz"
},
{
bar = "boop"
},
]
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz"),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("boop"),
					}),
				}),
			}),
		},
		"no fixup required, with values, JSON": {
			src:    `{"foo": [{"bar": "baz"}]}`,
			json:   true,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz"),
					}),
				}),
			}),
		},
		"no fixup required, empty": {
			src: `
foo = []
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListValEmpty(fooSchema.Attributes["foo"].Type.ElementType()),
			}),
		},
		"no fixup required, empty, JSON": {
			src:    `{"foo":[]}`,
			json:   true,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListValEmpty(fooSchema.Attributes["foo"].Type.ElementType()),
			}),
		},
		"fixup one block": {
			src: `
foo {
bar = "baz"
}
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz"),
					}),
				}),
			}),
		},
		"fixup one block omitting attribute": {
			src: `
foo {}
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.NullVal(cty.String),
					}),
				}),
			}),
		},
		// Note: the bare identifier `baz` below is an expression resolved
		// against ctx (defined after this table), yielding "baz value".
		"fixup two blocks": {
			src: `
foo {
bar = baz
}
foo {
bar = "boop"
}
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz value"),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("boop"),
					}),
				}),
			}),
		},
		"interaction with dynamic block generation": {
			src: `
dynamic "foo" {
for_each = ["baz", beep]
content {
bar = foo.value
}
}
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz"),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("beep value"),
					}),
				}),
			}),
		},
		"dynamic block with empty iterator": {
			src: `
dynamic "foo" {
for_each = []
content {
bar = foo.value
}
}
`,
			schema: fooSchema,
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.NullVal(fooSchema.Attributes["foo"].Type),
			}),
		},
		"both attribute and block syntax": {
			src: `
foo = []
foo {
bar = "baz"
}
`,
			schema:   fooSchema,
			wantErrs: true, // Unsupported block type (user must be consistent about whether they consider foo to be a block type or an attribute)
			// `want` is unreachable here because wantErrs short-circuits
			// the subtest before the value comparison.
			want: cty.ObjectVal(map[string]cty.Value{
				"foo": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("baz"),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"bar": cty.StringVal("boop"),
					}),
				}),
			}),
		},
		"fixup inside block": {
			src: `
container {
foo {
bar = "baz"
}
foo {
bar = "boop"
}
}
container {
foo {
bar = beep
}
}
`,
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"container": {
						Nesting: configschema.NestingList,
						Block:   *fooSchema,
					},
				},
			},
			want: cty.ObjectVal(map[string]cty.Value{
				"container": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"foo": cty.ListVal([]cty.Value{
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("baz"),
							}),
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("boop"),
							}),
						}),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"foo": cty.ListVal([]cty.Value{
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("beep value"),
							}),
						}),
					}),
				}),
			}),
		},
		"fixup inside attribute-as-block": {
			src: `
container {
foo {
bar = "baz"
}
foo {
bar = "boop"
}
}
container {
foo {
bar = beep
}
}
`,
			schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"container": {
						Type: cty.List(cty.Object(map[string]cty.Type{
							"foo": cty.List(cty.Object(map[string]cty.Type{
								"bar": cty.String,
							})),
						})),
						Optional: true,
					},
				},
			},
			want: cty.ObjectVal(map[string]cty.Value{
				"container": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"foo": cty.ListVal([]cty.Value{
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("baz"),
							}),
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("boop"),
							}),
						}),
					}),
					cty.ObjectVal(map[string]cty.Value{
						"foo": cty.ListVal([]cty.Value{
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("beep value"),
							}),
						}),
					}),
				}),
			}),
		},
		"nested fixup with dynamic block generation": {
			src: `
container {
dynamic "foo" {
for_each = ["baz", beep]
content {
bar = foo.value
}
}
}
`,
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"container": {
						Nesting: configschema.NestingList,
						Block:   *fooSchema,
					},
				},
			},
			want: cty.ObjectVal(map[string]cty.Value{
				"container": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"foo": cty.ListVal([]cty.Value{
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("baz"),
							}),
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("beep value"),
							}),
						}),
					}),
				}),
			}),
		},
		"missing nested block items": {
			src: `
container {
foo {
bar = "one"
}
}
`,
			schema: &configschema.Block{
				BlockTypes: map[string]*configschema.NestedBlock{
					"container": {
						Nesting:  configschema.NestingList,
						MinItems: 2,
						Block: configschema.Block{
							Attributes: map[string]*configschema.Attribute{
								"foo": {
									Type: cty.List(cty.Object(map[string]cty.Type{
										"bar": cty.String,
									})),
									Optional: true,
								},
							},
						},
					},
				},
			},
			// Unreachable because wantErrs is set (MinItems: 2 not met).
			want: cty.ObjectVal(map[string]cty.Value{
				"container": cty.ListVal([]cty.Value{
					cty.ObjectVal(map[string]cty.Value{
						"foo": cty.ListVal([]cty.Value{
							cty.ObjectVal(map[string]cty.Value{
								"bar": cty.StringVal("baz"),
							}),
						}),
					}),
				}),
			}),
			wantErrs: true,
		},
		"no fixup allowed with NestedType": {
			src: `
container {
foo = "one"
}
`,
			schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					"container": {
						NestedType: &configschema.Object{
							Nesting: configschema.NestingList,
							Attributes: map[string]*configschema.Attribute{
								"foo": {
									Type: cty.String,
								},
							},
						},
					},
				},
			},
			want: cty.ObjectVal(map[string]cty.Value{
				"container": cty.NullVal(cty.List(
					cty.Object(map[string]cty.Type{
						"foo": cty.String,
					}),
				)),
			}),
			wantErrs: true,
		},
		"no fixup allowed new types": {
			src: `
container {
foo = "one"
}
`,
			schema: &configschema.Block{
				Attributes: map[string]*configschema.Attribute{
					// This could be a ConfigModeAttr fixup
					"container": {
						Type: cty.List(cty.Object(map[string]cty.Type{
							"foo": cty.String,
						})),
					},
					// But the presence of this type means it must have been
					// declared by a new SDK
					"new_type": {
						Type: cty.Object(map[string]cty.Type{
							"boo": cty.String,
						}),
					},
				},
			},
			want: cty.ObjectVal(map[string]cty.Value{
				"container": cty.NullVal(cty.List(
					cty.Object(map[string]cty.Type{
						"foo": cty.String,
					}),
				)),
			}),
			wantErrs: true,
		},
	}

	// Variables available to expressions in the test sources above.
	ctx := &hcl.EvalContext{
		Variables: map[string]cty.Value{
			"bar":  cty.StringVal("bar value"),
			"baz":  cty.StringVal("baz value"),
			"beep": cty.StringVal("beep value"),
		},
	}

	for name, test := range tests {
		t.Run(name, func(t *testing.T) {
			var f *hcl.File
			var diags hcl.Diagnostics
			// Parse with whichever syntax the case declares.
			if test.json {
				f, diags = hcljson.Parse([]byte(test.src), "test.tf.json")
			} else {
				f, diags = hclsyntax.ParseConfig([]byte(test.src), "test.tf", hcl.Pos{Line: 1, Column: 1})
			}
			if diags.HasErrors() {
				for _, diag := range diags {
					t.Errorf("unexpected diagnostic: %s", diag)
				}
				t.FailNow()
			}

			// We'll expand dynamic blocks in the body first, to mimic how
			// we process this fixup when using the main "lang" package API.
			spec := test.schema.DecoderSpec()
			body := dynblock.Expand(f.Body, ctx)

			body = FixUpBlockAttrs(body, test.schema)
			got, diags := hcldec.Decode(body, spec, ctx)

			if test.wantErrs {
				if !diags.HasErrors() {
					t.Errorf("succeeded, but want error\ngot: %#v", got)
				}

				// check that our wrapped body returns the correct context by
				// verifying the Subject is valid.
				for _, d := range diags {
					if d.Subject.Filename == "" {
						t.Errorf("empty diagnostic subject: %#v", d.Subject)
					}
				}
				return
			}

			if !test.want.RawEquals(got) {
				t.Errorf("wrong result\ngot: %#v\nwant: %#v", got, test.want)
			}
			for _, diag := range diags {
				t.Errorf("unexpected diagnostic: %s", diag)
			}
		})
	}
}
} | go | github | https://github.com/hashicorp/terraform | internal/lang/blocktoattr/fixup_test.go |
import WebIDL
def WebIDLTest(parser, harness):
parser.parse("""
interface TestIncompleteTypes {
attribute FooInterface attr1;
FooInterface method1(FooInterface arg);
};
interface FooInterface {
};
""")
results = parser.finish()
harness.ok(True, "TestIncompleteTypes interface parsed without error.")
harness.check(len(results), 2, "Should be two productions.")
iface = results[0]
harness.ok(isinstance(iface, WebIDL.IDLInterface),
"Should be an IDLInterface")
harness.check(iface.identifier.QName(), "::TestIncompleteTypes", "Interface has the right QName")
harness.check(iface.identifier.name, "TestIncompleteTypes", "Interface has the right name")
harness.check(len(iface.members), 2, "Expect 2 members")
attr = iface.members[0]
harness.ok(isinstance(attr, WebIDL.IDLAttribute),
"Should be an IDLAttribute")
method = iface.members[1]
harness.ok(isinstance(method, WebIDL.IDLMethod),
"Should be an IDLMethod")
harness.check(attr.identifier.QName(), "::TestIncompleteTypes::attr1",
"Attribute has the right QName")
harness.check(attr.type.name, "FooInterface",
"Previously unresolved type has the right name")
harness.check(method.identifier.QName(), "::TestIncompleteTypes::method1",
"Attribute has the right QName")
(returnType, args) = method.signatures()[0]
harness.check(returnType.name, "FooInterface",
"Previously unresolved type has the right name")
harness.check(args[0].type.name, "FooInterface",
"Previously unresolved type has the right name") | unknown | codeparrot/codeparrot-clean | ||
/*
* Copyright 2002-present the original author or authors.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.springframework.cache.config;
import java.util.ArrayList;
import java.util.Collection;
import java.util.List;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.NoSuchBeanDefinitionException;
import org.springframework.beans.factory.NoUniqueBeanDefinitionException;
import org.springframework.cache.CacheManager;
import org.springframework.cache.annotation.Cacheable;
import org.springframework.cache.annotation.CachingConfigurer;
import org.springframework.cache.annotation.EnableCaching;
import org.springframework.cache.interceptor.CacheErrorHandler;
import org.springframework.cache.interceptor.CacheInterceptor;
import org.springframework.cache.interceptor.CacheResolver;
import org.springframework.cache.interceptor.KeyGenerator;
import org.springframework.cache.interceptor.NamedCacheResolver;
import org.springframework.cache.interceptor.SimpleCacheErrorHandler;
import org.springframework.cache.interceptor.SimpleCacheResolver;
import org.springframework.cache.support.NoOpCacheManager;
import org.springframework.context.ConfigurableApplicationContext;
import org.springframework.context.annotation.AnnotationConfigApplicationContext;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.testfixture.cache.AbstractCacheAnnotationTests;
import org.springframework.context.testfixture.cache.CacheTestUtils;
import org.springframework.context.testfixture.cache.SomeCustomKeyGenerator;
import org.springframework.context.testfixture.cache.SomeKeyGenerator;
import org.springframework.context.testfixture.cache.beans.AnnotatedClassCacheableService;
import org.springframework.context.testfixture.cache.beans.CacheableService;
import org.springframework.context.testfixture.cache.beans.DefaultCacheableService;
import static org.assertj.core.api.Assertions.assertThat;
import static org.assertj.core.api.Assertions.assertThatCode;
import static org.assertj.core.api.Assertions.assertThatThrownBy;
/**
* Integration tests for {@code @EnableCaching} and its related
* {@code @Configuration} classes.
*
* @author Chris Beams
* @author Stephane Nicoll
*/
class EnableCachingTests extends AbstractCacheAnnotationTests {

	/** hook into superclass suite of tests */
	@Override
	protected ConfigurableApplicationContext getApplicationContext() {
		return new AnnotationConfigApplicationContext(EnableCachingConfig.class);
	}

	@Test
	void keyStrategy() {
		// The interceptor must use the KeyGenerator exposed by the
		// CachingConfigurer (bean "keyGenerator"), not a default one.
		CacheInterceptor ci = this.ctx.getBean(CacheInterceptor.class);
		assertThat(ci.getKeyGenerator()).isSameAs(this.ctx.getBean("keyGenerator", KeyGenerator.class));
	}

	@Test
	void cacheErrorHandler() {
		// Likewise for the CacheErrorHandler exposed by the configurer.
		CacheInterceptor ci = this.ctx.getBean(CacheInterceptor.class);
		assertThat(ci.getErrorHandler()).isSameAs(this.ctx.getBean("errorHandler", CacheErrorHandler.class));
	}

	@Test
	void singleCacheManagerBean() {
		// With exactly one CacheManager bean, the context refreshes cleanly.
		AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
		ctx.register(SingleCacheManagerConfig.class);
		assertThatCode(ctx::refresh).doesNotThrowAnyException();
		ctx.close();
	}

	@Test
	void multipleCacheManagerBeans() {
		// Two CacheManager beans and no CachingConfigurer: refresh must fail
		// with a NoUniqueBeanDefinitionException naming both candidates.
		AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
		ctx.register(MultiCacheManagerConfig.class);
		assertThatThrownBy(ctx::refresh)
				.isInstanceOfSatisfying(NoUniqueBeanDefinitionException.class, ex -> {
					assertThat(ex.getMessage()).contains(
							"no CacheResolver specified and expected single matching CacheManager but found 2")
							.contains("cm1", "cm2");
					assertThat(ex.getNumberOfBeansFound()).isEqualTo(2);
					assertThat(ex.getBeanNamesFound()).containsExactlyInAnyOrder("cm1", "cm2");
				}).hasNoCause();
	}

	@Test
	void multipleCacheManagerBeans_implementsCachingConfigurer() {
		// A CachingConfigurer disambiguates between multiple CacheManagers.
		AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
		ctx.register(MultiCacheManagerConfigurer.class);
		assertThatCode(ctx::refresh).doesNotThrowAnyException();
		ctx.close();
	}

	@Test
	void multipleCachingConfigurers() {
		// Two CachingConfigurer implementations in one context is an error.
		@SuppressWarnings("resource")
		AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
		ctx.register(MultiCacheManagerConfigurer.class, EnableCachingConfig.class);
		assertThatThrownBy(ctx::refresh)
				.hasMessageContaining("implementations of CachingConfigurer");
	}

	@Test
	void noCacheManagerBeans() {
		// @EnableCaching with no CacheManager at all must fail at refresh
		// with an actionable message.
		AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
		ctx.register(EmptyConfig.class);
		assertThatThrownBy(ctx::refresh)
				.isInstanceOf(NoSuchBeanDefinitionException.class)
				.hasMessageContaining("no CacheResolver specified")
				.hasMessageContaining(
						"register a CacheManager bean or remove the @EnableCaching annotation from your configuration.")
				.hasNoCause();
	}

	@Test
	void emptyConfigSupport() {
		// A CachingConfigurer that overrides nothing still works: the
		// interceptor falls back to a SimpleCacheResolver wrapping the
		// single CacheManager bean.
		ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(EmptyConfigSupportConfig.class);
		CacheInterceptor ci = context.getBean(CacheInterceptor.class);
		assertThat(ci.getCacheResolver()).isInstanceOfSatisfying(SimpleCacheResolver.class, cacheResolver ->
				assertThat(cacheResolver.getCacheManager()).isSameAs(context.getBean(CacheManager.class)));
		context.close();
	}

	@Test
	void bothSetOnlyResolverIsUsed() {
		// When both a CacheManager and a CacheResolver are configured, the
		// resolver wins for cache resolution.
		ConfigurableApplicationContext context = new AnnotationConfigApplicationContext(FullCachingConfig.class);
		CacheInterceptor ci = context.getBean(CacheInterceptor.class);
		assertThat(ci.getCacheResolver()).isSameAs(context.getBean("cacheResolver"));
		assertThat(ci.getKeyGenerator()).isSameAs(context.getBean("keyGenerator"));
		context.close();
	}

	@Test
	void mutableKey() {
		// The cached method mutates its argument; the second call with an
		// equal (pre-mutation) argument must still hit the cache entry.
		AnnotationConfigApplicationContext ctx = new AnnotationConfigApplicationContext();
		ctx.register(EnableCachingConfig.class, ServiceWithMutableKey.class);
		ctx.refresh();
		ServiceWithMutableKey service = ctx.getBean(ServiceWithMutableKey.class);
		String result = service.find(new ArrayList<>(List.of("id")));
		assertThat(service.find(new ArrayList<>(List.of("id")))).isSameAs(result);
		ctx.close();
	}

	/** Main configuration: CachingConfigurer supplying manager, key generator and error handler. */
	@Configuration
	@EnableCaching
	static class EnableCachingConfig implements CachingConfigurer {

		@Override
		@Bean
		public CacheManager cacheManager() {
			return CacheTestUtils.createSimpleCacheManager("testCache", "primary", "secondary");
		}

		@Bean
		public CacheableService<?> service() {
			return new DefaultCacheableService();
		}

		@Bean
		public CacheableService<?> classService() {
			return new AnnotatedClassCacheableService();
		}

		@Override
		@Bean
		public KeyGenerator keyGenerator() {
			return new SomeKeyGenerator();
		}

		@Override
		@Bean
		public CacheErrorHandler errorHandler() {
			return new SimpleCacheErrorHandler();
		}

		@Bean
		public KeyGenerator customKeyGenerator() {
			return new SomeCustomKeyGenerator();
		}

		@Bean
		public CacheManager customCacheManager() {
			return CacheTestUtils.createSimpleCacheManager("testCache");
		}
	}

	/** Caching enabled but no CacheManager bean at all. */
	@Configuration
	@EnableCaching
	static class EmptyConfig {
	}

	/** Exactly one CacheManager bean, no configurer. */
	@Configuration
	@EnableCaching
	static class SingleCacheManagerConfig {

		@Bean
		public CacheManager cm1() {
			return new NoOpCacheManager();
		}
	}

	/** Two CacheManager beans and no configurer: ambiguous. */
	@Configuration
	@EnableCaching
	static class MultiCacheManagerConfig {

		@Bean
		public CacheManager cm1() {
			return new NoOpCacheManager();
		}

		@Bean
		public CacheManager cm2() {
			return new NoOpCacheManager();
		}
	}

	/** Two CacheManager beans, disambiguated via CachingConfigurer. */
	@Configuration
	@EnableCaching
	static class MultiCacheManagerConfigurer implements CachingConfigurer {

		@Bean
		public CacheManager cm1() {
			return new NoOpCacheManager();
		}

		@Bean
		public CacheManager cm2() {
			return new NoOpCacheManager();
		}

		@Override
		public CacheManager cacheManager() {
			return cm1();
		}

		@Override
		public KeyGenerator keyGenerator() {
			return null;
		}
	}

	/** CachingConfigurer that customizes nothing; defaults must apply. */
	@Configuration
	@EnableCaching
	static class EmptyConfigSupportConfig implements CachingConfigurer {

		@Bean
		public CacheManager cm() {
			return new NoOpCacheManager();
		}
	}

	/** Supplies both a CacheManager and a CacheResolver; the resolver wins. */
	@Configuration
	@EnableCaching
	static class FullCachingConfig implements CachingConfigurer {

		@Override
		@Bean
		public CacheManager cacheManager() {
			return new NoOpCacheManager();
		}

		@Override
		@Bean
		public KeyGenerator keyGenerator() {
			return new SomeKeyGenerator();
		}

		@Override
		@Bean
		public CacheResolver cacheResolver() {
			return new NamedCacheResolver(cacheManager(), "foo");
		}
	}

	/** Service whose cached method mutates the collection used as its key. */
	static class ServiceWithMutableKey {

		@Cacheable(value = "testCache", keyGenerator = "customKeyGenerator")
		public String find(Collection<String> id) {
			id.add("other");
			return id.toString();
		}
	}
} | java | github | https://github.com/spring-projects/spring-framework | spring-context/src/test/java/org/springframework/cache/config/EnableCachingTests.java |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.