repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991 values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15 values |
|---|---|---|---|---|---|
felinx/djinn | djinn/errors.py | 2 | 3285 | # -*- coding: utf-8 -*-
#
# Copyright(c) 2014 palmhold.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from six import PY2
from tornado import escape
from tornado.web import HTTPError
# HTTP status code
HTTP_OK = 200
ERROR_BAD_REQUEST = 400
ERROR_UNAUTHORIZED = 401
ERROR_FORBIDDEN = 403
ERROR_NOT_FOUND = 404
ERROR_METHOD_NOT_ALLOWED = 405
ERROR_INTERNAL_SERVER_ERROR = 500
# Custom error code
# Application-specific codes deliberately kept outside the HTTP range.
ERROR_WARNING = 1001
ERROR_DEPRECATED = 1002
ERROR_MAINTAINING = 1003
ERROR_UNKNOWN_ERROR = 9999
# default errors
# Fallbacks used when a code is missing from the lookup tables below.
# NOTE(review): "unknow_error" looks like a typo for "unknown_error", but it
# may already be part of the client-visible API -- confirm before renaming.
_unknown_error = "unknow_error"
_unknown_message = "Unknown error"
# code -> short machine-readable error identifier (goes into meta.error)
_error_types = {400: "bad_request",
                401: "unauthorized",
                403: "forbidden",
                404: "not_found",
                405: "method_not_allowed",
                500: "internal_server_error",
                1001: "warning",
                1002: "deprecated",
                1003: "maintaining",
                9999: _unknown_error}
# code -> default human-readable message (goes into meta.message)
ERROR_MESSAGES = {400: "Bad request",
                  401: "Unauthorized",
                  403: "Forbidden",
                  404: "Not found",
                  405: "Method not allowed",
                  500: "Internal server error",
                  1001: "Warning",
                  1002: "Deprecated",
                  1003: "Maintaining",
                  9999: _unknown_message}
class DjinnError(Exception):
    """Base class for all djinn-specific exceptions."""
    pass

class DatastoreError(DjinnError):
    """Raised when a datastore operation fails."""
    pass

class TemplateContextError(DjinnError):
    """Template context variable does not exist."""
    pass
class HTTPAPIError(HTTPError):
    """API error handling exception.

    API server always returns formatted JSON to client even there is
    an internal server error: ``str(err)`` serializes the error as a JSON
    document with a ``meta`` section (code / error identifier / message)
    and an optional ``data`` payload.
    """

    def __init__(self, status_code=ERROR_UNKNOWN_ERROR, message=None,
                 error=None, data=None, *args, **kwargs):
        """Create an API error.

        status_code -- HTTP or custom error code (see module constants)
        message     -- human-readable message; may contain %-placeholders
                       filled from *args (tornado HTTPError convention)
        error       -- short machine-readable identifier; defaults from
                       _error_types by status code
        data        -- optional dict payload attached to the response
        """
        assert isinstance(data, dict) or data is None
        message = message if message else ""
        if PY2:
            assert isinstance(message, basestring)
        else:
            assert isinstance(message, (str, bytes))
        super(HTTPAPIError, self).__init__(int(status_code),
                                           log_message=message, *args, **kwargs)
        self.error = error if error else \
            _error_types.get(self.status_code, _unknown_error)
        self.message = message if message else \
            ERROR_MESSAGES.get(self.status_code, _unknown_message)
        self.data = data if data is not None else {}

    def __str__(self):
        err = {"meta": {"code": self.status_code, "error": self.error}}
        if self.data:
            err["data"] = self.data
        if self.message:
            # Only %-interpolate when extra args were actually supplied;
            # previously a literal '%' in a no-args message raised TypeError.
            message = self.message % self.args if self.args else self.message
            err["meta"]["message"] = message
        return escape.json_encode(err)
| apache-2.0 |
adamjmcgrath/react-native | JSCLegacyProfiler/trace_data.py | 375 | 8013 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import re
import unittest
"""
# _-----=> irqs-off
# / _----=> need-resched
# | / _---=> hardirq/softirq
# || / _--=> preempt-depth
# ||| / delay
# TASK-PID CPU# |||| TIMESTAMP FUNCTION
# | | | |||| | |
<idle>-0 [001] ...2 3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120
"""
# Matches one line of a kernel ftrace/systrace dump (see the sample above):
# task-pid, optional (tgid), [cpu], optional 4-char flags, timestamp, payload.
TRACE_LINE_PATTERN = re.compile(
    r'^\s*(?P<task>.+)-(?P<pid>\d+)\s+(?:\((?P<tgid>.+)\)\s+)?\[(?P<cpu>\d+)\]\s+(?:(?P<flags>\S{4})\s+)?(?P<timestamp>[0-9.]+):\s+(?P<function>.+)$')
"""
Example lines from custom app traces:
0: B|27295|providerRemove
0: E
tracing_mark_write: S|27311|NNFColdStart<D-7744962>|1112249168
"""
# Splits a trace "function" field into its writer type and the raw args text
# (which is further split on '|').
APP_TRACE_LINE_PATTERN = re.compile(
    r'^(?P<type>.+?): (?P<args>.+)$')
"""
Example section names:
NNFColdStart
NNFColdStart<0><T7744962>
NNFColdStart<X>
NNFColdStart<T7744962>
"""
# Captures an optional one-letter command plus argument decoration,
# e.g. "Name<D-123>" -> ("Name", "D", "-123"); a bare "<0>" marker is dropped.
DECORATED_SECTION_NAME_PATTERN = re.compile(r'^(?P<section_name>.*?)(?:<0>)?(?:<(?P<command>.)(?P<argument>.*?)>)?$')
# "function" writer types that carry app-defined trace data
# (anything else is a raw kernel event and is left as a plain string).
SYSTRACE_LINE_TYPES = set(['0', 'tracing_mark_write'])
class TraceLine(object):
def __init__(self, task, pid, tgid, cpu, flags, timestamp, function):
self.task = task
self.pid = pid
self.tgid = tgid
self.cpu = cpu
self.flags = flags
self.timestamp = timestamp
self.function = function
self.canceled = False
@property
def is_app_trace_line(self):
return isinstance(self.function, AppTraceFunction)
def cancel(self):
self.canceled = True
def __str__(self):
if self.canceled:
return ""
elif self.tgid:
return "{task:>16s}-{pid:<5d} ({tgid:5s}) [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n".format(**vars(self))
elif self.flags:
return "{task:>16s}-{pid:<5d} [{cpu:03d}] {flags:4s} {timestamp:12f}: {function}\n".format(**vars(self))
else:
return "{task:>16s}-{pid:<5d} [{cpu:03d}] {timestamp:12.6f}: {function}\n".format(**vars(self))
class AppTraceFunction(object):
    """The decoded "function" payload of an app trace line.

    ``args`` is the '|'-separated argument list; by position:
    operation, pid, section name, extras...  str() re-joins it exactly.
    """

    def __init__(self, type, args):
        self.type = type
        self.args = args
        self.operation = args[0]
        if len(args) >= 2 and args[1]:
            self.pid = int(args[1])
        if len(args) < 3:
            self._section_name = None
            self.command = None
            self.argument = None
        else:
            parsed = _parse_section_name(args[2])
            self._section_name, self.command, self.argument = parsed
            # Keep args in sync with the undecorated section name.
            args[2] = self._section_name
        self.cookie = None

    @property
    def section_name(self):
        """Section name with any <...> decoration stripped."""
        return self._section_name

    @section_name.setter
    def section_name(self, value):
        self._section_name = value
        # Mirror into args so __str__ round-trips the updated name.
        self.args[2] = value

    def __str__(self):
        return "{0}: {1}".format(self.type, "|".join(self.args))
class AsyncTraceFunction(AppTraceFunction):
    """App trace function for async events (types S/T/F); the fourth arg
    is a cookie correlating begin/step/finish records."""

    def __init__(self, type, args):
        super(AsyncTraceFunction, self).__init__(type, args)
        # args[3] must be present for async trace operations.
        self.cookie = int(args[3])
# Operations that carry an async cookie (S = start, T = step, F = finish);
# any other operation is constructed as a plain AppTraceFunction.
TRACE_TYPE_MAP = {
    'S': AsyncTraceFunction,
    'T': AsyncTraceFunction,
    'F': AsyncTraceFunction,
}
def parse_line(line):
    """Parse one raw trace line into a TraceLine, or return None when the
    line does not look like a trace record."""
    match = TRACE_LINE_PATTERN.match(line.strip())
    if match is None:
        return None
    fields = match.groupdict()
    function = fields["function"]
    # Promote app-trace payloads to structured objects; raw kernel events
    # stay as plain strings.
    app_trace = _parse_function(function)
    if app_trace is not None:
        function = app_trace
    return TraceLine(
        fields["task"],
        int(fields["pid"]),
        fields["tgid"],
        int(fields["cpu"]),
        fields["flags"],
        float(fields["timestamp"]),
        function,
    )
def parse_dextr_line(line):
    """Build a TraceLine from an already-decoded dextr event dict.

    ``line`` is expected to carry "name", "pid", "tid", "ts" and "ph"
    keys (Chrome-trace-event style); CPU and flags are unknown for
    dextr events, so they are left as None.
    """
    task = line["name"]
    pid = line["pid"]
    tgid = line["tid"]
    cpu = None
    flags = None
    timestamp = line["ts"]
    # NOTE(review): args may contain non-string values here (e.g. pid as an
    # int); AppTraceFunction.__str__ joins args with '|' and would fail on
    # ints -- confirm callers never stringify these lines.
    function = AppTraceFunction("DextrTrace", [line["ph"], line["pid"], line["name"]])
    return TraceLine(task, pid, tgid, cpu, flags, timestamp, function)
def _parse_function(function):
    """Parse a trace "function" field into an AppTraceFunction (or an
    AsyncTraceFunction for async operations); return None for raw kernel
    events and empty payloads.
    """
    line_match = APP_TRACE_LINE_PATTERN.match(function)
    if not line_match:
        return None
    type = line_match.group("type")
    if type not in SYSTRACE_LINE_TYPES:
        return None
    args = line_match.group("args").split('|')
    if len(args) == 1 and len(args[0]) == 0:
        # Empty payload: previously args was set to None here and the
        # TRACE_TYPE_MAP lookup below crashed with a TypeError.
        return None
    constructor = TRACE_TYPE_MAP.get(args[0], AppTraceFunction)
    return constructor(type, args)
def _parse_section_name(section_name):
    """Split a decorated section name into (name, command, argument);
    command/argument are None when the name carries no <...> decoration."""
    if section_name is None:
        # Nothing to parse; preserve the None name.
        return None, None, None
    match = DECORATED_SECTION_NAME_PATTERN.match(section_name)
    return (
        match.group("section_name"),
        match.group("command"),
        match.group("argument"),
    )
def _format_section_name(section_name, command, argument):
if not command:
return section_name
return "{section_name}<{command}{argument}>".format(**vars())
class RoundTripFormattingTests(unittest.TestCase):
    """Checks that parse -> str round-trips reproduce the input exactly,
    for section names, app-trace payloads, and full kernel trace lines."""

    def testPlainSectionName(self):
        section_name = "SectionName12345-5562342fas"
        self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name)))

    def testDecoratedSectionName(self):
        section_name = "SectionName12345-5562342fas<D-123456>"
        self.assertEqual(section_name, _format_section_name(*_parse_section_name(section_name)))

    def testSimpleFunction(self):
        function = "0: E"
        self.assertEqual(function, str(_parse_function(function)))

    def testFunctionWithoutCookie(self):
        function = "0: B|27295|providerRemove"
        self.assertEqual(function, str(_parse_function(function)))

    def testFunctionWithCookie(self):
        function = "0: S|27311|NNFColdStart|1112249168"
        self.assertEqual(function, str(_parse_function(function)))

    def testFunctionWithCookieAndArgs(self):
        function = "0: T|27311|NNFColdStart|1122|Start"
        self.assertEqual(function, str(_parse_function(function)))

    def testFunctionWithArgsButNoPid(self):
        # Empty pid and section fields must survive the round trip.
        function = "0: E|||foo=bar"
        self.assertEqual(function, str(_parse_function(function)))

    def testKitKatFunction(self):
        function = "tracing_mark_write: B|14127|Looper.dispatchMessage|arg=>>>>> Dispatching to Handler (android.os.Handler) {422ae980} null: 0|Java"
        self.assertEqual(function, str(_parse_function(function)))

    def testNonSysTraceFunctionIgnored(self):
        # Raw kernel events are not app traces and must parse to None.
        function = "sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120"
        self.assertEqual(None, _parse_function(function))

    def testLineWithFlagsAndTGID(self):
        line = "          <idle>-0     (  550) [000] d..2  7953.258473: cpu_idle: state=1 cpu_id=0\n"
        self.assertEqual(line, str(parse_line(line)))

    def testLineWithFlagsAndNoTGID(self):
        line = "          <idle>-0     (-----) [000] d..2  7953.258473: cpu_idle: state=1 cpu_id=0\n"
        self.assertEqual(line, str(parse_line(line)))

    def testLineWithFlags(self):
        line = "          <idle>-0     [001] ...2  3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n"
        self.assertEqual(line, str(parse_line(line)))

    def testLineWithoutFlags(self):
        line = "          <idle>-0     [001]  3269.291072: sched_switch: prev_comm=swapper/1 prev_pid=0 prev_prio=120 prev_state=R ==> next_comm=mmcqd/0 next_pid=120 next_prio=120\n"
        self.assertEqual(line, str(parse_line(line)))
| bsd-3-clause |
sanghinitin/golismero | thirdparty_libs/nltk/misc/babelfish.py | 12 | 6597 | # coding: utf8
# babelizer.py - API for simple access to babelfish.altavista.com.
# Requires python 2.0 or better.
# From: http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/64937
# Author: Jonathan Feinberg <jdf@pobox.com>
# Modified by Steven Bird to work with current babelfish
#
# See it in use at http://babel.MrFeinberg.com/
r"""API for simple access to babelfish.altavista.com.
Summary:
>>> from nltk.misc import babelfish as babelizer
>>> babelizer.available_languages
['Chinese', 'English', 'French', 'German', 'Greek', 'Italian', 'Japanese', 'Korean', 'Portuguese', 'Russian', 'Spanish']
>>> babelizer.translate('How much is that doggie in the window?',
... 'english', 'french')
'Combien co\xfbte ce chienchien dans la fen\xeatre ?'
"""
import re
import string
import urllib
import sys
"""
Various patterns I have encountered in looking for the babelfish result.
We try each of them in turn, based on the relative number of times I've
seen each of these patterns. $1.00 to anyone who can provide a heuristic
for knowing which one to use. This includes AltaVista employees.
"""
__where = [ re.compile(r'<div id="result"><div style="padding:0.6em;">([^<]*)'),
re.compile(r'name=\"q\">([^<]*)'),
re.compile(r'td bgcolor=white>([^<]*)'),
re.compile(r'<\/strong><br>([^<]*)'),
re.compile(r'padding:10px[^>]+>([^<]*)')
]
__languages = { 'english' : 'en',
'french' : 'fr',
'spanish' : 'es',
'german' : 'de',
'greek' : 'el',
'italian' : 'it',
'portuguese': 'pt',
'chinese' : 'zh',
'japanese' : 'ja',
'korean' : 'ko',
'russian' : 'ru'
}
"""
All of the available language names.
"""
available_languages = sorted([x.title() for x in __languages])
# Root of the babelizer exception hierarchy.
class BabelizerError(Exception):
    """
    Calling translate() or babelize() can raise a BabelizerError
    """

class BabelfishChangedError(BabelizerError):
    """
    Thrown when babelfish.yahoo.com changes some detail of their HTML layout,
    and babelizer no longer submits data in the correct form, or can no
    longer parse the results.
    """

class BabelizerIOError(BabelizerError):
    """
    Thrown for various networking and IO errors.
    """
def clean(text):
    """Normalize whitespace: strip the ends of *text* and collapse every
    interior run of whitespace to a single space."""
    stripped = text.strip()
    return re.sub(r'\s+', ' ', stripped)
def translate(phrase, source, target):
    """
    Use babelfish to translate phrase from source language to target language.
    It's only guaranteed to work if 'english' is one of the two languages.

    :raise BabelizeError: If an error is encountered.
    """
    phrase = clean(phrase)
    try:
        # Map human-readable names ('english') to babelfish codes ('en').
        source_code = __languages[source]
        target_code = __languages[target]
    except KeyError, lang:
        # Python 2 syntax: 'lang' is bound to the KeyError instance.
        raise ValueError, "Language %s not available" % lang
    params = urllib.urlencode({'doit': 'done',
                               'tt': 'urltext',
                               'urltext': phrase,
                               'lp': source_code + '_' + target_code})
    try:
        response = urllib.urlopen('http://babelfish.yahoo.com/translate_txt', params)
    except IOError, what:
        raise BabelizerIOError("Couldn't talk to server: %s" % what)
    html = response.read()
    # Try each known result pattern until one matches the returned page.
    for regex in __where:
        match = regex.search(html)
        if match: break
    if not match: raise BabelfishChangedError("Can't recognize translated string.")
    return clean(match.group(1))
def babelize(phrase, source, target, limit = 12):
    """
    Yield successive round-trip translations of *phrase* between *source*
    and *target*, stopping when a translation repeats or after *limit*
    hops, whichever comes first.

    It's only guaranteed to work if 'english' is one of the two
    languages.

    :raise BabelizeError: If an error is encountered.
    """
    phrase = clean(phrase)
    seen = set([phrase])
    yield phrase
    flip = {source: target, target: source}
    direction = source
    for _ in range(limit):
        phrase = translate(phrase, direction, flip[direction])
        if phrase in seen:
            # Reached a fixed point (or a cycle); stop early.
            return
        seen.add(phrase)
        yield phrase
        direction = flip[direction]
HELP = """NLTK Babelizer Commands:
All single-word inputs are commands:
help: this help message
languages: print the list of languages
language: the name of a language to use"""
def babelize_shell():
"""
An interactive shell that uses babelfish to
translate back and forth between source and
target until either no more changes occur in translation or
limit iterations have been reached, whichever comes first.
It's only guaranteed to work if 'english' is one of the two
languages.
:raise BabelizeError: If an error is encountered.
"""
print "NLTK Babelizer: type 'help' for a list of commands."
language = ''
phrase = ''
try:
while True:
command = raw_input('Babel> ')
command = clean(command)
if ' ' not in command:
command = command.lower()
if command == 'help':
print HELP
elif command == 'languages':
print ' '.join(sorted(__languages))
elif command in __languages:
language = command
elif command in ['quit', 'bye', 'end']:
break
elif command == 'run':
if not language:
print "Please specify a language first (type 'languages' for a list)."
elif not phrase:
print "Please enter a phrase first (just type it in at the prompt)."
else:
for count, new_phrase in enumerate(babelize(phrase, 'english', language)):
print "%s>" % count, new_phrase
sys.stdout.flush()
else:
print "Command not recognized (type 'help' for help)."
# if the command contains a space, it must have multiple words, and be a new phrase
else:
phrase = command
except EOFError:
print
pass
# I won't take that from you, or from your doggie (Korean)
# the pig I found looked happy (chinese)
# absence makes the heart grow fonder (italian)
# more idioms: http://www.idiomsite.com/
if __name__ == '__main__':
babelize_shell()
| gpl-2.0 |
sebrandon1/neutron | neutron/tests/tempest/api/test_subnets.py | 3 | 2636 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest import test
from neutron.tests.tempest.api import base
class SubnetsSearchCriteriaTest(base.BaseSearchCriteriaTest):
    """Sorting/pagination/filtering list-API tests for subnets.

    The actual assertions live in BaseSearchCriteriaTest; each method here
    just binds them to the 'subnet' resource created in resource_setup.
    """

    resource = 'subnet'
    # Restrict listings to our own (non-shared) fixtures.
    list_kwargs = {'shared': False}

    @classmethod
    def resource_setup(cls):
        super(SubnetsSearchCriteriaTest, cls).resource_setup()
        net = cls.create_network(network_name='subnet-search-test-net')
        # One subnet per name expected by the base class's sort/paging checks.
        for name in cls.resource_names:
            cls.create_subnet(net, name=name)

    @test.idempotent_id('d2d61995-5dd5-4b93-bce7-3edefdb79563')
    def test_list_sorts_asc(self):
        self._test_list_sorts_asc()

    @test.idempotent_id('c3c6b0af-c4ac-4da0-b568-8d08ae550604')
    def test_list_sorts_desc(self):
        self._test_list_sorts_desc()

    @test.idempotent_id('b93063b3-f713-406e-bf93-e5738e09153c')
    def test_list_pagination(self):
        self._test_list_pagination()

    @test.idempotent_id('2ddd9aa6-de28-410f-9cbc-ce752893c407')
    def test_list_pagination_with_marker(self):
        self._test_list_pagination_with_marker()

    @test.idempotent_id('351183ef-6ed9-4d71-a9f2-a5ac049bd7ea')
    def test_list_pagination_with_href_links(self):
        self._test_list_pagination_with_href_links()

    @test.idempotent_id('dfaa20ca-6d84-4f26-962f-2fee4d247cd9')
    def test_list_pagination_page_reverse_asc(self):
        self._test_list_pagination_page_reverse_asc()

    @test.idempotent_id('40552213-3e12-4d6a-86f3-dda92f3de88c')
    def test_list_pagination_page_reverse_desc(self):
        self._test_list_pagination_page_reverse_desc()

    @test.idempotent_id('3cea9053-a731-4480-93ee-19b2c28a9ce4')
    def test_list_pagination_page_reverse_with_href_links(self):
        self._test_list_pagination_page_reverse_with_href_links()

    @test.idempotent_id('d851937c-9821-4b46-9d18-43e9077ecac0')
    def test_list_no_pagination_limit_0(self):
        self._test_list_no_pagination_limit_0()

    @test.idempotent_id('c0f9280b-9d81-4728-a967-6be22659d4c8')
    def test_list_validation_filters(self):
        self._test_list_validation_filters()
| apache-2.0 |
Captnoord/openpli-enigma2 | lib/python/Components/Keyboard.py | 30 | 1278 | from Components.Console import Console
from os import listdir as os_listdir, path as os_path
from re import compile as re_compile
from enigma import eEnv
class Keyboard:
def __init__(self):
self.keyboardmaps = []
self.readKeyboardMapFiles()
def readKeyboardMapFiles(self):
for keymapfile in os_listdir(eEnv.resolve('${datadir}/keymaps/')):
if (keymapfile.endswith(".info")):
mapfile = None
mapname = None
for line in open(eEnv.resolve('${datadir}/keymaps/') + keymapfile):
m = re_compile('^\s*(\w+)\s*=\s*(.*)\s*$').match(line)
if m:
key, val = m.groups()
if key == 'kmap':
mapfile = val
if key == 'name':
mapname = val
if (mapfile is not None) and (mapname is not None):
self.keyboardmaps.append(( mapfile,mapname))
def activateKeyboardMap(self, index):
try:
keymap = self.keyboardmaps[index]
print "Activating keymap:",keymap[1]
keymappath = eEnv.resolve('${datadir}/keymaps/') + keymap[0]
if os_path.exists(keymappath):
Console().ePopen(("loadkmap < " + str(keymappath)))
except:
print "Selected keymap does not exist!"
def getKeyboardMaplist(self):
return self.keyboardmaps
def getDefaultKeyboardMap(self):
return 'default.kmap'
keyboard = Keyboard()
| gpl-2.0 |
nan86150/ImageFusion | ENV2.7/lib/python2.7/site-packages/setuptools/command/install_lib.py | 396 | 3771 | import os
import imp
from itertools import product, starmap
import distutils.command.install_lib as orig
class install_lib(orig.install_lib):
    """Don't add compiled flags to filenames of non-Python files"""

    def run(self):
        self.build()
        outfiles = self.install()
        if outfiles is not None:
            # always compile, in case we have any extension stubs to deal with
            self.byte_compile(outfiles)

    def get_exclusions(self):
        """
        Return a collections.Sized collections.Container of paths to be
        excluded for single_version_externally_managed installations.
        """
        # Cross every namespace package (and its parents) with every
        # bytecode/exclusion pattern to get concrete install paths.
        all_packages = (
            pkg
            for ns_pkg in self._get_SVEM_NSPs()
            for pkg in self._all_packages(ns_pkg)
        )
        excl_specs = product(all_packages, self._gen_exclusion_paths())
        return set(starmap(self._exclude_pkg_path, excl_specs))

    def _exclude_pkg_path(self, pkg, exclusion_path):
        """
        Given a package name and exclusion path within that package,
        compute the full exclusion path.
        """
        parts = pkg.split('.') + [exclusion_path]
        return os.path.join(self.install_dir, *parts)

    @staticmethod
    def _all_packages(pkg_name):
        """
        Yield pkg_name and each of its parent packages, outermost last.

        >>> list(install_lib._all_packages('foo.bar.baz'))
        ['foo.bar.baz', 'foo.bar', 'foo']
        """
        while pkg_name:
            yield pkg_name
            pkg_name, sep, child = pkg_name.rpartition('.')

    def _get_SVEM_NSPs(self):
        """
        Get namespace packages (list) but only for
        single_version_externally_managed installations and empty otherwise.
        """
        # TODO: is it necessary to short-circuit here? i.e. what's the cost
        # if get_finalized_command is called even when namespace_packages is
        # False?
        if not self.distribution.namespace_packages:
            return []

        install_cmd = self.get_finalized_command('install')
        svem = install_cmd.single_version_externally_managed

        return self.distribution.namespace_packages if svem else []

    @staticmethod
    def _gen_exclusion_paths():
        """
        Generate file paths to be excluded for namespace packages (bytecode
        cache files).
        """
        # always exclude the package module itself
        yield '__init__.py'

        yield '__init__.pyc'
        yield '__init__.pyo'

        # PEP 3147 __pycache__ names exist only where imp.get_tag() does.
        if not hasattr(imp, 'get_tag'):
            return

        base = os.path.join('__pycache__', '__init__.' + imp.get_tag())
        yield base + '.pyc'
        yield base + '.pyo'

    def copy_tree(
            self, infile, outfile,
            preserve_mode=1, preserve_times=1, preserve_symlinks=0, level=1
    ):
        assert preserve_mode and preserve_times and not preserve_symlinks
        exclude = self.get_exclusions()

        # No namespace-package exclusions: defer to the stock implementation.
        if not exclude:
            return orig.install_lib.copy_tree(self, infile, outfile)

        # Exclude namespace package __init__.py* files from the output
        from setuptools.archive_util import unpack_directory
        from distutils import log

        outfiles = []

        # pf is the unpack filter: returning False skips the file.
        def pf(src, dst):
            if dst in exclude:
                log.warn("Skipping installation of %s (namespace package)",
                         dst)
                return False

            log.info("copying %s -> %s", src, os.path.dirname(dst))
            outfiles.append(dst)
            return dst

        unpack_directory(infile, outfile, pf)
        return outfiles

    def get_outputs(self):
        outputs = orig.install_lib.get_outputs(self)
        exclude = self.get_exclusions()
        if exclude:
            return [f for f in outputs if f not in exclude]
        return outputs
| mit |
hfp/tensorflow-xsmm | tensorflow/examples/adding_an_op/zero_out_3_test.py | 22 | 2003 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test for version 3 of the zero_out op."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.adding_an_op import zero_out_op_3
from tensorflow.python.framework import test_util
class ZeroOut3Test(tf.test.TestCase):
    """Exercises the zero_out v3 custom op: every element except the one at
    preserve_index (default 0) is zeroed, and out-of-range indices raise."""

    @test_util.run_deprecated_v1
    def test(self):
        with self.cached_session():
            result = zero_out_op_3.zero_out([5, 4, 3, 2, 1])
            self.assertAllEqual(result.eval(), [5, 0, 0, 0, 0])

    @test_util.run_deprecated_v1
    def testAttr(self):
        with self.cached_session():
            result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=3)
            self.assertAllEqual(result.eval(), [0, 0, 0, 2, 0])

    @test_util.run_deprecated_v1
    def testNegative(self):
        with self.cached_session():
            result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=-1)
            # The op validates preserve_index at execution time, not graph
            # construction time, hence evaluating inside the assertion.
            with self.assertRaisesOpError("Need preserve_index >= 0, got -1"):
                self.evaluate(result)

    @test_util.run_deprecated_v1
    def testLarge(self):
        with self.cached_session():
            result = zero_out_op_3.zero_out([5, 4, 3, 2, 1], preserve_index=17)
            with self.assertRaisesOpError("preserve_index out of range"):
                self.evaluate(result)

if __name__ == "__main__":
    tf.test.main()
| apache-2.0 |
xuxiao/zulip | zerver/lib/utils.py | 115 | 2978 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import base64
import hashlib
import os
from time import sleep
from django.conf import settings
def statsd_key(val, clean_periods=False):
    """Normalize *val* into a statsd-safe key segment.

    Anything after the first ':' is discarded, dashes become underscores,
    and periods become underscores too when *clean_periods* is set.
    """
    key = val if isinstance(val, str) else str(val)
    if ':' in key:
        key, _, _ = key.partition(':')
    key = key.replace('-', "_")
    if clean_periods:
        key = key.replace('.', '_')
    return key
class StatsDWrapper(object):
    """Transparently either submit metrics to statsd
    or do nothing without erroring out"""

    # Backported support for gauge deltas
    # as our statsd server supports them but supporting
    # pystatsd is not released yet
    def _our_gauge(self, stat, value, rate=1, delta=False):
        """Set a gauge value."""
        from django_statsd.clients import statsd
        if delta:
            # '%+g' forces a leading sign, which the statsd protocol
            # interprets as a delta rather than an absolute value.
            value = '%+g|g' % (value,)
        else:
            value = '%g|g' % (value,)
        statsd._send(stat, value, rate)

    def __getattr__(self, name):
        # Hand off to statsd if we have it enabled
        # otherwise do nothing
        if name in ['timer', 'timing', 'incr', 'decr', 'gauge']:
            if settings.STATSD_HOST != '':
                from django_statsd.clients import statsd
                if name == 'gauge':
                    # Use the backported gauge with delta support above.
                    return self._our_gauge
                else:
                    return getattr(statsd, name)
            else:
                # statsd disabled: swallow the call silently.
                return lambda *args, **kwargs: None

        raise AttributeError

# Module-level singleton used throughout the codebase.
statsd = StatsDWrapper()
# Runs the callback with slices of all_list of a given batch_size
def run_in_batches(all_list, batch_size, callback, sleep_time = 0, logger = None):
    """Invoke *callback* on successive slices of *all_list* of size
    *batch_size*, optionally logging each batch and sleeping *sleep_time*
    seconds between batches (but not after the last one)."""
    if len(all_list) == 0:
        return

    # Ceiling division. The old computation (len/batch + 1) produced a
    # trailing empty batch -- and an extra callback call -- whenever the
    # list length was an exact multiple of batch_size; '//' also keeps
    # this correct under Python 3's true division.
    limit = (len(all_list) + batch_size - 1) // batch_size
    for i in range(limit):
        start = i * batch_size
        end = min((i + 1) * batch_size, len(all_list))
        batch = all_list[start:end]

        if logger:
            logger("Executing %s in batch %s of %s" % (end - start, i + 1, limit))

        callback(batch)

        if i != limit - 1:
            sleep(sleep_time)
def make_safe_digest(string, hash_func=hashlib.sha1):
    """
    return a hex digest of `string`.
    """
    # hashlib functions require bytes, so encode first; `string` may
    # contain non-ASCII text.
    encoded = string.encode('utf-8')
    return hash_func(encoded).hexdigest()
def log_statsd_event(name):
    """Send a single event to statsd under events.<name>, stamped with the
    current time.

    Useful for drawing vertical marker lines in generated graphs -- e.g.
    a prod deploy, bankruptcy request, or other one-off event; with
    graphite, render the event via drawAsInfinite().
    """
    statsd.incr("events.%s" % (name,))
def generate_random_token(length):
    """Return a random lowercase-hex token of *length* characters.

    *length* should be even: each random byte expands to two hex digits.
    """
    # '//' keeps the byte count an int under true division as well;
    # os.urandom rejects floats.
    return base64.b16encode(os.urandom(length // 2)).lower()
| apache-2.0 |
bavardage/statsmodels | statsmodels/sandbox/examples/ex_gam_results.py | 37 | 1660 | # -*- coding: utf-8 -*-
"""Example results for GAM from tests
Created on Mon Nov 07 13:13:15 2011
Author: Josef Perktold
The example is loaded from a test module. The test still fails but the
results look relatively good.
I don't know yet why there is the small difference and why GAM doesn't
converge in this case
"""
from statsmodels.sandbox.tests.test_gam import _estGAMGaussianLogLink

# Fit the example GAM from the test module and pull the smoothed,
# demeaned component estimates.
tt = _estGAMGaussianLogLink()
comp, const = tt.res_gam.smoothed_demeaned(tt.mod_gam.exog)

# GLM counterpart: per-observation contribution of each regressor column.
comp_glm_ = tt.res2.model.exog * tt.res2.params

# First smooth component: columns 1..3, demeaned for comparability.
comp1 = comp_glm_[:,1:4].sum(1)
mean1 = comp1.mean()
comp1 -= mean1
# Second smooth component: remaining columns, demeaned.
comp2 = comp_glm_[:,4:].sum(1)
mean2 = comp2.mean()
comp2 -= mean2

# True (data-generating) components, demeaned the same way.
comp1_true = tt.res2.model.exog[:,1:4].sum(1)
mean1 = comp1_true.mean()
comp1_true -= mean1
comp2_true = tt.res2.model.exog[:,4:].sum(1)
mean2 = comp2_true.mean()
comp2_true -= mean2

# Residual noise on the response scale and on the linear-predictor scale.
noise = tt.res2.model.endog - tt.mu_true
noise_eta = tt.family.link(tt.res2.model.endog) - tt.y_true

import matplotlib.pyplot as plt
plt.figure()
plt.plot(noise, 'k.')
plt.figure()
plt.plot(comp, 'r-')
plt.plot(comp1, 'b-')
plt.plot(comp2, 'b-')
plt.plot(comp1_true, 'k--', lw=2)
plt.plot(comp2_true, 'k--', lw=2)
#the next doesn't make sense - non-linear
#c1 = tt.family.link(tt.family.link.inverse(comp1_true) + noise)
#c2 = tt.family.link(tt.family.link.inverse(comp2_true) + noise)
#not nice in example/plot: noise variance is constant not proportional
plt.plot(comp1_true + noise_eta, 'g.', alpha=0.95)
plt.plot(comp2_true + noise_eta, 'r.', alpha=0.95)
#plt.plot(c1, 'g.', alpha=0.95)
#plt.plot(c2, 'r.', alpha=0.95)
plt.title('Gaussian loglink, GAM (red), GLM (blue), true (black)')
plt.show()
| bsd-3-clause |
burakgon/E7_Elite_kernel | bionic/libc/kernel/tools/find_headers.py | 10 | 5175 | #!/usr/bin/env python
#
# this program is used to find source code that includes linux kernel headers directly
# (e.g. with #include <linux/...> or #include <asm/...>)
#
# then it lists them on the standard output.
import sys, cpp, glob, os, re, getopt, kernel
from utils import *
from defaults import *
program_dir = find_program_dir()
wanted_archs = kernel_archs
wanted_config = None
def usage():
    """Print command-line help to stdout and exit with status 1."""
    # NOTE(review): 'string' is not imported by name in this file; it is
    # presumably provided by one of the wildcard imports (utils/defaults)
    # -- confirm.
    print """\
  usage: find_headers.py [options] <kernel-root> (file|directory|@listfile)+

    options:
        -c <file>          specify .config file (none by default)

        -a <archs>         used to specify an alternative list
                           of architectures to support
                           ('%s' by default)

        -v                 enable verbose mode

    this program is used to find all the kernel headers that are used
    by a set of source files or directories containing them. the search
    is recursive to find *all* required files.

""" % ( string.join(kernel_archs,",") )
    sys.exit(1)
# Parse and validate command-line options.
try:
    optlist, args = getopt.getopt( sys.argv[1:], 'vc:d:a:k:' )
except:
    # unrecognized option
    print "error: unrecognized option"
    usage()

for opt, arg in optlist:
    if opt == '-a':
        wanted_archs = string.split(arg,',')
    elif opt == '-c':
        wanted_config = arg
    elif opt == '-v':
        kernel.verboseSearch = 1
        kernel.verboseFind = 1
        # NOTE(review): 'verbose' is only assigned here; its use below
        # presumably relies on a default from the wildcard imports -- confirm.
        verbose = 1
    else:
        usage()

if len(args) < 2:
    usage()

# Sanity-check the kernel source tree argument.
kernel_root = args[0]
if not os.path.exists(kernel_root):
    sys.stderr.write( "error: directory '%s' does not exist\n" % kernel_root )
    sys.exit(1)

if not os.path.isdir(kernel_root):
    sys.stderr.write( "error: '%s' is not a directory\n" % kernel_root )
    sys.exit(1)

if not os.path.isdir(kernel_root+"/include/linux"):
    sys.stderr.write( "error: '%s' does not have an 'include/linux' directory\n" % kernel_root )
    sys.exit(1)

# Sanity-check the optional .config file.
if wanted_config:
    if not os.path.exists(wanted_config):
        sys.stderr.write( "error: file '%s' does not exist\n" % wanted_config )
        sys.exit(1)

    if not os.path.isfile(wanted_config):
        sys.stderr.write( "error: '%s' is not a file\n" % wanted_config )
        sys.exit(1)

# find all architectures in the kernel tree
archs = []
for archdir in os.listdir(kernel_root+"/arch"):
    if os.path.exists("%s/arch/%s/include/asm" % (kernel_root, archdir)):
        if verbose:
            print "Found arch '%s'" % archdir
        archs.append(archdir)

# if we're using the 'kernel_headers' directory, there is only asm/
# and no other asm-<arch> directories
#
in_kernel_headers = False
if len(archs) == 0:
    # this can happen when we're using the 'kernel_headers' directory
    if os.path.isdir(kernel_root+"/asm"):
        in_kernel_headers = True
        archs = [ "arm", "mips"]

# if the user has specified some architectures with -a <archs> ensure that
# all those he wants are available from the kernel include tree
if wanted_archs != None:
    if in_kernel_headers and wanted_archs != [ "arm", "mips" ]:
        sys.stderr.write( "error: when parsing kernel_headers, only 'arm' and 'mips' architectures are supported at the moment\n" )
        sys.exit(1)
    missing = []
    for arch in wanted_archs:
        if arch not in archs:
            missing.append(arch)
    if len(missing) > 0:
        sys.stderr.write( "error: the following requested architectures are not in the kernel tree: " )
        for a in missing:
            sys.stderr.write( " %s" % a )
        sys.stderr.write( "\n" )
        sys.exit(1)

    archs = wanted_archs
# helper function used to walk the user files
def parse_file(path, parser):
    """Feed a single source file to *parser* (a kernel.HeaderScanner)."""
    #print "parse %s" % path
    parser.parseFile(path)
# remove previous destination directory
#destdir = "/tmp/bionic-kernel-headers/"
#cleanup_dir(destdir)
# try to read the config file
try:
cparser = kernel.ConfigParser()
if wanted_config:
cparser.parseFile( wanted_config )
except:
sys.stderr.write( "error: can't parse '%s'" % wanted_config )
sys.exit(1)
kernel_config = cparser.getDefinitions()
# first, obtain the list of kernel files used by our clients
fparser = kernel.HeaderScanner()
dir_excludes=[".repo","external/kernel-headers","ndk","out","prebuilt","bionic/libc/kernel","development/ndk","external/qemu/distrib"]
walk_source_files( args[1:], parse_file, fparser, excludes=["./"+f for f in dir_excludes] )
headers = fparser.getHeaders()
files = fparser.getFiles()
# now recursively scan the kernel headers for additional sub-included headers
hparser = kernel.KernelHeaderFinder(headers,archs,kernel_root,kernel_config)
headers = hparser.scanForAllArchs()
if 0: # just for debugging
dumpHeaderUsers = False
print "the following %d headers:" % len(headers)
for h in sorted(headers):
if dumpHeaderUsers:
print " %s (%s)" % (h, repr(hparser.getHeaderUsers(h)))
else:
print " %s" % h
print "are used by the following %d files:" % len(files)
for f in sorted(files):
print " %s" % f
sys.exit(0)
for h in sorted(headers):
print "%s" % h
sys.exit(0)
| gpl-2.0 |
antoinearnoud/openfisca-france-indirect-taxation | openfisca_france_indirect_taxation/__init__.py | 4 | 2591 | # -*- coding: utf-8 -*-
# OpenFisca -- A versatile microsimulation software
# By: OpenFisca Team <contact@openfisca.fr>
#
# Copyright (C) 2011, 2012, 2013, 2014 OpenFisca Team
# https://github.com/openfisca
#
# This file is part of OpenFisca.
#
# OpenFisca is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# OpenFisca is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import os
from openfisca_core.taxbenefitsystems import XmlBasedTaxBenefitSystem
from .entities import entity_class_by_symbol
from .scenarios import Scenario
from . import param
from .param import preprocessing
# TaxBenefitSystems
def init_country():
    """Build and return the country-specific `TaxBenefitSystem` class.

    The class is created lazily so that importing this package stays cheap.
    Note the `.model` import at the end: it registers the model's variables
    as a side effect and must run only after the class attributes
    (in particular `Scenario`) have been set.
    """
    class TaxBenefitSystem(XmlBasedTaxBenefitSystem):
        # Map each entity's plural key to its entity class.
        entity_class_by_key_plural = {
            entity_class.key_plural: entity_class
            for entity_class in entity_class_by_symbol.itervalues()
            }
        # Legislation parameters live next to the `param` package.
        legislation_xml_file_path = os.path.join(
            os.path.dirname(os.path.abspath(param.__file__)),
            'parameters.xml'
            )
        preprocess_legislation = staticmethod(preprocessing.preprocess_legislation)

        def prefill_cache(self):
            """Preload the data frames backing generated variables."""
            # Define categorie_fiscale_* and poste_coicp_* variables
            from .model.consommation import categories_fiscales
            categories_fiscales.preload_categories_fiscales_data_frame()
            from .model.consommation import postes_coicop
            postes_coicop.preload_postes_coicop_data_frame()
            # Reindex columns since preload functions generate new columns.
            self.index_columns()

    # Define class attributes after class declaration to avoid "name is not defined" exceptions.
    TaxBenefitSystem.Scenario = Scenario
    from .model import model  # noqa analysis:ignore
    return TaxBenefitSystem
def init_tax_benefit_system():
    """Build and return a ready-to-use tax benefit system instance.

    Most callers want this helper; use `init_country` instead when the
    `TaxBenefitSystem` class itself is needed.
    """
    tax_benefit_system_class = init_country()
    return tax_benefit_system_class()
| agpl-3.0 |
yoer/hue | desktop/core/ext-py/thrift-0.9.1/setup.py | 42 | 3274 | #!/usr/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import sys
try:
from setuptools import setup, Extension
except:
from distutils.core import setup, Extension, Command
from distutils.command.build_ext import build_ext
from distutils.errors import CCompilerError, DistutilsExecError, DistutilsPlatformError
include_dirs = []
if sys.platform == 'win32':
include_dirs.append('compat/win32')
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError, IOError)
else:
ext_errors = (CCompilerError, DistutilsExecError, DistutilsPlatformError)
class BuildFailed(Exception):
    """Signals that compiling the optional C extension failed."""
    pass
class ve_build_ext(build_ext):
    """A build_ext that converts known compiler failures into BuildFailed.

    This lets the caller retry a pure-Python installation instead of
    aborting the whole setup when the C extension cannot be compiled.
    """

    def run(self):
        try:
            build_ext.run(self)
        except DistutilsPlatformError:
            # Was Python-2-only `except DistutilsPlatformError, x:`; the
            # bound exception was unused, so the modern form drops it.
            raise BuildFailed()

    def build_extension(self, ext):
        try:
            build_ext.build_extension(self, ext)
        except ext_errors:
            raise BuildFailed()
def run_setup(with_binary):
    """Invoke setup(), optionally building the accelerated C extension.

    When *with_binary* is true the `thrift.protocol.fastbinary` extension is
    compiled via ve_build_ext, so compiler failures raise BuildFailed and the
    caller can retry a pure-Python installation.
    """
    if with_binary:
        extensions = dict(
            ext_modules = [
                Extension('thrift.protocol.fastbinary',
                          sources = ['src/protocol/fastbinary.c'],
                          include_dirs = include_dirs,
                          )
            ],
            cmdclass=dict(build_ext=ve_build_ext)
        )
    else:
        extensions = dict()

    # NOTE(review): distutils documents `author`/`author_email` as strings,
    # not lists -- the lists below work only incidentally; confirm before changing.
    setup(name = 'thrift',
          version = '0.9.1',
          description = 'Python bindings for the Apache Thrift RPC system',
          author = ['Thrift Developers'],
          author_email = ['dev@thrift.apache.org'],
          url = 'http://thrift.apache.org',
          license = 'Apache License 2.0',
          packages = [
              'thrift',
              'thrift.protocol',
              'thrift.transport',
              'thrift.server',
          ],
          package_dir = {'thrift' : 'src'},
          classifiers = [
              'Development Status :: 5 - Production/Stable',
              'Environment :: Console',
              'Intended Audience :: Developers',
              'Programming Language :: Python',
              'Programming Language :: Python :: 2',
              'Topic :: Software Development :: Libraries',
              'Topic :: System :: Networking'
          ],
          **extensions
          )
# Build with the C extension first; if compilation fails, retry without it
# so that a pure-Python installation still succeeds.
try:
    run_setup(True)
except BuildFailed:
    print
    print '*' * 80
    print "An error occured while trying to compile with the C extension enabled"
    print "Attempting to build without the extension now"
    print '*' * 80
    print

    run_setup(False)
| apache-2.0 |
mapr/hue | desktop/core/ext-py/Django-1.6.10/tests/comment_tests/tests/test_app_api.py | 58 | 2664 | from __future__ import absolute_import
from django.conf import settings
from django.contrib import comments
from django.contrib.comments.models import Comment
from django.contrib.comments.forms import CommentForm
from django.core.exceptions import ImproperlyConfigured
from django.test.utils import override_settings
from django.utils import six
from . import CommentTestCase
class CommentAppAPITests(CommentTestCase):
    """Tests for the "comment app" API"""

    def testGetCommentApp(self):
        # With no COMMENTS_APP override, the framework itself is the app.
        self.assertEqual(comments.get_comment_app(), comments)

    @override_settings(
        COMMENTS_APP='missing_app',
        INSTALLED_APPS=list(settings.INSTALLED_APPS) + ['missing_app'],
    )
    def testGetMissingCommentApp(self):
        # A COMMENTS_APP that cannot be imported must raise ImproperlyConfigured.
        with six.assertRaisesRegex(self, ImproperlyConfigured, 'missing_app'):
            _ = comments.get_comment_app()

    def testGetForm(self):
        self.assertEqual(comments.get_form(), CommentForm)

    def testGetFormTarget(self):
        self.assertEqual(comments.get_form_target(), "/post/")

    def testGetFlagURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_flag_url(c), "/flag/12345/")

    # NOTE(review): the two methods below are named "getGet..." rather than
    # "testGet...", so the unittest runner never discovers or executes them.
    # Renaming them would enable the assertions -- confirm they pass first.
    def getGetDeleteURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_delete_url(c), "/delete/12345/")

    def getGetApproveURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
@override_settings(
    COMMENTS_APP='comment_tests.custom_comments',
    INSTALLED_APPS=list(settings.INSTALLED_APPS) + [
        'comment_tests.custom_comments'],
)
class CustomCommentTest(CommentTestCase):
    """Comment-app API checks with a custom app configured via COMMENTS_APP."""
    urls = 'comment_tests.urls'

    def testGetCommentApp(self):
        from comment_tests import custom_comments
        self.assertEqual(comments.get_comment_app(), custom_comments)

    def testGetModel(self):
        from comment_tests.custom_comments.models import CustomComment
        self.assertEqual(comments.get_model(), CustomComment)

    def testGetForm(self):
        from comment_tests.custom_comments.forms import CustomCommentForm
        self.assertEqual(comments.get_form(), CustomCommentForm)

    def testGetFormTarget(self):
        self.assertEqual(comments.get_form_target(), "/post/")

    def testGetFlagURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_flag_url(c), "/flag/12345/")

    # NOTE(review): as in CommentAppAPITests, the "getGet..." names below are
    # never picked up by unittest discovery; these assertions do not run.
    def getGetDeleteURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_delete_url(c), "/delete/12345/")

    def getGetApproveURL(self):
        c = Comment(id=12345)
        self.assertEqual(comments.get_approve_url(c), "/approve/12345/")
| apache-2.0 |
mlc0202/vitess | test/zkocc_test.py | 3 | 5876 | #!/usr/bin/env python
import datetime
import json
import logging
import os
import re
import tempfile
import time
import unittest
import environment
import tablet
import utils
from zk import zkocc
# We check in this test that we can achieve at least this QPS.
# Sometimes on slow machines this won't work.
# We used to observe 30k+ QPS on workstations. This has gone down to 13k.
# So now the value we check is 5k, and we have an action item to look into it.
MIN_QPS = 5000
def setUpModule():
    """Bring up the topology server once for all tests in this module."""
    try:
        environment.topo_server().setup()
    except:
        # Clean up whatever was started, then propagate the original failure.
        tearDownModule()
        raise
def tearDownModule():
    """Tear down the topology server and kill leftover test processes."""
    if utils.options.skip_teardown:
        return
    environment.topo_server().teardown()
    utils.kill_sub_processes()
    utils.remove_tmp_files()
class TopoOccTest(unittest.TestCase):
    """End-to-end checks of vtgate's topology query API via zkclient2."""

    def setUp(self):
        # Every test starts from an empty topology and a fresh vtgate.
        environment.topo_server().wipe()
        utils.VtGate().start()

    def tearDown(self):
        utils.vtgate.kill()

    def rebuild(self):
        """Rebuild the serving graph for test_keyspace."""
        utils.run_vtctl(['RebuildKeyspaceGraph', '-rebuild_srv_shards', 'test_keyspace'], auto_log=True)

    def test_get_srv_keyspace_names(self):
        # Two keyspaces, one master tablet each.
        utils.run_vtctl('CreateKeyspace test_keyspace1')
        utils.run_vtctl('CreateKeyspace test_keyspace2')
        t1 = tablet.Tablet(tablet_uid=1, cell="nj")
        t1.init_tablet("master", "test_keyspace1", "0")
        t1.update_addrs()
        t2 = tablet.Tablet(tablet_uid=2, cell="nj")
        t2.init_tablet("master", "test_keyspace2", "0")
        t2.update_addrs()
        utils.run_vtctl(['RebuildKeyspaceGraph', 'test_keyspace*'], auto_log=True)

        # vtgate API test
        out, err = utils.run(environment.binary_argstr('zkclient2')+' -server localhost:%d -mode getSrvKeyspaceNames test_nj' % utils.vtgate.port, trap_output=True)
        self.assertEqual(err, "KeyspaceNames[0] = test_keyspace1\n" +
                         "KeyspaceNames[1] = test_keyspace2\n")

    def test_get_srv_keyspace(self):
        utils.run_vtctl('CreateKeyspace test_keyspace')
        t = tablet.Tablet(tablet_uid=1, cell="nj")
        t.init_tablet("master", "test_keyspace", "0")
        utils.run_vtctl('UpdateTabletAddrs -hostname localhost -ip-addr 127.0.0.1 -mysql-port %s %s' % (t.mysql_port, t.tablet_alias))
        self.rebuild()

        # vtgate zk API test
        out, err = utils.run(environment.binary_argstr('zkclient2')+' -server localhost:%d -mode getSrvKeyspace test_nj test_keyspace' % utils.vtgate.port, trap_output=True)
        self.assertEqual(err, "Partitions[master] =\n" +
                         " ShardReferences[0]={Start: , End: }\n" +
                         "Partitions[rdonly] =\n" +
                         " ShardReferences[0]={Start: , End: }\n" +
                         "Partitions[replica] =\n" +
                         " ShardReferences[0]={Start: , End: }\n",
                         "Got wrong content: %s" % err)

    def test_get_end_points(self):
        utils.run_vtctl('CreateKeyspace test_keyspace')
        t = tablet.Tablet(tablet_uid=1, cell="nj")
        t.init_tablet("master", "test_keyspace", "0")
        t.update_addrs()
        self.rebuild()

        # vtgate zk API test
        out, err = utils.run(environment.binary_argstr('zkclient2')+' -server localhost:%d -mode getEndPoints test_nj test_keyspace 0 master' % utils.vtgate.port, trap_output=True)
        self.assertEqual(err, "Entries[0] = 1 localhost\n")
class TestTopo(unittest.TestCase):
    """Topology throughput test and checks of the fake zkocc client."""
    longMessage = True

    def setUp(self):
        environment.topo_server().wipe()

    # test_vtgate_qps can be run to profile vtgate:
    # Just run:
    #   ./zkocc_test.py -v TestTopo.test_vtgate_qps --skip-teardown
    # Then run:
    #   go tool pprof $VTROOT/bin/vtgate $VTDATAROOT/tmp/vtgate.pprof
    # (or with zkclient2 for the client side)
    # and for instance type 'web' in the prompt.
    def test_vtgate_qps(self):
        # create the topology
        utils.run_vtctl('CreateKeyspace test_keyspace')
        t = tablet.Tablet(tablet_uid=1, cell="nj")
        t.init_tablet("master", "test_keyspace", "0")
        t.update_addrs()
        utils.run_vtctl('RebuildKeyspaceGraph test_keyspace', auto_log=True)

        # start vtgate and the qps-er
        utils.VtGate().start(
            extra_args=['-cpu_profile', os.path.join(environment.tmproot,
                                                     'vtgate.pprof')])
        qpser = utils.run_bg(environment.binary_args('zkclient2') + [
            '-server', utils.vtgate.addr(),
            '-mode', 'qps',
            '-zkclient_cpu_profile', os.path.join(environment.tmproot, 'zkclient2.pprof'),
            'test_nj', 'test_keyspace'])
        qpser.wait()

        # get the vtgate vars, make sure we have what we need
        v = utils.vtgate.get_vars()

        # some checks on performance / stats
        # NOTE(review): the `* 10` / `/ 10` below suggests the qps run lasts
        # 10 seconds -- confirm against zkclient2's qps mode before changing.
        rpcCalls = v['TopoReaderRpcQueryCount']['test_nj']
        if rpcCalls < MIN_QPS * 10:
            self.fail('QPS is too low: %d < %d' % (rpcCalls / 10, MIN_QPS))
        else:
            logging.debug("Recorded qps: %d", rpcCalls / 10)
        utils.vtgate.kill()

    def test_fake_zkocc_connection(self):
        # The fake client reads a canned JSON config and supports string
        # substitution on the stored data.
        fkc = zkocc.FakeZkOccConnection.from_data_path("testing", environment.vttop + "/test/fake_zkocc_config.json")
        fkc.replace_zk_data("3306", "3310")
        fkc.replace_zk_data("127.0.0.1", "my.cool.hostname")

        # new style API tests
        keyspaces = fkc.get_srv_keyspace_names('testing')
        self.assertEqual(keyspaces, ["test_keyspace"], "get_srv_keyspace_names doesn't work")
        keyspace = fkc.get_srv_keyspace('testing', 'test_keyspace')
        self.assertEqual({
            'ShardReferences': [{
                'KeyRange': {'End': '\xd0', 'Start': '\xc0'},
                'Name': 'c0-d0'}]},
            keyspace, "keyspace reading is wrong")
        end_points = fkc.get_end_points("testing", "test_keyspace", "0", "master")
        self.assertEqual({
            'entries': [{'host': 'my.cool.hostname',
                         'named_port_map': {'mysql': 3310, 'vt': 6711},
                         'port': 0,
                         'uid': 0}]},
            end_points, "end points are wrong")
if __name__ == '__main__':
    # Delegates flag parsing and unittest execution to the shared test harness.
    utils.main()
| bsd-3-clause |
tuxfux-hlp-notes/python-batches | archieves/batch-61/modules/myenv/lib/python2.7/site-packages/pip/_vendor/requests/packages/charade/langthaimodel.py | 2930 | 11275 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# The following result for thai was collected from a limited sample (1M).
# Character Mapping Table:
TIS620CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255, # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255, # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253, # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253, # 30
253,182,106,107,100,183,184,185,101, 94,186,187,108,109,110,111, # 40
188,189,190, 89, 95,112,113,191,192,193,194,253,253,253,253,253, # 50
253, 64, 72, 73,114, 74,115,116,102, 81,201,117, 90,103, 78, 82, # 60
96,202, 91, 79, 84,104,105, 97, 98, 92,203,253,253,253,253,253, # 70
209,210,211,212,213, 88,214,215,216,217,218,219,220,118,221,222,
223,224, 99, 85, 83,225,226,227,228,229,230,231,232,233,234,235,
236, 5, 30,237, 24,238, 75, 8, 26, 52, 34, 51,119, 47, 58, 57,
49, 53, 55, 43, 20, 19, 44, 14, 48, 3, 17, 25, 39, 62, 31, 54,
45, 9, 16, 2, 61, 15,239, 12, 42, 46, 18, 21, 76, 4, 66, 63,
22, 10, 1, 36, 23, 13, 40, 27, 32, 35, 86,240,241,242,243,244,
11, 28, 41, 29, 33,245, 50, 37, 6, 7, 67, 77, 38, 93,246,247,
68, 56, 59, 65, 69, 60, 70, 80, 71, 87,248,249,250,251,252,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 92.6386%
# first 1024 sequences:7.3177%
# rest sequences: 1.0230%
# negative sequences: 0.0436%
ThaiLangModel = (
0,1,3,3,3,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,0,0,3,3,3,0,3,3,3,3,
0,3,3,0,0,0,1,3,0,3,3,2,3,3,0,1,2,3,3,3,3,0,2,0,2,0,0,3,2,1,2,2,
3,0,3,3,2,3,0,0,3,3,0,3,3,0,3,3,3,3,3,3,3,3,3,0,3,2,3,0,2,2,2,3,
0,2,3,0,0,0,0,1,0,1,2,3,1,1,3,2,2,0,1,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,3,3,2,3,2,3,3,2,2,2,
3,1,2,3,0,3,3,2,2,1,2,3,3,1,2,0,1,3,0,1,0,0,1,0,0,0,0,0,0,0,1,1,
3,3,2,2,3,3,3,3,1,2,3,3,3,3,3,2,2,2,2,3,3,2,2,3,3,2,2,3,2,3,2,2,
3,3,1,2,3,1,2,2,3,3,1,0,2,1,0,0,3,1,2,1,0,0,1,0,0,0,0,0,0,1,0,1,
3,3,3,3,3,3,2,2,3,3,3,3,2,3,2,2,3,3,2,2,3,2,2,2,2,1,1,3,1,2,1,1,
3,2,1,0,2,1,0,1,0,1,1,0,1,1,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,2,2,3,2,3,3,2,3,1,1,2,3,2,2,2,3,2,2,2,2,2,1,2,1,
2,2,1,1,3,3,2,1,0,1,2,2,0,1,3,0,0,0,1,1,0,0,0,0,0,2,3,0,0,2,1,1,
3,3,2,3,3,2,0,0,3,3,0,3,3,0,2,2,3,1,2,2,1,1,1,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,0,0,1,0,0,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,0,0,3,3,0,2,3,0,2,1,2,2,2,2,1,2,0,0,2,2,2,0,2,2,1,1,
0,2,1,0,2,0,0,2,0,1,1,0,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,3,2,3,2,0,2,2,1,3,2,1,3,2,1,2,3,2,2,3,0,2,3,2,2,1,2,2,2,2,
1,2,2,0,0,0,0,2,0,1,2,0,1,1,1,0,1,0,3,1,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,2,3,3,2,3,2,2,2,3,2,2,3,2,2,1,2,3,2,2,3,1,3,2,2,2,3,2,2,2,3,
3,2,1,3,0,1,1,1,0,2,1,1,1,1,1,0,1,0,1,1,0,0,0,0,0,0,0,0,0,2,0,0,
1,0,0,3,0,3,3,3,3,3,0,0,3,0,2,2,3,3,3,3,3,0,0,0,1,1,3,0,0,0,0,2,
0,0,1,0,0,0,0,0,0,0,2,3,0,0,0,3,0,2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,3,3,3,3,0,0,2,3,0,0,3,0,3,3,2,3,3,3,3,3,0,0,3,3,3,0,0,0,3,3,
0,0,3,0,0,0,0,2,0,0,2,1,1,3,0,0,1,0,0,2,3,0,1,0,0,0,0,0,0,0,1,0,
3,3,3,3,2,3,3,3,3,3,3,3,1,2,1,3,3,2,2,1,2,2,2,3,1,1,2,0,2,1,2,1,
2,2,1,0,0,0,1,1,0,1,0,1,1,0,0,0,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,
3,0,2,1,2,3,3,3,0,2,0,2,2,0,2,1,3,2,2,1,2,1,0,0,2,2,1,0,2,1,2,2,
0,1,1,0,0,0,0,1,0,1,1,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,3,3,1,1,3,0,2,3,1,1,3,2,1,1,2,0,2,2,3,2,1,1,1,1,1,2,
3,0,0,1,3,1,2,1,2,0,3,0,0,0,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,
3,3,1,1,3,2,3,3,3,1,3,2,1,3,2,1,3,2,2,2,2,1,3,3,1,2,1,3,1,2,3,0,
2,1,1,3,2,2,2,1,2,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,
3,3,2,3,2,3,3,2,3,2,3,2,3,3,2,1,0,3,2,2,2,1,2,2,2,1,2,2,1,2,1,1,
2,2,2,3,0,1,3,1,1,1,1,0,1,1,0,2,1,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,3,2,2,1,1,3,2,3,2,3,2,0,3,2,2,1,2,0,2,2,2,1,2,2,2,2,1,
3,2,1,2,2,1,0,2,0,1,0,0,1,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,1,
3,3,3,3,3,2,3,1,2,3,3,2,2,3,0,1,1,2,0,3,3,2,2,3,0,1,1,3,0,0,0,0,
3,1,0,3,3,0,2,0,2,1,0,0,3,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,2,3,2,3,3,0,1,3,1,1,2,1,2,1,1,3,1,1,0,2,3,1,1,1,1,1,1,1,1,
3,1,1,2,2,2,2,1,1,1,0,0,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,2,2,1,1,2,1,3,3,2,3,2,2,3,2,2,3,1,2,2,1,2,0,3,2,1,2,2,2,2,2,1,
3,2,1,2,2,2,1,1,1,1,0,0,1,1,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,1,3,3,0,2,1,0,3,2,0,0,3,1,0,1,1,0,1,0,0,0,0,0,1,
1,0,0,1,0,3,2,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,2,2,3,0,0,1,3,0,3,2,0,3,2,2,3,3,3,3,3,1,0,2,2,2,0,2,2,1,2,
0,2,3,0,0,0,0,1,0,1,0,0,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,
3,0,2,3,1,3,3,2,3,3,0,3,3,0,3,2,2,3,2,3,3,3,0,0,2,2,3,0,1,1,1,3,
0,0,3,0,0,0,2,2,0,1,3,0,1,2,2,2,3,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,
3,2,3,3,2,0,3,3,2,2,3,1,3,2,1,3,2,0,1,2,2,0,2,3,2,1,0,3,0,0,0,0,
3,0,0,2,3,1,3,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,3,2,2,2,1,2,0,1,3,1,1,3,1,3,0,0,2,1,1,1,1,2,1,1,1,0,2,1,0,1,
1,2,0,0,0,3,1,1,0,0,0,0,1,0,1,0,0,1,0,1,0,0,0,0,0,3,1,0,0,0,1,0,
3,3,3,3,2,2,2,2,2,1,3,1,1,1,2,0,1,1,2,1,2,1,3,2,0,0,3,1,1,1,1,1,
3,1,0,2,3,0,0,0,3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,2,3,0,3,3,0,2,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,2,3,1,3,0,0,1,2,0,0,2,0,3,3,2,3,3,3,2,3,0,0,2,2,2,0,0,0,2,2,
0,0,1,0,0,0,0,3,0,0,0,0,2,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,3,0,2,0,0,0,0,0,0,0,0,0,0,1,2,3,1,3,3,0,0,1,0,3,0,0,0,0,0,
0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,2,3,1,2,3,1,0,3,0,2,2,1,0,2,1,1,2,0,1,0,0,1,1,1,1,0,1,0,0,
1,0,0,0,0,1,1,0,3,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,2,1,0,1,1,1,3,1,2,2,2,2,2,2,1,1,1,1,0,3,1,0,1,3,1,1,1,1,
1,1,0,2,0,1,3,1,1,0,0,1,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,2,0,1,
3,0,2,2,1,3,3,2,3,3,0,1,1,0,2,2,1,2,1,3,3,1,0,0,3,2,0,0,0,0,2,1,
0,1,0,0,0,0,1,2,0,1,1,3,1,1,2,2,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,3,0,0,1,0,0,0,3,0,0,3,0,3,1,0,1,1,1,3,2,0,0,0,3,0,0,0,0,2,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
3,3,1,3,2,1,3,3,1,2,2,0,1,2,1,0,1,2,0,0,0,0,0,3,0,0,0,3,0,0,0,0,
3,0,0,1,1,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,2,0,3,3,3,2,2,0,1,1,0,1,3,0,0,0,2,2,0,0,0,0,3,1,0,1,0,0,0,
0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,2,3,1,2,0,0,2,1,0,3,1,0,1,2,0,1,1,1,1,3,0,0,3,1,1,0,2,2,1,1,
0,2,0,0,0,0,0,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,3,1,2,0,0,2,2,0,1,2,0,1,0,1,3,1,2,1,0,0,0,2,0,3,0,0,0,1,0,
0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,1,2,2,0,0,0,2,0,2,1,0,1,1,0,1,1,1,2,1,0,0,1,1,1,0,2,1,1,1,
0,1,1,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,1,
0,0,0,2,0,1,3,1,1,1,1,0,0,0,0,3,2,0,1,0,0,0,1,2,0,0,0,1,0,0,0,0,
0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,3,3,3,3,1,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,2,3,2,2,0,0,0,1,0,0,0,0,2,3,2,1,2,2,3,0,0,0,2,3,1,0,0,0,1,1,
0,0,1,0,0,0,0,0,0,0,1,0,0,1,0,0,0,0,0,1,1,0,1,0,0,0,0,0,0,0,0,0,
3,3,2,2,0,1,0,0,0,0,2,0,2,0,1,0,0,0,1,1,0,0,0,2,1,0,1,0,1,1,0,0,
0,1,0,2,0,0,1,0,3,0,1,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,1,0,0,1,0,0,0,0,0,1,1,2,0,0,0,0,1,0,0,1,3,1,0,0,0,0,1,1,0,0,
0,1,0,0,0,0,3,0,0,0,0,0,0,3,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,
3,3,1,1,1,1,2,3,0,0,2,1,1,1,1,1,0,2,1,1,0,0,0,2,1,0,1,2,1,1,0,1,
2,1,0,3,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,3,1,0,0,0,0,0,0,0,3,0,0,0,3,0,0,0,0,0,0,0,0,1,1,0,0,0,0,0,0,1,
0,0,0,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,2,0,0,0,0,0,0,1,2,1,0,1,1,0,2,0,0,1,0,0,2,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,2,0,0,0,1,3,0,1,0,0,0,2,0,0,0,0,0,0,0,1,2,0,0,0,0,0,
3,3,0,0,1,1,2,0,0,1,2,1,0,1,1,1,0,1,1,0,0,2,1,1,0,1,0,0,1,1,1,0,
0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,3,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,1,0,0,0,0,1,0,0,0,0,3,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,0,0,1,1,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,0,1,2,0,1,2,0,0,1,1,0,2,0,1,0,0,1,0,0,0,0,1,0,0,0,2,0,0,0,0,
1,0,0,1,0,1,1,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,1,0,0,0,0,0,0,0,1,1,0,1,1,0,2,1,3,0,0,0,0,1,1,0,0,0,0,0,0,0,3,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,1,0,1,0,0,2,0,0,2,0,0,1,1,2,0,0,1,1,0,0,0,1,0,0,0,1,1,0,0,0,
1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,
1,0,0,3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,1,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,3,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,2,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,1,3,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,1,0,0,0,0,
1,0,0,0,0,0,0,0,0,1,0,0,0,0,2,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,1,0,0,2,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
)
TIS620ThaiModel = {
'charToOrderMap': TIS620CharToOrderMap,
'precedenceMatrix': ThaiLangModel,
'mTypicalPositiveRatio': 0.926386,
'keepEnglishLetter': False,
'charsetName': "TIS-620"
}
# flake8: noqa
| gpl-3.0 |
Dhivyap/ansible | test/units/modules/network/check_point/test_cp_mgmt_time.py | 19 | 5179 | # Ansible module to manage CheckPoint Firewall (c) 2019
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import pytest
from units.modules.utils import set_module_args, exit_json, fail_json, AnsibleExitJson
from ansible.module_utils import basic
from ansible.modules.network.check_point import cp_mgmt_time
OBJECT = {
"name": "timeObject1",
"end": {
"date": "24-Nov-2014",
"time": "21:22"
},
"recurrence": {
"pattern": "Daily",
"month": "Any",
"weekdays": [
"Sun",
"Mon"
],
"days": [
"1"
]
},
"start_now": True,
"end_never": False,
"hours_ranges": [
{
"from": "00:00",
"to": "00:00",
"enabled": True,
"index": 1
},
{
"from": "00:00",
"to": "00:00",
"enabled": False,
"index": 2
}
]
}
CREATE_PAYLOAD = {
"name": "timeObject1",
"end": {
"date": "24-Nov-2014",
"time": "21:22"
},
"recurrence": {
"pattern": "Daily",
"month": "Any",
"weekdays": [
"Sun",
"Mon"
],
"days": [
"1"
]
},
"start_now": True,
"end_never": False,
"hours_ranges": [
{
"from": "00:00",
"to": "00:00",
"enabled": True,
"index": 1
},
{
"from": "00:00",
"to": "00:00",
"enabled": False,
"index": 2
}
]
}
UPDATE_PAYLOAD = {
"name": "timeObject1",
"recurrence": {
"pattern": "Weekly",
"weekdays": [
"Fri"
],
"month": "Any"
},
"hours_ranges": [
{
"from": "00:22",
"to": "00:33"
}
]
}
OBJECT_AFTER_UPDATE = UPDATE_PAYLOAD
DELETE_PAYLOAD = {
"name": "timeObject1",
"state": "absent"
}
function_path = 'ansible.modules.network.check_point.cp_mgmt_time.api_call'
api_call_object = 'time'
class TestCheckpointTime(object):
    """Unit tests for the cp_mgmt_time Ansible module (create/update/delete)."""
    module = cp_mgmt_time

    @pytest.fixture(autouse=True)
    def module_mock(self, mocker):
        # Intercept AnsibleModule's exit/fail handlers so module results can
        # be captured as exceptions instead of terminating the process.
        return mocker.patch.multiple(basic.AnsibleModule, exit_json=exit_json, fail_json=fail_json)

    @pytest.fixture
    def connection_mock(self, mocker):
        # Stub out the httpapi connection; no real management server is used.
        connection_class_mock = mocker.patch('ansible.module_utils.network.checkpoint.checkpoint.Connection')
        return connection_class_mock.return_value

    def test_create(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)

        assert result['changed']
        assert OBJECT.items() == result[api_call_object].items()

    def test_create_idempotent(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT}
        result = self._run_module(CREATE_PAYLOAD)

        assert not result['changed']

    def test_update(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)

        assert result['changed']
        assert OBJECT_AFTER_UPDATE.items() == result[api_call_object].items()

    def test_update_idempotent(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False, api_call_object: OBJECT_AFTER_UPDATE}
        result = self._run_module(UPDATE_PAYLOAD)

        assert not result['changed']

    def test_delete(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': True}
        result = self._run_module(DELETE_PAYLOAD)

        assert result['changed']

    def test_delete_idempotent(self, mocker, connection_mock):
        mock_function = mocker.patch(function_path)
        mock_function.return_value = {'changed': False}
        result = self._run_module(DELETE_PAYLOAD)

        assert not result['changed']

    def _run_module(self, module_args):
        """Run the module's main() and return the result captured by exit_json."""
        set_module_args(module_args)
        with pytest.raises(AnsibleExitJson) as ex:
            self.module.main()
        return ex.value.args[0]
| gpl-3.0 |
pdehaye/theming-edx-platform | lms/xmodule_namespace.py | 1 | 2164 | """
Namespace that defines fields common to all blocks used in the LMS
"""
from xblock.core import Namespace, Boolean, Scope, String, Float
from xmodule.fields import Date, Timedelta
from datetime import datetime
from pytz import UTC
class LmsNamespace(Namespace):
    """
    Namespace that defines fields common to all blocks used in the LMS
    """
    # Visibility and grading policy for the block.
    hide_from_toc = Boolean(
        help="Whether to display this module in the table of contents",
        default=False,
        scope=Scope.settings
    )
    graded = Boolean(
        help="Whether this module contributes to the final course grade",
        default=False,
        scope=Scope.settings
    )
    format = String(
        help="What format this module is in (used for deciding which "
        "grader to apply, and what to show in the TOC)",
        scope=Scope.settings,
    )
    # Scheduling: when content becomes visible and when work is due.
    start = Date(
        help="Start time when this module is visible",
        default=datetime.fromtimestamp(0, UTC),
        scope=Scope.settings
    )
    due = Date(help="Date that this problem is due by", scope=Scope.settings)
    # Miscellaneous authoring/source metadata.
    source_file = String(help="source file name (eg for latex)", scope=Scope.settings)
    giturl = String(help="url root for course data git repository", scope=Scope.settings)
    xqa_key = String(help="DO NOT USE", scope=Scope.settings)
    ispublic = Boolean(help="Whether this course is open to the public, or only to admins", scope=Scope.settings)
    graceperiod = Timedelta(
        help="Amount of time after the due date that submissions will be accepted",
        scope=Scope.settings
    )
    # Problem-presentation behavior.
    showanswer = String(
        help="When to show the problem answer to the student",
        scope=Scope.settings,
        default="finished"
    )
    rerandomize = String(
        help="When to rerandomize the problem",
        default="never",
        scope=Scope.settings
    )
    days_early_for_beta = Float(
        help="Number of days early to show content to beta users",
        default=None,
        scope=Scope.settings
    )
    static_asset_path = String(help="Path to use for static assets - overrides Studio c4x://", scope=Scope.settings, default='')
| agpl-3.0 |
eayunstack/neutron | neutron/db/l3_db.py | 1 | 91850 | # Copyright 2012 VMware, Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import random
import netaddr
from neutron_lib.api.definitions import external_net as extnet_apidef
from neutron_lib.api import validators
from neutron_lib.callbacks import events
from neutron_lib.callbacks import exceptions
from neutron_lib.callbacks import registry
from neutron_lib.callbacks import resources
from neutron_lib import constants
from neutron_lib import context as n_ctx
from neutron_lib import exceptions as n_exc
from neutron_lib.plugins import constants as plugin_constants
from neutron_lib.plugins import directory
from neutron_lib.services import base as base_services
from oslo_log import log as logging
from oslo_utils import uuidutils
from sqlalchemy import orm
from sqlalchemy.orm import exc
from neutron._i18n import _
from neutron.api.rpc.agentnotifiers import l3_rpc_agent_api
from neutron.common import ipv6_utils
from neutron.common import rpc as n_rpc
from neutron.common import utils
from neutron.db import _model_query as model_query
from neutron.db import _resource_extend as resource_extend
from neutron.db import _utils as db_utils
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.db.models import l3 as l3_models
from neutron.db import models_v2
from neutron.db import standardattrdescription_db as st_attr
from neutron.extensions import l3
from neutron.extensions import qos_fip
from neutron.objects import base as base_obj
from neutron.objects import ports as port_obj
from neutron.objects import router as l3_obj
from neutron.plugins.common import utils as p_utils
from neutron import worker as neutron_worker
LOG = logging.getLogger(__name__)
# Short aliases for the neutron-lib device_owner constants used throughout
# this module.
DEVICE_OWNER_HA_REPLICATED_INT = constants.DEVICE_OWNER_HA_REPLICATED_INT
DEVICE_OWNER_ROUTER_INTF = constants.DEVICE_OWNER_ROUTER_INTF
DEVICE_OWNER_ROUTER_GW = constants.DEVICE_OWNER_ROUTER_GW
DEVICE_OWNER_FLOATINGIP = constants.DEVICE_OWNER_FLOATINGIP
# API key under which a router's external gateway data is exposed.
EXTERNAL_GW_INFO = l3.EXTERNAL_GW_INFO
# Maps API field to DB column
# API parameter name and Database column names may differ.
# Useful to keep the filtering between API and Database.
API_TO_DB_COLUMN_MAP = {'port_id': 'fixed_port_id'}
# Attributes copied verbatim from the DB row into every router API dict.
CORE_ROUTER_ATTRS = ('id', 'name', 'tenant_id', 'admin_state_up', 'status')
@registry.has_registry_receivers
class L3_NAT_dbonly_mixin(l3.RouterPluginBase,
                          base_services.WorkerBase,
                          st_attr.StandardAttrDescriptionMixin):
    """Mixin class to add L3/NAT router methods to db_base_plugin_v2."""
    # device_owner values whose ports belong to the router machinery and
    # therefore receive special deletion handling.
    router_device_owners = (
        DEVICE_OWNER_HA_REPLICATED_INT,
        DEVICE_OWNER_ROUTER_INTF,
        DEVICE_OWNER_ROUTER_GW,
        DEVICE_OWNER_FLOATINGIP
    )
    # Lazily-populated caches for extension-support probes; None means
    # "not probed yet".
    _dns_integration = None
    _fip_qos = None
def __new__(cls):
inst = super(L3_NAT_dbonly_mixin, cls).__new__(cls)
inst._start_janitor()
return inst
@staticmethod
@registry.receives(resources.PORT, [events.BEFORE_DELETE])
def _prevent_l3_port_delete_callback(resource, event, trigger, **kwargs):
context = kwargs['context']
port_id = kwargs['port_id']
port_check = kwargs['port_check']
l3plugin = directory.get_plugin(plugin_constants.L3)
if l3plugin and port_check:
l3plugin.prevent_l3_port_deletion(context, port_id)
@property
def _is_dns_integration_supported(self):
if self._dns_integration is None:
self._dns_integration = (
utils.is_extension_supported(self._core_plugin,
'dns-integration') or
utils.is_extension_supported(self._core_plugin,
'dns-domain-ports'))
return self._dns_integration
@property
def _is_fip_qos_supported(self):
if self._fip_qos is None:
# Check L3 service plugin
self._fip_qos = utils.is_extension_supported(
self, qos_fip.FIP_QOS_ALIAS)
return self._fip_qos
@property
def _core_plugin(self):
return directory.get_plugin()
def _start_janitor(self):
"""Starts the periodic job that cleans up broken complex resources.
This job will look for things like floating IP ports without an
associated floating IP and delete them 5 minutes after detection.
"""
interval = 60 * 5 # only every 5 minutes. cleanups should be rare
initial_delay = random.randint(0, interval) # splay multiple servers
janitor = neutron_worker.PeriodicWorker(self._clean_garbage, interval,
initial_delay)
self.add_worker(janitor)
def _clean_garbage(self):
if not hasattr(self, '_candidate_broken_fip_ports'):
self._candidate_broken_fip_ports = set()
context = n_ctx.get_admin_context()
candidates = self._get_dead_floating_port_candidates(context)
# just because a port is in 'candidates' doesn't necessarily mean
# it's broken, we could have just caught it before it was updated.
# We confirm by waiting until the next call of this function to see
# if it persists.
to_cleanup = candidates & self._candidate_broken_fip_ports
self._candidate_broken_fip_ports = candidates - to_cleanup
for port_id in to_cleanup:
# ensure it wasn't just a failure to update device_id before we
# delete it
try:
self._fix_or_kill_floating_port(context, port_id)
except Exception:
LOG.exception("Error cleaning up floating IP port: %s",
port_id)
def _fix_or_kill_floating_port(self, context, port_id):
pager = base_obj.Pager(limit=1)
fips = l3_obj.FloatingIP.get_objects(
context, _pager=pager, floating_port_id=port_id)
if fips:
LOG.warning("Found incorrect device_id on floating port "
"%(pid)s, correcting to %(fip)s.",
{'pid': port_id, 'fip': fips[0].id})
self._core_plugin.update_port(
context, port_id, {'port': {'device_id': fips[0].id}})
else:
LOG.warning("Found floating IP port %s without floating IP, "
"deleting.", port_id)
self._core_plugin.delete_port(
context, port_id, l3_port_check=False)
def _get_dead_floating_port_candidates(self, context):
filters = {'device_id': ['PENDING'],
'device_owner': [DEVICE_OWNER_FLOATINGIP]}
return {p['id'] for p in self._core_plugin.get_ports(context, filters)}
def _get_router(self, context, router_id):
try:
router = model_query.get_by_id(
context, l3_models.Router, router_id)
except exc.NoResultFound:
raise l3.RouterNotFound(router_id=router_id)
return router
def _make_router_dict(self, router, fields=None, process_extensions=True):
res = dict((key, router[key]) for key in CORE_ROUTER_ATTRS)
if router['gw_port_id']:
ext_gw_info = {
'network_id': router.gw_port['network_id'],
'external_fixed_ips': [{'subnet_id': ip["subnet_id"],
'ip_address': ip["ip_address"]}
for ip in router.gw_port['fixed_ips']]}
else:
ext_gw_info = None
res.update({
EXTERNAL_GW_INFO: ext_gw_info,
'gw_port_id': router['gw_port_id'],
})
# NOTE(salv-orlando): The following assumes this mixin is used in a
# class inheriting from CommonDbMixin, which is true for all existing
# plugins.
if process_extensions:
resource_extend.apply_funcs(l3.ROUTERS, res, router)
return db_utils.resource_fields(res, fields)
    def _create_router_db(self, context, router, tenant_id):
        """Create the DB object."""
        # Generate the id up front so BEFORE_CREATE subscribers and the
        # later gateway-port setup can already refer to it.
        router.setdefault('id', uuidutils.generate_uuid())
        router['tenant_id'] = tenant_id
        # BEFORE_CREATE fires outside the transaction so a subscriber veto
        # leaves no half-written row behind.
        registry.notify(resources.ROUTER, events.BEFORE_CREATE,
                        self, context=context, router=router)
        with context.session.begin(subtransactions=True):
            # pre-generate id so it will be available when
            # configuring external gw port
            router_db = l3_models.Router(
                id=router['id'],
                tenant_id=router['tenant_id'],
                name=router['name'],
                admin_state_up=router['admin_state_up'],
                status=constants.ACTIVE,
                description=router.get('description'))
            context.session.add(router_db)
            # PRECOMMIT_CREATE runs inside the open transaction so
            # subscribers can add rows atomically with the router row.
            registry.notify(resources.ROUTER, events.PRECOMMIT_CREATE,
                            self, context=context, router=router,
                            router_id=router['id'], router_db=router_db)
        return router_db
def _update_gw_for_create_router(self, context, gw_info, router_id):
if gw_info:
router_db = self._get_router(context, router_id)
self._update_router_gw_info(context, router_id,
gw_info, router=router_db)
    @db_api.retry_if_session_inactive()
    def create_router(self, context, router):
        """Create a router and, if requested, its external gateway.

        The DB row is created first; the gateway is wired up afterwards so
        the L2 calls it triggers happen outside the creation transaction.
        safe_creation presumably rolls back (deletes) the router if the
        gateway step fails — confirm against CommonDbMixin.
        """
        r = router['router']
        gw_info = r.pop(EXTERNAL_GW_INFO, None)
        create = functools.partial(self._create_router_db, context, r,
                                   r['tenant_id'])
        delete = functools.partial(self.delete_router, context)
        update_gw = functools.partial(self._update_gw_for_create_router,
                                      context, gw_info)
        router_db, _unused = common_db_mixin.safe_creation(context, create,
                                                           delete, update_gw,
                                                           transaction=False)
        new_router = self._make_router_dict(router_db)
        registry.notify(resources.ROUTER, events.AFTER_CREATE, self,
                        context=context, router_id=router_db.id,
                        router=new_router, request_attrs=r,
                        router_db=router_db)
        return new_router
    def _update_router_db(self, context, router_id, data):
        """Update the DB object."""
        with context.session.begin(subtransactions=True):
            router_db = self._get_router(context, router_id)
            # Snapshot the pre-update state for PRECOMMIT subscribers.
            old_router = self._make_router_dict(router_db)
            if data:
                router_db.update(data)
            # Fired inside the transaction so subscribers can piggy-back
            # additional DB changes on the same commit.
            registry.notify(resources.ROUTER, events.PRECOMMIT_UPDATE,
                            self, context=context, router_id=router_id,
                            router=data, router_db=router_db,
                            old_router=old_router)
        return router_db
    @db_api.retry_if_session_inactive()
    def update_router(self, context, id, router):
        """Update a router; reschedule it to a suitable L3 agent if the
        new external gateway network requires that.
        """
        r = router['router']
        gw_info = r.pop(EXTERNAL_GW_INFO, constants.ATTR_NOT_SPECIFIED)
        original = self.get_router(context, id)
        # check whether router needs and can be rescheduled to the proper
        # l3 agent (associated with given external network);
        # do check before update in DB as an exception will be raised
        # in case no proper l3 agent found
        if gw_info != constants.ATTR_NOT_SPECIFIED:
            candidates = self._check_router_needs_rescheduling(
                context, id, gw_info)
            # Update the gateway outside of the DB update since it involves L2
            # calls that don't make sense to rollback and may cause deadlocks
            # in a transaction.
            self._update_router_gw_info(context, id, gw_info)
        else:
            candidates = None
        router_db = self._update_router_db(context, id, r)
        if candidates:
            l3_plugin = directory.get_plugin(plugin_constants.L3)
            l3_plugin.reschedule_router(context, id, candidates)
        updated = self._make_router_dict(router_db)
        registry.notify(resources.ROUTER, events.AFTER_UPDATE, self,
                        context=context, router_id=id, old_router=original,
                        router=updated, request_attrs=r, router_db=router_db)
        return updated
    def _check_router_needs_rescheduling(self, context, router_id, gw_info):
        """Checks whether router's l3 agent can handle the given network

        When external_network_bridge is set, each L3 agent can be associated
        with at most one external network. If router's new external gateway
        is on other network then the router needs to be rescheduled to the
        proper l3 agent.
        If external_network_bridge is not set then the agent
        can support multiple external networks and rescheduling is not needed

        :return: list of candidate agents if rescheduling needed,
            None otherwise; raises exception if there is no eligible l3 agent
            associated with target external network
        """
        # TODO(obondarev): rethink placement of this func as l3 db manager is
        # not really a proper place for agent scheduling stuff
        network_id = gw_info.get('network_id') if gw_info else None
        if not network_id:
            return
        nets = self._core_plugin.get_networks(
            context, {extnet_apidef.EXTERNAL: [True]})
        # nothing to do if there is only one external network
        if len(nets) <= 1:
            return
        # first get plugin supporting l3 agent scheduling
        # (either l3 service plugin or core_plugin)
        l3_plugin = directory.get_plugin(plugin_constants.L3)
        if (not utils.is_extension_supported(
                l3_plugin,
                constants.L3_AGENT_SCHEDULER_EXT_ALIAS) or
                l3_plugin.router_scheduler is None):
            # that might mean that we are dealing with non-agent-based
            # implementation of l3 services
            return
        if not l3_plugin.router_supports_scheduling(context, router_id):
            return
        cur_agents = l3_plugin.list_l3_agents_hosting_router(
            context, router_id)['agents']
        # If any agent currently hosting the router can reach the new
        # external network (or is unrestricted), no rescheduling is needed.
        for agent in cur_agents:
            ext_net_id = agent['configurations'].get(
                'gateway_external_network_id')
            ext_bridge = agent['configurations'].get(
                'external_network_bridge', '')
            if (ext_net_id == network_id or
                    (not ext_net_id and not ext_bridge)):
                return
        # otherwise find l3 agent with matching gateway_external_network_id
        active_agents = l3_plugin.get_l3_agents(context, active=True)
        router = {
            'id': router_id,
            'external_gateway_info': {'network_id': network_id}
        }
        candidates = l3_plugin.get_l3_agent_candidates(context,
                                                       router,
                                                       active_agents)
        if not candidates:
            msg = (_('No eligible l3 agent associated with external network '
                     '%s found') % network_id)
            raise n_exc.BadRequest(resource='router', msg=msg)
        return candidates
    def _create_router_gw_port(self, context, router, network_id, ext_ips):
        """Create the external gateway port and its RouterPort binding."""
        # Port has no 'tenant-id', as it is hidden from user
        port_data = {'tenant_id': '', # intentionally not set
                     'network_id': network_id,
                     'fixed_ips': ext_ips or constants.ATTR_NOT_SPECIFIED,
                     'device_id': router['id'],
                     'device_owner': DEVICE_OWNER_ROUTER_GW,
                     'admin_state_up': True,
                     'name': ''}
        gw_port = p_utils.create_port(self._core_plugin,
                                      context.elevated(), {'port': port_data})
        if not gw_port['fixed_ips']:
            # NOTE(review): creation proceeds even when the port got no IP;
            # presumably callers tolerate an address-less gateway port —
            # confirm before relying on this.
            LOG.debug('No IPs available for external network %s',
                      network_id)
        # If the DB wiring below fails, the freshly created port is
        # removed again by delete_port_on_error.
        with p_utils.delete_port_on_error(self._core_plugin,
                                          context.elevated(), gw_port['id']):
            with context.session.begin(subtransactions=True):
                router.gw_port = self._core_plugin._get_port(
                    context.elevated(), gw_port['id'])
                router_port = l3_obj.RouterPort(
                    context,
                    router_id=router.id,
                    port_id=gw_port['id'],
                    port_type=DEVICE_OWNER_ROUTER_GW
                )
                context.session.add(router)
                router_port.create()
def _validate_gw_info(self, context, gw_port, info, ext_ips):
network_id = info['network_id'] if info else None
if network_id:
network_db = self._core_plugin._get_network(context, network_id)
if not network_db.external:
msg = _("Network %s is not an external network") % network_id
raise n_exc.BadRequest(resource='router', msg=msg)
if ext_ips:
subnets = self._core_plugin.get_subnets_by_network(context,
network_id)
for s in subnets:
if not s['gateway_ip']:
continue
for ext_ip in ext_ips:
if ext_ip.get('ip_address') == s['gateway_ip']:
msg = _("External IP %s is the same as the "
"gateway IP") % ext_ip.get('ip_address')
raise n_exc.BadRequest(resource='router', msg=msg)
return network_id
    # NOTE(yamamoto): This method is an override point for plugins
    # inheriting this class. Do not optimize this out.
    def router_gw_port_has_floating_ips(self, context, router_id):
        """Return True if the router's gateway port is serving floating IPs."""
        # Simple count-based check; subclasses may override it (see NOTE).
        return bool(self.get_floatingips_count(context,
                                               {'router_id': [router_id]}))
    def _delete_current_gw_port(self, context, router_id, router,
                                new_network_id):
        """Delete gw port if attached to an old network."""
        port_requires_deletion = (
            router.gw_port and router.gw_port['network_id'] != new_network_id)
        if not port_requires_deletion:
            return
        admin_ctx = context.elevated()
        old_network_id = router.gw_port['network_id']
        # A gateway still serving floating IPs must not be removed.
        if self.router_gw_port_has_floating_ips(admin_ctx, router_id):
            raise l3.RouterExternalGatewayInUseByFloatingIp(
                router_id=router_id, net_id=router.gw_port['network_id'])
        gw_ips = [x['ip_address'] for x in router.gw_port['fixed_ips']]
        gw_port_id = router.gw_port['id']
        # Detach in the DB first, then delete the actual port with the L3
        # port check disabled (we are the L3 plugin doing the removal).
        self._delete_router_gw_port_db(context, router)
        self._core_plugin.delete_port(
            admin_ctx, gw_port_id, l3_port_check=False)
        with context.session.begin(subtransactions=True):
            context.session.refresh(router)
        registry.notify(resources.ROUTER_GATEWAY,
                        events.AFTER_DELETE, self,
                        router_id=router_id,
                        context=context,
                        router=router,
                        network_id=old_network_id,
                        new_network_id=new_network_id,
                        gateway_ips=gw_ips)
    def _delete_router_gw_port_db(self, context, router):
        """Detach the gateway port from *router* in the DB.

        BEFORE_DELETE subscribers may veto: a single callback failure is
        re-raised as-is, multiple failures become RouterInUse.
        """
        with context.session.begin(subtransactions=True):
            router.gw_port = None
            if router not in context.session:
                context.session.add(router)
            try:
                kwargs = {'context': context, 'router_id': router.id}
                registry.notify(
                    resources.ROUTER_GATEWAY, events.BEFORE_DELETE, self,
                    **kwargs)
            except exceptions.CallbackFailure as e:
                # NOTE(armax): preserve old check's behavior
                if len(e.errors) == 1:
                    raise e.errors[0].error
                raise l3.RouterInUse(router_id=router.id, reason=e)
    def _create_gw_port(self, context, router_id, router, new_network_id,
                        ext_ips):
        """Create a gateway port when the external network changes."""
        new_valid_gw_port_attachment = (
            new_network_id and (not router.gw_port or
                              router.gw_port['network_id'] != new_network_id))
        if new_valid_gw_port_attachment:
            subnets = self._core_plugin.get_subnets_by_network(context,
                                                               new_network_id)
            try:
                kwargs = {'context': context, 'router_id': router_id,
                          'network_id': new_network_id, 'subnets': subnets}
                registry.notify(
                    resources.ROUTER_GATEWAY, events.BEFORE_CREATE, self,
                    **kwargs)
            except exceptions.CallbackFailure as e:
                # raise the underlying exception
                raise e.errors[0].error
            self._check_for_dup_router_subnets(context, router,
                                               new_network_id,
                                               subnets,
                                               include_gateway=True)
            self._create_router_gw_port(context, router,
                                        new_network_id, ext_ips)
            # NOTE(review): the bound method is passed as the notification
            # trigger here, unlike every other notify() in this class which
            # passes ``self`` — looks unintentional; confirm no subscriber
            # relies on it before changing.
            registry.notify(resources.ROUTER_GATEWAY,
                            events.AFTER_CREATE,
                            self._create_gw_port,
                            gw_ips=ext_ips,
                            network_id=new_network_id,
                            router_id=router_id)
def _update_current_gw_port(self, context, router_id, router, ext_ips):
self._core_plugin.update_port(context, router.gw_port['id'], {'port':
{'fixed_ips': ext_ips}})
context.session.expire(router.gw_port)
    def _update_router_gw_info(self, context, router_id, info, router=None):
        """Apply a gateway change: update in place, or replace the port."""
        # TODO(salvatore-orlando): guarantee atomic behavior also across
        # operations that span beyond the model classes handled by this
        # class (e.g.: delete_port)
        router = router or self._get_router(context, router_id)
        gw_port = router.gw_port
        ext_ips = info.get('external_fixed_ips') if info else []
        ext_ip_change = self._check_for_external_ip_change(
            context, gw_port, ext_ips)
        network_id = self._validate_gw_info(context, gw_port, info, ext_ips)
        if gw_port and ext_ip_change and gw_port['network_id'] == network_id:
            # Same external network, different addresses: update in place.
            self._update_current_gw_port(context, router_id, router,
                                         ext_ips)
        else:
            # Network changed (or gateway cleared): drop the old port and,
            # if a network was given, create a fresh one.
            self._delete_current_gw_port(context, router_id, router,
                                         network_id)
            self._create_gw_port(context, router_id, router, network_id,
                                 ext_ips)
def _check_for_external_ip_change(self, context, gw_port, ext_ips):
# determine if new external IPs differ from the existing fixed_ips
if not ext_ips:
# no external_fixed_ips were included
return False
if not gw_port:
return True
subnet_ids = set(ip['subnet_id'] for ip in gw_port['fixed_ips'])
new_subnet_ids = set(f['subnet_id'] for f in ext_ips
if f.get('subnet_id'))
subnet_change = not new_subnet_ids == subnet_ids
if subnet_change:
return True
ip_addresses = set(ip['ip_address'] for ip in gw_port['fixed_ips'])
new_ip_addresses = set(f['ip_address'] for f in ext_ips
if f.get('ip_address'))
ip_address_change = not ip_addresses == new_ip_addresses
return ip_address_change
def _ensure_router_not_in_use(self, context, router_id):
"""Ensure that no internal network interface is attached
to the router.
"""
router = self._get_router(context, router_id)
device_owner = self._get_device_owner(context, router)
if any(rp.port_type == device_owner
for rp in router.attached_ports):
raise l3.RouterInUse(router_id=router_id)
return router
    @db_api.retry_if_session_inactive()
    def delete_router(self, context, id):
        """Delete a router after detaching its gateway and router ports."""
        registry.notify(resources.ROUTER, events.BEFORE_DELETE,
                        self, context=context, router_id=id)
        #TODO(nati) Refactor here when we have router insertion model
        router = self._ensure_router_not_in_use(context, id)
        original = self._make_router_dict(router)
        self._delete_current_gw_port(context, id, router, None)
        with context.session.begin(subtransactions=True):
            context.session.refresh(router)
        # Port deletions happen outside a transaction: delete_port involves
        # L2 calls that should not be rolled back.
        router_ports = router.attached_ports
        for rp in router_ports:
            self._core_plugin.delete_port(context.elevated(),
                                          rp.port.id,
                                          l3_port_check=False)
        with context.session.begin(subtransactions=True):
            context.session.refresh(router)
            registry.notify(resources.ROUTER, events.PRECOMMIT_DELETE,
                            self, context=context, router_db=router,
                            router_id=id)
            # we bump the revision even though we are about to delete to throw
            # staledataerror if something snuck in with a new interface
            router.bump_revision()
            context.session.flush()
            context.session.delete(router)
        registry.notify(resources.ROUTER, events.AFTER_DELETE, self,
                        context=context, router_id=id, original=original)
@db_api.retry_if_session_inactive()
def get_router(self, context, id, fields=None):
router = self._get_router(context, id)
return self._make_router_dict(router, fields)
@db_api.retry_if_session_inactive()
def get_routers(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
marker_obj = db_utils.get_marker_obj(self, context, 'router',
limit, marker)
return model_query.get_collection(context, l3_models.Router,
self._make_router_dict,
filters=filters, fields=fields,
sorts=sorts,
limit=limit,
marker_obj=marker_obj,
page_reverse=page_reverse)
@db_api.retry_if_session_inactive()
def get_routers_count(self, context, filters=None):
return model_query.get_collection_count(context, l3_models.Router,
filters=filters)
    def _check_for_dup_router_subnets(self, context, router,
                                      network_id, new_subnets,
                                      include_gateway=False):
        """Reject subnets already attached to, or overlapping, the router.

        Raises BadRequest when any of *new_subnets* is already bound to the
        router, or when its CIDR overlaps a CIDR already reachable via it.
        """
        # It's possible these ports are on the same network, but
        # different subnets.
        new_subnet_ids = {s['id'] for s in new_subnets}
        router_subnets = []
        for p in (rp.port for rp in router.attached_ports):
            for ip in p['fixed_ips']:
                if ip['subnet_id'] in new_subnet_ids:
                    msg = (_("Router already has a port on subnet %s")
                           % ip['subnet_id'])
                    raise n_exc.BadRequest(resource='router', msg=msg)
                gw_owner = (p.get('device_owner') == DEVICE_OWNER_ROUTER_GW)
                # Collect subnets for the overlap check below; gateway-port
                # subnets only count when include_gateway is set.
                if include_gateway == gw_owner:
                    router_subnets.append(ip['subnet_id'])
        # Ignore temporary Prefix Delegation CIDRs
        new_subnets = [s for s in new_subnets
                       if s['cidr'] != constants.PROVISIONAL_IPV6_PD_PREFIX]
        id_filter = {'id': router_subnets}
        subnets = self._core_plugin.get_subnets(context.elevated(),
                                                filters=id_filter)
        # Pairwise overlap check between existing and new CIDRs.
        for sub in subnets:
            cidr = sub['cidr']
            ipnet = netaddr.IPNetwork(cidr)
            for s in new_subnets:
                new_cidr = s['cidr']
                new_ipnet = netaddr.IPNetwork(new_cidr)
                match1 = netaddr.all_matching_cidrs(new_ipnet, [cidr])
                match2 = netaddr.all_matching_cidrs(ipnet, [new_cidr])
                if match1 or match2:
                    data = {'subnet_cidr': new_cidr,
                            'subnet_id': s['id'],
                            'cidr': cidr,
                            'sub_id': sub['id']}
                    msg = (_("Cidr %(subnet_cidr)s of subnet "
                             "%(subnet_id)s overlaps with cidr %(cidr)s "
                             "of subnet %(sub_id)s") % data)
                    raise n_exc.BadRequest(resource='router', msg=msg)
def _get_device_owner(self, context, router=None):
"""Get device_owner for the specified router."""
# NOTE(armando-migliaccio): in the base case this is invariant
return DEVICE_OWNER_ROUTER_INTF
def _validate_interface_info(self, interface_info, for_removal=False):
port_id_specified = interface_info and 'port_id' in interface_info
subnet_id_specified = interface_info and 'subnet_id' in interface_info
if not (port_id_specified or subnet_id_specified):
msg = _("Either subnet_id or port_id must be specified")
raise n_exc.BadRequest(resource='router', msg=msg)
for key in ('port_id', 'subnet_id'):
if key not in interface_info:
continue
err = validators.validate_uuid(interface_info[key])
if err:
raise n_exc.BadRequest(resource='router', msg=err)
if not for_removal:
if port_id_specified and subnet_id_specified:
msg = _("Cannot specify both subnet-id and port-id")
raise n_exc.BadRequest(resource='router', msg=msg)
return port_id_specified, subnet_id_specified
def _check_router_port(self, context, port_id, device_id):
"""Check that a port is available for an attachment to a router
:param context: The context of the request.
:param port_id: The port to be attached.
:param device_id: This method will check that device_id corresponds to
the device_id of the port. It raises PortInUse exception if it
doesn't.
:returns: The port description returned by the core plugin.
:raises: PortInUse if the device_id is not the same as the port's one.
:raises: BadRequest if the port has no fixed IP.
"""
port = self._core_plugin.get_port(context, port_id)
if port['device_id'] != device_id:
raise n_exc.PortInUse(net_id=port['network_id'],
port_id=port['id'],
device_id=port['device_id'])
if not port['fixed_ips']:
msg = _('Router port must have at least one fixed IP')
raise n_exc.BadRequest(resource='router', msg=msg)
return port
    def _validate_router_port_info(self, context, router, port_id):
        """Re-validate *port_id* inside a transaction and load its subnets.

        Returns (port, subnets). Enforces the one-IPv6-port-per-network and
        single-IPv4-subnet-per-port restrictions.
        """
        with db_api.autonested_transaction(context.session):
            # check again within transaction to mitigate race
            port = self._check_router_port(context, port_id, router.id)
            # Only allow one router port with IPv6 subnets per network id
            if self._port_has_ipv6_address(port):
                for existing_port in (rp.port for rp in router.attached_ports):
                    if (existing_port['network_id'] == port['network_id'] and
                            self._port_has_ipv6_address(existing_port)):
                        msg = _("Cannot have multiple router ports with the "
                                "same network id if both contain IPv6 "
                                "subnets. Existing port %(p)s has IPv6 "
                                "subnet(s) and network id %(nid)s")
                        raise n_exc.BadRequest(resource='router', msg=msg % {
                            'p': existing_port['id'],
                            'nid': existing_port['network_id']})
            fixed_ips = [ip for ip in port['fixed_ips']]
            subnets = []
            for fixed_ip in fixed_ips:
                subnet = self._core_plugin.get_subnet(context,
                                                      fixed_ip['subnet_id'])
                subnets.append(subnet)
            if subnets:
                self._check_for_dup_router_subnets(context, router,
                                                   port['network_id'],
                                                   subnets)
            # Keep the restriction against multiple IPv4 subnets
            if len([s for s in subnets if s['ip_version'] == 4]) > 1:
                msg = _("Cannot have multiple "
                        "IPv4 subnets on router port")
                raise n_exc.BadRequest(resource='router', msg=msg)
            return port, subnets
def _notify_attaching_interface(self, context, router_db, port,
interface_info):
"""Notify third party code that an interface is being attached to a
router
:param context: The context of the request.
:param router_db: The router db object having an interface attached.
:param port: The port object being attached to the router.
:param interface_info: The requested interface attachment info passed
to add_router_interface.
:raises: RouterInterfaceAttachmentConflict if a third party code
prevent the port to be attach to the router.
"""
try:
registry.notify(resources.ROUTER_INTERFACE,
events.BEFORE_CREATE,
self,
context=context,
router_db=router_db,
port=port,
interface_info=interface_info,
router_id=router_db.id,
network_id=port['network_id'])
except exceptions.CallbackFailure as e:
# raise the underlying exception
reason = (_('cannot perform router interface attachment '
'due to %(reason)s') % {'reason': e})
raise l3.RouterInterfaceAttachmentConflict(reason=reason)
def _add_interface_by_port(self, context, router, port_id, owner):
# Update owner before actual process in order to avoid the
# case where a port might get attached to a router without the
# owner successfully updating due to an unavailable backend.
self._core_plugin.update_port(
context, port_id, {'port': {'device_id': router.id,
'device_owner': owner}})
return self._validate_router_port_info(context, router, port_id)
def _port_has_ipv6_address(self, port):
for fixed_ip in port['fixed_ips']:
if netaddr.IPNetwork(fixed_ip['ip_address']).version == 6:
return True
def _find_ipv6_router_port_by_network(self, context, router, net_id):
router_dev_owner = self._get_device_owner(context, router)
for port in router.attached_ports:
p = port['port']
if p['device_owner'] != router_dev_owner:
# we don't want any special purpose internal ports
continue
if p['network_id'] == net_id and self._port_has_ipv6_address(p):
return port
    def _add_interface_by_subnet(self, context, router, subnet_id, owner):
        """Attach *subnet_id* to the router, creating a port if needed.

        Returns (port, [subnet], new_port) where new_port is False when an
        existing IPv6 port on the same network absorbed the new prefix.
        """
        subnet = self._core_plugin.get_subnet(context, subnet_id)
        if not subnet['gateway_ip']:
            msg = _('Subnet for router interface must have a gateway IP')
            raise n_exc.BadRequest(resource='router', msg=msg)
        # SLAAC/DHCPv6 subnets managed by an external router can't be
        # attached: the Neutron router would fight the external RA source.
        if (subnet['ip_version'] == 6 and subnet['ipv6_ra_mode'] is None
                and subnet['ipv6_address_mode'] is not None):
            msg = (_('IPv6 subnet %s configured to receive RAs from an '
                     'external router cannot be added to Neutron Router.') %
                   subnet['id'])
            raise n_exc.BadRequest(resource='router', msg=msg)
        self._check_for_dup_router_subnets(context, router,
                                           subnet['network_id'], [subnet])
        # The router interface takes the subnet's gateway address.
        fixed_ip = {'ip_address': subnet['gateway_ip'],
                    'subnet_id': subnet['id']}
        if (subnet['ip_version'] == 6 and not
                ipv6_utils.is_ipv6_pd_enabled(subnet)):
            # Add new prefix to an existing ipv6 port with the same network id
            # if one exists
            port = self._find_ipv6_router_port_by_network(context, router,
                                                          subnet['network_id'])
            if port:
                fixed_ips = list(map(dict, port['port']['fixed_ips']))
                fixed_ips.append(fixed_ip)
                return self._core_plugin.update_port(context,
                        port['port_id'], {'port':
                        {'fixed_ips': fixed_ips}}), [subnet], False
        port_data = {'tenant_id': router.tenant_id,
                     'network_id': subnet['network_id'],
                     'fixed_ips': [fixed_ip],
                     'admin_state_up': True,
                     'device_id': router.id,
                     'device_owner': owner,
                     'name': ''}
        return p_utils.create_port(self._core_plugin, context,
                                   {'port': port_data}), [subnet], True
@staticmethod
def _make_router_interface_info(
router_id, tenant_id, port_id, network_id, subnet_id, subnet_ids):
return {
'id': router_id,
'tenant_id': tenant_id,
'port_id': port_id,
'network_id': network_id,
'subnet_id': subnet_id, # deprecated by IPv6 multi-prefix
'subnet_ids': subnet_ids
}
    @db_api.retry_if_session_inactive()
    def add_router_interface(self, context, router_id, interface_info=None):
        """Attach an interface (by port or by subnet) to a router.

        Returns the interface-info response dict. On failure the port is
        rolled back: deleted when this call created it, otherwise restored
        to its previous device_id/device_owner.
        """
        router = self._get_router(context, router_id)
        add_by_port, add_by_sub = self._validate_interface_info(interface_info)
        device_owner = self._get_device_owner(context, router_id)
        # This should be True unless adding an IPv6 prefix to an existing port
        new_router_intf = True
        cleanup_port = False
        if add_by_port:
            port_id = interface_info['port_id']
            # '' device_id ensures the port is currently unattached.
            port = self._check_router_port(context, port_id, '')
            revert_value = {'device_id': '',
                            'device_owner': port['device_owner']}
            with p_utils.update_port_on_error(
                    self._core_plugin, context, port_id, revert_value):
                port, subnets = self._add_interface_by_port(
                    context, router, port_id, device_owner)
        # add_by_subnet is not used here, because the validation logic of
        # _validate_interface_info ensures that either of add_by_* is True.
        else:
            port, subnets, new_router_intf = self._add_interface_by_subnet(
                context, router, interface_info['subnet_id'], device_owner)
            cleanup_port = new_router_intf # only cleanup port we created
            revert_value = {'device_id': '',
                            'device_owner': port['device_owner']}
        # Pick the error-rollback strategy: delete a port we created,
        # merely revert ownership on a pre-existing one.
        if cleanup_port:
            mgr = p_utils.delete_port_on_error(
                self._core_plugin, context, port['id'])
        else:
            mgr = p_utils.update_port_on_error(
                self._core_plugin, context, port['id'], revert_value)
        if new_router_intf:
            with mgr:
                self._notify_attaching_interface(context, router_db=router,
                                                 port=port,
                                                 interface_info=interface_info)
                l3_obj.RouterPort(
                    context,
                    port_id=port['id'],
                    router_id=router.id,
                    port_type=device_owner
                ).create()
                # Update owner after actual process again in order to
                # make sure the records in routerports table and ports
                # table are consistent.
                self._core_plugin.update_port(
                    context, port['id'], {'port': {
                        'device_id': router.id,
                        'device_owner': device_owner}})
        gw_ips = []
        gw_network_id = None
        if router.gw_port:
            gw_network_id = router.gw_port.network_id
            gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips]
        registry.notify(resources.ROUTER_INTERFACE,
                        events.AFTER_CREATE,
                        self,
                        context=context,
                        network_id=gw_network_id,
                        gateway_ips=gw_ips,
                        cidrs=[x['cidr'] for x in subnets],
                        subnets=subnets,
                        port_id=port['id'],
                        router_id=router_id,
                        port=port,
                        new_interface=new_router_intf,
                        interface_info=interface_info)
        with context.session.begin(subtransactions=True):
            context.session.refresh(router)
        return self._make_router_interface_info(
            router.id, port['tenant_id'], port['id'], port['network_id'],
            subnets[-1]['id'], [subnet['id'] for subnet in subnets])
def _confirm_router_interface_not_in_use(self, context, router_id,
subnet_id):
subnet = self._core_plugin.get_subnet(context, subnet_id)
subnet_cidr = netaddr.IPNetwork(subnet['cidr'])
try:
kwargs = {'context': context, 'router_id': router_id,
'subnet_id': subnet_id}
registry.notify(
resources.ROUTER_INTERFACE,
events.BEFORE_DELETE, self, **kwargs)
except exceptions.CallbackFailure as e:
# NOTE(armax): preserve old check's behavior
if len(e.errors) == 1:
raise e.errors[0].error
raise l3.RouterInUse(router_id=router_id, reason=e)
fip_objs = l3_obj.FloatingIP.get_objects(context, router_id=router_id)
for fip_obj in fip_objs:
if fip_obj.fixed_ip_address in subnet_cidr:
raise l3.RouterInterfaceInUseByFloatingIP(
router_id=router_id, subnet_id=subnet_id)
def _remove_interface_by_port(self, context, router_id,
port_id, subnet_id, owner):
obj = l3_obj.RouterPort.get_object(
context,
port_id=port_id,
router_id=router_id,
port_type=owner
)
if obj:
try:
port = self._core_plugin.get_port(context, obj.port_id)
except n_exc.PortNotFound:
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
else:
raise l3.RouterInterfaceNotFound(router_id=router_id,
port_id=port_id)
port_subnet_ids = [fixed_ip['subnet_id']
for fixed_ip in port['fixed_ips']]
if subnet_id and subnet_id not in port_subnet_ids:
raise n_exc.SubnetMismatchForPort(
port_id=port_id, subnet_id=subnet_id)
subnets = [self._core_plugin.get_subnet(context, port_subnet_id)
for port_subnet_id in port_subnet_ids]
for port_subnet_id in port_subnet_ids:
self._confirm_router_interface_not_in_use(
context, router_id, port_subnet_id)
self._core_plugin.delete_port(context, port['id'],
l3_port_check=False)
return (port, subnets)
    def _remove_interface_by_subnet(self, context,
                                    router_id, subnet_id, owner):
        """Detach the router interface that serves *subnet_id*.

        Finds the router's port carrying an IP on the subnet: if the port
        spans several subnets only the matching fixed IP is removed from
        it, otherwise the whole port is deleted.

        :returns: tuple of (port dict, [subnet dict]) for the interface.
        :raises: RouterInterfaceNotFoundForSubnet when no router port on
            the subnet exists.
        """
        self._confirm_router_interface_not_in_use(
            context, router_id, subnet_id)
        subnet = self._core_plugin.get_subnet(context, subnet_id)
        try:
            ports = port_obj.Port.get_ports_by_router(
                context, router_id, owner, subnet)
            for p in ports:
                try:
                    p = self._core_plugin.get_port(context, p.id)
                except n_exc.PortNotFound:
                    # Port disappeared concurrently; keep scanning.
                    continue
                port_subnets = [fip['subnet_id'] for fip in p['fixed_ips']]
                if subnet_id in port_subnets and len(port_subnets) > 1:
                    # multiple prefix port - delete prefix from port
                    fixed_ips = [dict(fip) for fip in p['fixed_ips']
                                 if fip['subnet_id'] != subnet_id]
                    self._core_plugin.update_port(context, p['id'],
                                                  {'port':
                                                      {'fixed_ips': fixed_ips}})
                    return (p, [subnet])
                elif subnet_id in port_subnets:
                    # only one subnet on port - delete the port
                    self._core_plugin.delete_port(context, p['id'],
                                                  l3_port_check=False)
                    return (p, [subnet])
        except exc.NoResultFound:
            # No router port rows at all; fall through to the final raise.
            pass
        raise l3.RouterInterfaceNotFoundForSubnet(router_id=router_id,
                                                  subnet_id=subnet_id)
    @db_api.retry_if_session_inactive()
    def remove_router_interface(self, context, router_id, interface_info):
        """Detach an internal interface (by port or by subnet) from a router.

        :param interface_info: dict holding 'port_id' and/or 'subnet_id'
            identifying the interface to remove.
        :returns: the standard router-interface info dict.
        """
        remove_by_port, remove_by_subnet = (
            self._validate_interface_info(interface_info, for_removal=True)
        )
        port_id = interface_info.get('port_id')
        subnet_id = interface_info.get('subnet_id')
        device_owner = self._get_device_owner(context, router_id)
        if remove_by_port:
            port, subnets = self._remove_interface_by_port(context, router_id,
                                                           port_id, subnet_id,
                                                           device_owner)
        # remove_by_subnet is not used here, because the validation logic of
        # _validate_interface_info ensures that at least one of remote_by_*
        # is True.
        else:
            port, subnets = self._remove_interface_by_subnet(
                context, router_id, subnet_id, device_owner)
        gw_network_id = None
        gw_ips = []
        router = self._get_router(context, router_id)
        if router.gw_port:
            # Gateway details are passed along so AFTER_DELETE subscribers
            # do not need to re-read the router.
            gw_network_id = router.gw_port.network_id
            gw_ips = [x['ip_address'] for x in router.gw_port.fixed_ips]
        registry.notify(resources.ROUTER_INTERFACE,
                        events.AFTER_DELETE,
                        self,
                        context=context,
                        cidrs=[x['cidr'] for x in subnets],
                        network_id=gw_network_id,
                        gateway_ips=gw_ips,
                        port=port,
                        router_id=router_id,
                        interface_info=interface_info)
        with context.session.begin(subtransactions=True):
            # Refresh so the returned info reflects committed state.
            context.session.refresh(router)
        return self._make_router_interface_info(router_id, port['tenant_id'],
                                                port['id'], port['network_id'],
                                                subnets[0]['id'],
                                                [subnet['id'] for subnet in
                                                 subnets])
def _get_floatingip(self, context, id):
floatingip = l3_obj.FloatingIP.get_object(context, id=id)
if not floatingip:
raise l3.FloatingIPNotFound(floatingip_id=id)
return floatingip
def _make_floatingip_dict(self, floatingip, fields=None,
process_extensions=True):
floating_ip_address = (str(floatingip.floating_ip_address)
if floatingip.floating_ip_address else None)
fixed_ip_address = (str(floatingip.fixed_ip_address)
if floatingip.fixed_ip_address else None)
res = {'id': floatingip.id,
'tenant_id': floatingip.project_id,
'floating_ip_address': floating_ip_address,
'floating_network_id': floatingip.floating_network_id,
'router_id': floatingip.router_id,
'port_id': floatingip.fixed_port_id,
'fixed_ip_address': fixed_ip_address,
'status': floatingip.status}
# NOTE(mlavalle): The following assumes this mixin is used in a
# class inheriting from CommonDbMixin, which is true for all existing
# plugins.
# TODO(lujinluo): Change floatingip.db_obj to floatingip once all
# codes are migrated to use Floating IP OVO object.
if process_extensions:
resource_extend.apply_funcs(l3.FLOATINGIPS, res, floatingip.db_obj)
return db_utils.resource_fields(res, fields)
def _get_router_for_floatingip(self, context, internal_port,
internal_subnet_id,
external_network_id):
subnet = self._core_plugin.get_subnet(context, internal_subnet_id)
return self.get_router_for_floatingip(context,
internal_port, subnet, external_network_id)
    # NOTE(yamamoto): This method is an override point for plugins
    # inheriting this class. Do not optimize this out.
    def get_router_for_floatingip(self, context, internal_port,
                                  internal_subnet, external_network_id):
        """Find a router to handle the floating-ip association.

        :param internal_port: The port for the fixed-ip.
        :param internal_subnet: The subnet for the fixed-ip.
        :param external_network_id: The external network for floating-ip.
        :raises: ExternalGatewayForFloatingIPNotFound if no suitable router
            is found.
        """
        # Find routers(with router_id and interface address) that
        # connect given internal subnet and the external network.
        # Among them, if the router's interface address matches
        # with subnet's gateway-ip, return that router.
        # Otherwise return the first router.
        RouterPort = l3_models.RouterPort
        gw_port = orm.aliased(models_v2.Port, name="gw_port")
        # TODO(lujinluo): Need IPAllocation and Port object
        # Pairs of (router_id, router-interface IP on the internal subnet),
        # restricted to routers whose gateway port sits on the requested
        # external network.
        routerport_qry = context.session.query(
            RouterPort.router_id, models_v2.IPAllocation.ip_address).join(
            models_v2.Port, models_v2.IPAllocation).filter(
            models_v2.Port.network_id == internal_port['network_id'],
            RouterPort.port_type.in_(constants.ROUTER_INTERFACE_OWNERS),
            models_v2.IPAllocation.subnet_id == internal_subnet['id']
        ).join(gw_port, gw_port.device_id == RouterPort.router_id).filter(
            gw_port.network_id == external_network_id,
            gw_port.device_owner == DEVICE_OWNER_ROUTER_GW
        ).distinct()
        first_router_id = None
        for router_id, interface_ip in routerport_qry:
            if interface_ip == internal_subnet['gateway_ip']:
                # Prefer the router acting as the subnet's default gateway.
                return router_id
            if not first_router_id:
                first_router_id = router_id
        if first_router_id:
            return first_router_id
        raise l3.ExternalGatewayForFloatingIPNotFound(
            subnet_id=internal_subnet['id'],
            external_network_id=external_network_id,
            port_id=internal_port['id'])
def _port_ipv4_fixed_ips(self, port):
return [ip for ip in port['fixed_ips']
if netaddr.IPAddress(ip['ip_address']).version == 4]
    def _internal_fip_assoc_data(self, context, fip, tenant_id):
        """Retrieve internal port data for floating IP.

        Retrieve information concerning the internal port where
        the floating IP should be associated to.

        :returns: (internal port dict, internal subnet id, fixed IPv4
            address string).
        :raises: BadRequest for cross-tenant ports (non-admin callers),
            non-IPv4 fixed addresses, addresses not on the port, or
            missing/ambiguous IPv4 fixed IPs.
        """
        internal_port = self._core_plugin.get_port(context, fip['port_id'])
        if internal_port['tenant_id'] != tenant_id and not context.is_admin:
            port_id = fip['port_id']
            msg = (_('Cannot process floating IP association with '
                     'Port %s, since that port is owned by a '
                     'different tenant') % port_id)
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        internal_subnet_id = None
        if fip.get('fixed_ip_address'):
            # Caller named a specific fixed IP; it must be IPv4 and must
            # actually be configured on the port.
            internal_ip_address = fip['fixed_ip_address']
            if netaddr.IPAddress(internal_ip_address).version != 4:
                msg = (_('Cannot process floating IP association with %s, '
                         'since that is not an IPv4 address') %
                       internal_ip_address)
                raise n_exc.BadRequest(resource='floatingip', msg=msg)
            for ip in internal_port['fixed_ips']:
                if ip['ip_address'] == internal_ip_address:
                    internal_subnet_id = ip['subnet_id']
            if not internal_subnet_id:
                msg = (_('Port %(id)s does not have fixed ip %(address)s') %
                       {'id': internal_port['id'],
                        'address': internal_ip_address})
                raise n_exc.BadRequest(resource='floatingip', msg=msg)
        else:
            # No fixed IP requested: the port must carry exactly one IPv4
            # fixed IP for the association to be unambiguous.
            ipv4_fixed_ips = self._port_ipv4_fixed_ips(internal_port)
            if not ipv4_fixed_ips:
                msg = (_('Cannot add floating IP to port %s that has '
                         'no fixed IPv4 addresses') % internal_port['id'])
                raise n_exc.BadRequest(resource='floatingip', msg=msg)
            if len(ipv4_fixed_ips) > 1:
                msg = (_('Port %s has multiple fixed IPv4 addresses. Must '
                         'provide a specific IPv4 address when assigning a '
                         'floating IP') % internal_port['id'])
                raise n_exc.BadRequest(resource='floatingip', msg=msg)
            internal_ip_address = ipv4_fixed_ips[0]['ip_address']
            internal_subnet_id = ipv4_fixed_ips[0]['subnet_id']
        return internal_port, internal_subnet_id, internal_ip_address
def _get_assoc_data(self, context, fip, floatingip_obj):
"""Determine/extract data associated with the internal port.
When a floating IP is associated with an internal port,
we need to extract/determine some data associated with the
internal port, including the internal_ip_address, and router_id.
The confirmation of the internal port whether owned by the tenant who
owns the floating IP will be confirmed by _get_router_for_floatingip.
"""
(internal_port, internal_subnet_id,
internal_ip_address) = self._internal_fip_assoc_data(
context, fip, floatingip_obj.project_id)
router_id = self._get_router_for_floatingip(
context, internal_port,
internal_subnet_id, floatingip_obj.floating_network_id)
return (fip['port_id'], internal_ip_address, router_id)
    def _check_and_get_fip_assoc(self, context, fip, floatingip_obj):
        """Validate and resolve the association change requested in *fip*.

        :returns: (port_id, internal_ip_address, router_id); all None when
            the request results in no association.
        :raises: BadRequest when fixed_ip_address is given without
            port_id; FloatingIPPortAlreadyAssociated when the target
            fixed IP already has a floating IP on the same network.
        """
        port_id = internal_ip_address = router_id = None
        if fip.get('fixed_ip_address') and not fip.get('port_id'):
            msg = _("fixed_ip_address cannot be specified without a port_id")
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        if fip.get('port_id'):
            port_id, internal_ip_address, router_id = self._get_assoc_data(
                context,
                fip,
                floatingip_obj)
            if port_id == floatingip_obj.fixed_port_id:
                # Floating IP association is not changed.
                return port_id, internal_ip_address, router_id
            fip_exists = l3_obj.FloatingIP.objects_exist(
                context,
                fixed_port_id=fip['port_id'],
                floating_network_id=floatingip_obj.floating_network_id,
                fixed_ip_address=netaddr.IPAddress(internal_ip_address))
            if fip_exists:
                floating_ip_address = (str(floatingip_obj.floating_ip_address)
                    if floatingip_obj.floating_ip_address else None)
                raise l3.FloatingIPPortAlreadyAssociated(
                    port_id=fip['port_id'],
                    fip_id=floatingip_obj.id,
                    floating_ip_address=floating_ip_address,
                    fixed_ip=internal_ip_address,
                    net_id=floatingip_obj.floating_network_id)
        if fip and 'port_id' not in fip and floatingip_obj.fixed_port_id:
            # NOTE(liuyulong): without the fix of bug #1610045 here could
            # also let floating IP can be dissociated with an empty
            # updating dict.
            fip['port_id'] = floatingip_obj.fixed_port_id
            port_id, internal_ip_address, router_id = self._get_assoc_data(
                context, fip, floatingip_obj)
        # After all upper conditions, if updating API dict is submitted with
        # {'port_id': null}, then the floating IP cloud also be dissociated.
        return port_id, internal_ip_address, router_id
def _update_fip_assoc(self, context, fip, floatingip_obj, external_port):
previous_router_id = floatingip_obj.router_id
port_id, internal_ip_address, router_id = (
self._check_and_get_fip_assoc(context, fip, floatingip_obj))
floatingip_obj.fixed_ip_address = (
netaddr.IPAddress(internal_ip_address)
if internal_ip_address else None)
floatingip_obj.fixed_port_id = port_id
floatingip_obj.router_id = router_id
floatingip_obj.last_known_router_id = previous_router_id
if 'description' in fip:
floatingip_obj.description = fip['description']
floating_ip_address = (str(floatingip_obj.floating_ip_address)
if floatingip_obj.floating_ip_address else None)
return {'fixed_ip_address': internal_ip_address,
'fixed_port_id': port_id,
'router_id': router_id,
'last_known_router_id': previous_router_id,
'floating_ip_address': floating_ip_address,
'floating_network_id': floatingip_obj.floating_network_id,
'floating_ip_id': floatingip_obj.id,
'context': context}
def _is_ipv4_network(self, context, net_id):
net = self._core_plugin._get_network(context, net_id)
return any(s.ip_version == 4 for s in net.subnets)
    def _create_floatingip(self, context, floatingip,
                           initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
        """Create a floating IP backed by an internal external-network port.

        :param floatingip: API request body {'floatingip': {...}}.
        :param initial_status: status recorded on the new floating IP.
        :returns: the created floating IP as an API dict.
        :raises: BadRequest when the network is not external or has no
            IPv4 subnet; ExternalIpAddressExhausted when no IPv4 address
            could be allocated.
        """
        fip = floatingip['floatingip']
        fip_id = uuidutils.generate_uuid()
        f_net_id = fip['floating_network_id']
        if not self._core_plugin._network_is_external(context, f_net_id):
            msg = _("Network %s is not a valid external network") % f_net_id
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        if not self._is_ipv4_network(context, f_net_id):
            msg = _("Network %s does not contain any IPv4 subnet") % f_net_id
            raise n_exc.BadRequest(resource='floatingip', msg=msg)
        # This external port is never exposed to the tenant.
        # it is used purely for internal system and admin use when
        # managing floating IPs.
        port = {'tenant_id': '',  # tenant intentionally not set
                'network_id': f_net_id,
                'admin_state_up': True,
                'device_id': 'PENDING',
                'device_owner': DEVICE_OWNER_FLOATINGIP,
                'status': constants.PORT_STATUS_NOTAPPLICABLE,
                'name': ''}
        # A requested address or subnet constrains the port's allocation.
        if fip.get('floating_ip_address'):
            port['fixed_ips'] = [
                {'ip_address': fip['floating_ip_address']}]
        if fip.get('subnet_id'):
            port['fixed_ips'] = [
                {'subnet_id': fip['subnet_id']}]
        # 'status' in port dict could not be updated by default, use
        # check_allow_post to stop the verification of system
        external_port = p_utils.create_port(self._core_plugin,
                                            context.elevated(),
                                            {'port': port},
                                            check_allow_post=False)
        # delete_port_on_error rolls the port back if anything below fails
        # before the transaction commits.
        with p_utils.delete_port_on_error(self._core_plugin,
                                          context.elevated(),
                                          external_port['id']),\
                context.session.begin(subtransactions=True):
            # Ensure IPv4 addresses are allocated on external port
            external_ipv4_ips = self._port_ipv4_fixed_ips(external_port)
            if not external_ipv4_ips:
                raise n_exc.ExternalIpAddressExhausted(net_id=f_net_id)
            floating_fixed_ip = external_ipv4_ips[0]
            floating_ip_address = floating_fixed_ip['ip_address']
            floatingip_obj = l3_obj.FloatingIP(
                context,
                id=fip_id,
                project_id=fip['tenant_id'],
                status=initial_status,
                floating_network_id=fip['floating_network_id'],
                floating_ip_address=floating_ip_address,
                floating_port_id=external_port['id'],
                description=fip.get('description'))
            # Update association with internal port
            # and define external IP address
            assoc_result = self._update_fip_assoc(
                context, fip, floatingip_obj, external_port)
            floatingip_obj.create()
            floatingip_dict = self._make_floatingip_dict(
                floatingip_obj, process_extensions=False)
            if self._is_dns_integration_supported:
                dns_data = self._process_dns_floatingip_create_precommit(
                    context, floatingip_dict, fip)
            if self._is_fip_qos_supported:
                self._process_extra_fip_qos_create(context, fip_id, fip)
            floatingip_obj = l3_obj.FloatingIP.get_object(
                context, id=floatingip_obj.id)
            floatingip_db = floatingip_obj.db_obj
            registry.notify(resources.FLOATING_IP, events.PRECOMMIT_CREATE,
                            self, context=context, floatingip=fip,
                            floatingip_id=fip_id,
                            floatingip_db=floatingip_db)
        # Post-commit: claim the external port for this floating IP and
        # send the AFTER_UPDATE association notification.
        self._core_plugin.update_port(context.elevated(), external_port['id'],
                                      {'port': {'device_id': fip_id}})
        registry.notify(resources.FLOATING_IP,
                        events.AFTER_UPDATE,
                        self._update_fip_assoc,
                        **assoc_result)
        if self._is_dns_integration_supported:
            self._process_dns_floatingip_create_postcommit(context,
                                                           floatingip_dict,
                                                           dns_data)
        # TODO(lujinluo): Change floatingip_db to floatingip_obj once all
        # codes are migrated to use Floating IP OVO object.
        resource_extend.apply_funcs(l3.FLOATINGIPS, floatingip_dict,
                                    floatingip_db)
        return floatingip_dict
@db_api.retry_if_session_inactive()
def create_floatingip(self, context, floatingip,
initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
return self._create_floatingip(context, floatingip, initial_status)
    def _update_floatingip(self, context, id, floatingip):
        """Update floating IP *id*, including (re-)association.

        :returns: (old floating IP dict, updated floating IP dict).
        """
        fip = floatingip['floatingip']
        with context.session.begin(subtransactions=True):
            floatingip_obj = self._get_floatingip(context, id)
            old_floatingip = self._make_floatingip_dict(floatingip_obj)
            fip_port_id = floatingip_obj.floating_port_id
            # Recompute the association against the requested changes.
            assoc_result = self._update_fip_assoc(
                context, fip, floatingip_obj,
                self._core_plugin.get_port(context.elevated(), fip_port_id))
            floatingip_obj.update()
            floatingip_dict = self._make_floatingip_dict(floatingip_obj)
            if self._is_dns_integration_supported:
                dns_data = self._process_dns_floatingip_update_precommit(
                    context, floatingip_dict)
            if self._is_fip_qos_supported:
                self._process_extra_fip_qos_update(context,
                                                   floatingip_obj,
                                                   fip,
                                                   old_floatingip)
            floatingip_obj = l3_obj.FloatingIP.get_object(
                context, id=floatingip_obj.id)
            floatingip_db = floatingip_obj.db_obj
        # Post-commit: announce the (possibly changed) association.
        registry.notify(resources.FLOATING_IP,
                        events.AFTER_UPDATE,
                        self._update_fip_assoc,
                        **assoc_result)
        if self._is_dns_integration_supported:
            self._process_dns_floatingip_update_postcommit(context,
                                                           floatingip_dict,
                                                           dns_data)
        # TODO(lujinluo): Change floatingip_db to floatingip_obj once all
        # codes are migrated to use Floating IP OVO object.
        resource_extend.apply_funcs(l3.FLOATINGIPS, floatingip_dict,
                                    floatingip_db)
        return old_floatingip, floatingip_dict
def _floatingips_to_router_ids(self, floatingips):
return list(set([floatingip['router_id']
for floatingip in floatingips
if floatingip['router_id']]))
@db_api.retry_if_session_inactive()
def update_floatingip(self, context, id, floatingip):
_old_floatingip, floatingip = self._update_floatingip(
context, id, floatingip)
return floatingip
@db_api.retry_if_session_inactive()
def update_floatingip_status(self, context, floatingip_id, status):
"""Update operational status for floating IP in neutron DB."""
l3_obj.FloatingIP.update_objects(
context, {'status': status}, id=floatingip_id)
def _delete_floatingip(self, context, id):
floatingip = self._get_floatingip(context, id)
floatingip_dict = self._make_floatingip_dict(floatingip)
if self._is_dns_integration_supported:
self._process_dns_floatingip_delete(context, floatingip_dict)
# Foreign key cascade will take care of the removal of the
# floating IP record once the port is deleted. We can't start
# a transaction first to remove it ourselves because the delete_port
# method will yield in its post-commit activities.
self._core_plugin.delete_port(context.elevated(),
floatingip.floating_port_id,
l3_port_check=False)
return floatingip_dict
@db_api.retry_if_session_inactive()
def delete_floatingip(self, context, id):
self._delete_floatingip(context, id)
@db_api.retry_if_session_inactive()
def get_floatingip(self, context, id, fields=None):
floatingip = self._get_floatingip(context, id)
return self._make_floatingip_dict(floatingip, fields)
@db_api.retry_if_session_inactive()
def get_floatingips(self, context, filters=None, fields=None,
sorts=None, limit=None, marker=None,
page_reverse=False):
pager = base_obj.Pager(sorts, limit, page_reverse, marker)
filters = filters or {}
for key, val in API_TO_DB_COLUMN_MAP.items():
if key in filters:
filters[val] = filters.pop(key)
floatingip_objs = l3_obj.FloatingIP.get_objects(
context, _pager=pager, validate_filters=False, **filters)
floatingip_dicts = [
self._make_floatingip_dict(floatingip_obj, fields)
for floatingip_obj in floatingip_objs
]
return floatingip_dicts
@db_api.retry_if_session_inactive()
def delete_disassociated_floatingips(self, context, network_id):
fip_objs = l3_obj.FloatingIP.get_objects(
context,
floating_network_id=network_id, router_id=None, fixed_port_id=None)
for fip in fip_objs:
self.delete_floatingip(context, fip.id)
@db_api.retry_if_session_inactive()
def get_floatingips_count(self, context, filters=None):
filters = filters or {}
return l3_obj.FloatingIP.count(context, **filters)
def _router_exists(self, context, router_id):
try:
self.get_router(context.elevated(), router_id)
return True
except l3.RouterNotFound:
return False
    def prevent_l3_port_deletion(self, context, port_id):
        """Checks to make sure a port is allowed to be deleted.

        Raises an exception if this is not the case. This should be called by
        any plugin when the API requests the deletion of a port, since some
        ports for L3 are not intended to be deleted directly via a DELETE
        to /ports, but rather via other API calls that perform the proper
        deletion checks.

        :raises: ServicePortInUse when the port is an in-use L3 port.
        """
        try:
            port = self._core_plugin.get_port(context, port_id)
        except n_exc.PortNotFound:
            # non-existent ports don't need to be protected from deletion
            return
        if port['device_owner'] not in self.router_device_owners:
            # Not an L3-owned port; nothing to protect.
            return
        # Raise port in use only if the port has IP addresses
        # Otherwise it's a stale port that can be removed
        fixed_ips = port['fixed_ips']
        if not fixed_ips:
            LOG.debug("Port %(port_id)s has owner %(port_owner)s, but "
                      "no IP address, so it can be deleted",
                      {'port_id': port['id'],
                       'port_owner': port['device_owner']})
            return
        # NOTE(kevinbenton): we also check to make sure that the
        # router still exists. It's possible for HA router interfaces
        # to remain after the router is deleted if they encounter an
        # error during deletion.
        # Elevated context in case router is owned by another tenant
        if port['device_owner'] == DEVICE_OWNER_FLOATINGIP:
            if not l3_obj.FloatingIP.objects_exist(
                    context, id=port['device_id']):
                LOG.debug("Floating IP %(f_id)s corresponding to port "
                          "%(port_id)s no longer exists, allowing deletion.",
                          {'f_id': port['device_id'], 'port_id': port['id']})
                return
        elif not self._router_exists(context, port['device_id']):
            LOG.debug("Router %(router_id)s corresponding to port "
                      "%(port_id)s no longer exists, allowing deletion.",
                      {'router_id': port['device_id'],
                       'port_id': port['id']})
            return
        reason = _('has device owner %s') % port['device_owner']
        raise n_exc.ServicePortInUse(port_id=port['id'],
                                     reason=reason)
@db_api.retry_if_session_inactive()
def disassociate_floatingips(self, context, port_id, do_notify=True):
"""Disassociate all floating IPs linked to specific port.
@param port_id: ID of the port to disassociate floating IPs.
@param do_notify: whether we should notify routers right away.
This parameter is ignored.
@return: set of router-ids that require notification updates
"""
with context.session.begin(subtransactions=True):
floating_ip_objs = l3_obj.FloatingIP.get_objects(
context, fixed_port_id=port_id)
router_ids = {fip.router_id for fip in floating_ip_objs}
values = {'fixed_port_id': None,
'fixed_ip_address': None,
'router_id': None}
l3_obj.FloatingIP.update_objects(
context, values, fixed_port_id=port_id)
return router_ids
def _get_floatingips_by_port_id(self, context, port_id):
"""Helper function to retrieve the fips associated with a port_id."""
return l3_obj.FloatingIP.get_objects(context, fixed_port_id=port_id)
def _build_routers_list(self, context, routers, gw_ports):
"""Subclasses can override this to add extra gateway info"""
return routers
def _make_router_dict_with_gw_port(self, router, fields):
result = self._make_router_dict(router, fields)
if router.get('gw_port'):
result['gw_port'] = self._core_plugin._make_port_dict(
router['gw_port'])
return result
    def _get_sync_routers(self, context, router_ids=None, active=None):
        """Query routers and their gw ports for l3 agent.

        Query routers with the router_ids. The gateway ports, if any,
        will be queried too.
        l3 agent has an option to deal with only one router id. In addition,
        when we need to notify the agent the data about only one router
        (when modification of router, its interfaces, gw_port and floatingips),
        we will have router_ids.

        :param router_ids: the list of router ids which we want to query.
            if it is None, all of routers will be queried.
        :param active: when not None, restrict the result to routers whose
            admin_state_up matches this value.
        :returns: a list of dicted routers with dicted gw_port populated if any
        """
        filters = {'id': router_ids} if router_ids else {}
        if active is not None:
            filters['admin_state_up'] = [active]
        router_dicts = model_query.get_collection(
            context, l3_models.Router, self._make_router_dict_with_gw_port,
            filters=filters)
        if not router_dicts:
            return []
        # Index gateway ports by id so subclasses can decorate routers
        # with extra gateway information in _build_routers_list.
        gw_ports = dict((r['gw_port']['id'], r['gw_port'])
                        for r in router_dicts
                        if r.get('gw_port'))
        return self._build_routers_list(context, router_dicts, gw_ports)
def _make_floatingip_dict_with_scope(self, floatingip_obj, scope_id):
d = self._make_floatingip_dict(floatingip_obj)
d['fixed_ip_address_scope'] = scope_id
return d
def _get_sync_floating_ips(self, context, router_ids):
"""Query floating_ips that relate to list of router_ids with scope.
This is different than the regular get_floatingips in that it finds the
address scope of the fixed IP. The router needs to know this to
distinguish it from other scopes.
There are a few redirections to go through to discover the address
scope from the floating ip.
"""
if not router_ids:
return []
return [
self._make_floatingip_dict_with_scope(*scoped_fip)
for scoped_fip in l3_obj.FloatingIP.get_scoped_floating_ips(
context, router_ids)
]
def _get_sync_interfaces(self, context, router_ids, device_owners=None):
"""Query router interfaces that relate to list of router_ids."""
device_owners = device_owners or [DEVICE_OWNER_ROUTER_INTF,
DEVICE_OWNER_HA_REPLICATED_INT]
if not router_ids:
return []
# TODO(lujinluo): Need Port as synthetic field
objs = l3_obj.RouterPort.get_objects(
context, router_id=router_ids, port_type=list(device_owners))
interfaces = [self._core_plugin._make_port_dict(rp.db_obj.port)
for rp in objs]
return interfaces
@staticmethod
def _each_port_having_fixed_ips(ports):
for port in ports or []:
fixed_ips = port.get('fixed_ips', [])
if not fixed_ips:
# Skip ports without IPs, which can occur if a subnet
# attached to a router is deleted
LOG.info("Skipping port %s as no IP is configure on "
"it",
port['id'])
continue
yield port
def _get_subnets_by_network_list(self, context, network_ids):
if not network_ids:
return {}
query = context.session.query(models_v2.Subnet,
models_v2.SubnetPool.address_scope_id)
query = query.outerjoin(
models_v2.SubnetPool,
models_v2.Subnet.subnetpool_id == models_v2.SubnetPool.id)
query = query.filter(models_v2.Subnet.network_id.in_(network_ids))
fields = ['id', 'cidr', 'gateway_ip', 'dns_nameservers',
'network_id', 'ipv6_ra_mode', 'subnetpool_id']
def make_subnet_dict_with_scope(row):
subnet_db, address_scope_id = row
subnet = self._core_plugin._make_subnet_dict(
subnet_db, fields, context=context)
subnet['address_scope_id'] = address_scope_id
return subnet
subnets_by_network = dict((id, []) for id in network_ids)
for subnet in (make_subnet_dict_with_scope(row) for row in query):
subnets_by_network[subnet['network_id']].append(subnet)
return subnets_by_network
def _get_mtus_by_network_list(self, context, network_ids):
if not network_ids:
return {}
filters = {'network_id': network_ids}
fields = ['id', 'mtu']
networks = self._core_plugin.get_networks(context, filters=filters,
fields=fields)
mtus_by_network = dict((network['id'], network.get('mtu', 0))
for network in networks)
return mtus_by_network
    def _populate_mtu_and_subnets_for_ports(self, context, ports):
        """Populate ports with subnets.

        These ports already have fixed_ips populated.  Each port gains
        'subnets' (subnets it holds an IP on), 'extra_subnets' (other
        subnets on its network), 'address_scopes' (per IP version) and
        'mtu'; its fixed_ips entries gain 'prefixlen'.  Ports without
        fixed IPs are skipped entirely.
        """
        network_ids = [p['network_id']
                       for p in self._each_port_having_fixed_ips(ports)]
        mtus_by_network = self._get_mtus_by_network_list(context, network_ids)
        subnets_by_network = self._get_subnets_by_network_list(
            context, network_ids)
        for port in self._each_port_having_fixed_ips(ports):
            port['subnets'] = []
            port['extra_subnets'] = []
            port['address_scopes'] = {constants.IP_VERSION_4: None,
                                      constants.IP_VERSION_6: None}
            scopes = {}
            for subnet in subnets_by_network[port['network_id']]:
                scope = subnet['address_scope_id']
                cidr = netaddr.IPNetwork(subnet['cidr'])
                scopes[cidr.version] = scope
                # If this subnet is used by the port (has a matching entry
                # in the port's fixed_ips), then add this subnet to the
                # port's subnets list, and populate the fixed_ips entry
                # entry with the subnet's prefix length.
                subnet_info = {'id': subnet['id'],
                               'cidr': subnet['cidr'],
                               'gateway_ip': subnet['gateway_ip'],
                               'dns_nameservers': subnet['dns_nameservers'],
                               'ipv6_ra_mode': subnet['ipv6_ra_mode'],
                               'subnetpool_id': subnet['subnetpool_id']}
                for fixed_ip in port['fixed_ips']:
                    if fixed_ip['subnet_id'] == subnet['id']:
                        port['subnets'].append(subnet_info)
                        prefixlen = cidr.prefixlen
                        fixed_ip['prefixlen'] = prefixlen
                        break
                else:
                    # This subnet is not used by the port.
                    port['extra_subnets'].append(subnet_info)
            port['address_scopes'].update(scopes)
            port['mtu'] = mtus_by_network.get(port['network_id'], 0)
def _process_floating_ips(self, context, routers_dict, floating_ips):
for floating_ip in floating_ips:
router = routers_dict.get(floating_ip['router_id'])
if router:
router_floatingips = router.get(constants.FLOATINGIP_KEY,
[])
router_floatingips.append(floating_ip)
router[constants.FLOATINGIP_KEY] = router_floatingips
def _process_interfaces(self, routers_dict, interfaces):
for interface in interfaces:
router = routers_dict.get(interface['device_id'])
if router:
router_interfaces = router.get(constants.INTERFACE_KEY, [])
router_interfaces.append(interface)
router[constants.INTERFACE_KEY] = router_interfaces
def _get_router_info_list(self, context, router_ids=None, active=None,
device_owners=None):
"""Query routers and their related floating_ips, interfaces."""
with context.session.begin(subtransactions=True):
routers = self._get_sync_routers(context,
router_ids=router_ids,
active=active)
router_ids = [router['id'] for router in routers]
interfaces = self._get_sync_interfaces(
context, router_ids, device_owners)
floating_ips = self._get_sync_floating_ips(context, router_ids)
return (routers, interfaces, floating_ips)
def get_sync_data(self, context, router_ids=None, active=None):
routers, interfaces, floating_ips = self._get_router_info_list(
context, router_ids=router_ids, active=active)
ports_to_populate = [router['gw_port'] for router in routers
if router.get('gw_port')] + interfaces
self._populate_mtu_and_subnets_for_ports(context, ports_to_populate)
routers_dict = dict((router['id'], router) for router in routers)
self._process_floating_ips(context, routers_dict, floating_ips)
self._process_interfaces(routers_dict, interfaces)
return list(routers_dict.values())
@registry.has_registry_receivers
class L3RpcNotifierMixin(object):
"""Mixin class to add rpc notifier attribute to db_base_plugin_v2."""
@staticmethod
@registry.receives(resources.PORT, [events.AFTER_DELETE])
def _notify_routers_callback(resource, event, trigger, **kwargs):
context = kwargs['context']
router_ids = kwargs['router_ids']
l3plugin = directory.get_plugin(plugin_constants.L3)
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
else:
LOG.debug('%s not configured', plugin_constants.L3)
@staticmethod
@registry.receives(resources.SUBNET, [events.AFTER_UPDATE])
def _notify_subnet_gateway_ip_update(resource, event, trigger, **kwargs):
l3plugin = directory.get_plugin(plugin_constants.L3)
if not l3plugin:
return
context = kwargs['context']
orig = kwargs['original_subnet']
updated = kwargs['subnet']
if orig['gateway_ip'] == updated['gateway_ip']:
return
network_id = updated['network_id']
subnet_id = updated['id']
query = context.session.query(models_v2.Port).filter_by(
network_id=network_id,
device_owner=DEVICE_OWNER_ROUTER_GW)
query = query.join(models_v2.Port.fixed_ips).filter(
models_v2.IPAllocation.subnet_id == subnet_id)
router_ids = set(port['device_id'] for port in query)
for router_id in router_ids:
l3plugin.notify_router_updated(context, router_id)
@staticmethod
@registry.receives(resources.SUBNETPOOL_ADDRESS_SCOPE,
[events.AFTER_UPDATE])
def _notify_subnetpool_address_scope_update(resource, event,
trigger, **kwargs):
context = kwargs['context']
subnetpool_id = kwargs['subnetpool_id']
router_ids = l3_obj.RouterPort.get_router_ids_by_subnetpool(
context, subnetpool_id)
l3plugin = directory.get_plugin(plugin_constants.L3)
if l3plugin:
l3plugin.notify_routers_updated(context, router_ids)
else:
LOG.debug('%s not configured', plugin_constants.L3)
@property
def l3_rpc_notifier(self):
if not hasattr(self, '_l3_rpc_notifier'):
self._l3_rpc_notifier = l3_rpc_agent_api.L3AgentNotifyAPI()
return self._l3_rpc_notifier
@l3_rpc_notifier.setter
def l3_rpc_notifier(self, value):
self._l3_rpc_notifier = value
def notify_router_updated(self, context, router_id,
operation=None):
if router_id:
self.l3_rpc_notifier.routers_updated(
context, [router_id], operation)
def notify_routers_updated(self, context, router_ids,
                           operation=None, data=None):
    """Send an RPC routers_updated fanout for a batch of routers.

    An empty/falsy router_ids collection is silently ignored.
    """
    if not router_ids:
        return
    self.l3_rpc_notifier.routers_updated(
        context, router_ids, operation, data)
def notify_router_deleted(self, context, router_id):
    """Send an RPC router_deleted fanout for one router."""
    notifier = self.l3_rpc_notifier
    notifier.router_deleted(context, router_id)
class L3_NAT_db_mixin(L3_NAT_dbonly_mixin, L3RpcNotifierMixin):
    """Mixin class to add rpc notifier methods to db_base_plugin_v2."""

    def create_router(self, context, router):
        """Create a router; notify agents only if it got a gateway."""
        created = super(L3_NAT_db_mixin, self).create_router(context,
                                                             router)
        if created.get('external_gateway_info'):
            self.notify_router_updated(context, created['id'], None)
        return created

    def update_router(self, context, id, router):
        """Update a router and unconditionally notify the agents."""
        updated = super(L3_NAT_db_mixin, self).update_router(context,
                                                             id, router)
        self.notify_router_updated(context, updated['id'], None)
        return updated

    def delete_router(self, context, id):
        """Delete a router, then notify the agents of the deletion."""
        super(L3_NAT_db_mixin, self).delete_router(context, id)
        self.notify_router_deleted(context, id)

    def notify_router_interface_action(
            self, context, router_interface_info, action):
        """Fan out both the agent RPC and the oslo notification for an
        add/remove router-interface action."""
        l3_method = '%s_router_interface' % action
        # Use the parent's notify_routers_updated: this class overrides
        # it with a narrower signature further below.
        super(L3_NAT_db_mixin, self).notify_routers_updated(
            context, [router_interface_info['id']], l3_method,
            {'subnet_id': router_interface_info['subnet_id']})
        mapping = {'add': 'create', 'remove': 'delete'}
        notifier = n_rpc.get_notifier('network')
        router_event = 'router.interface.%s' % mapping[action]
        notifier.info(context, router_event,
                      {'router_interface': router_interface_info})

    def add_router_interface(self, context, router_id, interface_info=None):
        """Attach an interface to a router and notify agents."""
        iface_info = super(
            L3_NAT_db_mixin, self).add_router_interface(
                context, router_id, interface_info)
        self.notify_router_interface_action(context, iface_info, 'add')
        return iface_info

    def remove_router_interface(self, context, router_id, interface_info):
        """Detach an interface from a router and notify agents."""
        iface_info = super(
            L3_NAT_db_mixin, self).remove_router_interface(
                context, router_id, interface_info)
        self.notify_router_interface_action(context, iface_info, 'remove')
        return iface_info

    def create_floatingip(self, context, floatingip,
                          initial_status=constants.FLOATINGIP_STATUS_ACTIVE):
        """Create a floating IP and notify its router."""
        fip = super(L3_NAT_db_mixin, self).create_floatingip(
            context, floatingip, initial_status)
        self.notify_router_updated(context, fip['router_id'],
                                   'create_floatingip')
        return fip

    def update_floatingip(self, context, id, floatingip):
        """Update a floating IP; notify both the old and new routers."""
        old_fip, new_fip = self._update_floatingip(context, id, floatingip)
        affected = self._floatingips_to_router_ids([old_fip, new_fip])
        super(L3_NAT_db_mixin, self).notify_routers_updated(
            context, affected, 'update_floatingip', {})
        return new_fip

    def delete_floatingip(self, context, id):
        """Delete a floating IP and notify the router it was bound to."""
        fip = self._delete_floatingip(context, id)
        self.notify_router_updated(context, fip['router_id'],
                                   'delete_floatingip')

    def disassociate_floatingips(self, context, port_id, do_notify=True):
        """Disassociate all floating IPs linked to specific port.

        @param port_id: ID of the port to disassociate floating IPs.
        @param do_notify: whether we should notify routers right away.
        @return: set of router-ids that require notification updates
                 if do_notify is False, otherwise None.
        """
        affected = super(L3_NAT_db_mixin, self).disassociate_floatingips(
            context, port_id, do_notify)
        if not do_notify:
            return affected
        # Caller assumes notifications were handled on its behalf,
        # so return nothing.
        self.notify_routers_updated(context, affected)

    def notify_routers_updated(self, context, router_ids):
        """Notify agents of a disassociate_floatingips batch update."""
        super(L3_NAT_db_mixin, self).notify_routers_updated(
            context, list(router_ids), 'disassociate_floatingips', {})

    def _migrate_router_ports(
            self, context, router_db, old_owner, new_owner):
        """Update the model to support the dvr case of a router."""
        for router_port in router_db.attached_ports:
            if router_port.port_type == old_owner:
                router_port.port_type = new_owner
                router_port.port.device_owner = new_owner
| apache-2.0 |
hoehnp/navit_test | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/mbcsgroupprober.py | 2769 | 1967 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetgroupprober import CharSetGroupProber
from .utf8prober import UTF8Prober
from .sjisprober import SJISProber
from .eucjpprober import EUCJPProber
from .gb2312prober import GB2312Prober
from .euckrprober import EUCKRProber
from .cp949prober import CP949Prober
from .big5prober import Big5Prober
from .euctwprober import EUCTWProber
class MBCSGroupProber(CharSetGroupProber):
    """Group prober that tries every supported multi-byte charset."""

    # One prober class per multi-byte encoding, in priority order.
    _PROBER_CLASSES = (
        UTF8Prober,
        SJISProber,
        EUCJPProber,
        GB2312Prober,
        EUCKRProber,
        CP949Prober,
        Big5Prober,
        EUCTWProber,
    )

    def __init__(self):
        CharSetGroupProber.__init__(self)
        self._mProbers = [prober() for prober in self._PROBER_CLASSES]
        self.reset()
| gpl-2.0 |
40223145c2g18/c2g18 | exts/wsgi/static/Brython2.1.0-20140419-113919/Lib/test/re_tests.py | 879 | 31796 | #!/usr/bin/env python3
# -*- mode: python -*-
# Re test suite and benchmark suite v1.5
# The 3 possible outcomes for each pattern
SUCCEED, FAIL, SYNTAX_ERROR = range(3)  # 0, 1, 2
# Benchmark suite (needs expansion)
#
# The benchmark suite does not test correctness, just speed. The
# first element of each tuple is the regex pattern; the second is a
# string to match it against. The benchmarking code will embed the
# second string inside several sizes of padding, to test how regex
# matching performs on large strings.
# (pattern, text) pairs timed by the benchmark driver; the second
# element is embedded in padding of several sizes before matching.
benchmarks = [
    # Patterns sharing a common prefix.
    ('Python|Perl', 'Perl'),                # Alternation
    ('(Python|Perl)', 'Perl'),              # Grouped alternation
    ('Python|Perl|Tcl', 'Perl'),            # Alternation
    ('(Python|Perl|Tcl)', 'Perl'),          # Grouped alternation
    ('(Python)\\1', 'PythonPython'),        # Backreference
    ('([0a-z][a-z0-9]*,)+', 'a5,b7,c9,'),   # Disable the fastmap optimization
    ('([a-z][a-z0-9]*,)+', 'a5,b7,c9,'),    # A few sets
    ('Python', 'Python'),                   # Simple text literal
    ('.*Python', 'Python'),                 # Bad text literal
    ('.*Python.*', 'Python'),               # Worse text literal
    ('.*(Python)', 'Python'),               # Bad text literal with grouping
]
# Test suite (for verifying correctness)
#
# The test suite is a list of 5- or 3-tuples. The 5 parts of a
# complete tuple are:
# element 0: a string containing the pattern
# 1: the string to match against the pattern
# 2: the expected result (SUCCEED, FAIL, SYNTAX_ERROR)
# 3: a string that will be eval()'ed to produce a test string.
# This is an arbitrary Python expression; the available
# variables are "found" (the whole match), and "g1", "g2", ...
# up to "g99" contain the contents of each group, or the
# string 'None' if the group wasn't given a value, or the
# string 'Error' if the group index was out of range;
# also "groups", the return value of m.group() (a tuple).
# 4: The expected result of evaluating the expression.
# If the two don't match, an error is reported.
#
# If the regex isn't expected to work, the latter two elements can be omitted.
tests = [
# Test ?P< and ?P= extensions
('(?P<foo_123', '', SYNTAX_ERROR), # Unterminated group identifier
('(?P<1>a)', '', SYNTAX_ERROR), # Begins with a digit
('(?P<!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
('(?P<foo!>a)', '', SYNTAX_ERROR), # Begins with an illegal char
# Same tests, for the ?P= form
('(?P<foo_123>a)(?P=foo_123', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=1)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=!)', 'aa', SYNTAX_ERROR),
('(?P<foo_123>a)(?P=foo_124', 'aa', SYNTAX_ERROR), # Backref to undefined group
('(?P<foo_123>a)', 'a', SUCCEED, 'g1', 'a'),
('(?P<foo_123>a)(?P=foo_123)', 'aa', SUCCEED, 'g1', 'a'),
# Test octal escapes
('\\1', 'a', SYNTAX_ERROR), # Backreference
('[\\1]', '\1', SUCCEED, 'found', '\1'), # Character
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# Test \0 is handled everywhere
(r'\0', '\0', SUCCEED, 'found', '\0'),
(r'[\0a]', '\0', SUCCEED, 'found', '\0'),
(r'[a\0]', '\0', SUCCEED, 'found', '\0'),
(r'[^a\0]', '\0', FAIL),
# Test various letter escapes
(r'\a[\b]\f\n\r\t\v', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
(r'[\a][\b][\f][\n][\r][\t][\v]', '\a\b\f\n\r\t\v', SUCCEED, 'found', '\a\b\f\n\r\t\v'),
# NOTE: not an error under PCRE/PRE:
# (r'\u', '', SYNTAX_ERROR), # A Perl escape
(r'\c\e\g\h\i\j\k\m\o\p\q\y\z', 'ceghijkmopqyz', SUCCEED, 'found', 'ceghijkmopqyz'),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ffffffffffffff', '\377', FAIL, 'found', chr(255)),
(r'\x00f', '\017', FAIL, 'found', chr(15)),
(r'\x00fe', '\376', FAIL, 'found', chr(254)),
# (r'\x00ffffffffffffff', '\377', SUCCEED, 'found', chr(255)),
# (r'\x00f', '\017', SUCCEED, 'found', chr(15)),
# (r'\x00fe', '\376', SUCCEED, 'found', chr(254)),
(r"^\w+=(\\[\000-\277]|[^\n\\])*", "SRC=eval.c g.c blah blah blah \\\\\n\tapes.c",
SUCCEED, 'found', "SRC=eval.c g.c blah blah blah \\\\"),
# Test that . only matches \n in DOTALL mode
('a.b', 'acb', SUCCEED, 'found', 'acb'),
('a.b', 'a\nb', FAIL),
('a.*b', 'acc\nccb', FAIL),
('a.{4,5}b', 'acc\nccb', FAIL),
('a.b', 'a\rb', SUCCEED, 'found', 'a\rb'),
('a.b(?s)', 'a\nb', SUCCEED, 'found', 'a\nb'),
('a.*(?s)b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.{4,5}b', 'acc\nccb', SUCCEED, 'found', 'acc\nccb'),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
(')', '', SYNTAX_ERROR), # Unmatched right bracket
('', '', SUCCEED, 'found', ''), # Empty pattern
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found+"-"', '-'),
('$', 'abc', SUCCEED, 'found+"-"', '-'),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[\\-b]', 'a-', SUCCEED, 'found', 'a-'),
# NOTE: not an error under PCRE/PRE:
# ('a[b-]', 'a-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a\\', '-', SYNTAX_ERROR),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[\]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('\\ba\\b', 'a-', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a', SUCCEED, '"-"', '-'),
('\\ba\\b', '-a-', SUCCEED, '"-"', '-'),
('\\by\\b', 'xy', FAIL),
('\\by\\b', 'yz', FAIL),
('\\by\\b', 'xyz', FAIL),
('x\\b', 'xyz', FAIL),
('x\\B', 'xyz', SUCCEED, '"-"', '-'),
('\\Bz', 'xyz', SUCCEED, '"-"', '-'),
('z\\B', 'xyz', FAIL),
('\\Bx', 'xyz', FAIL),
('\\Ba\\B', 'a-', FAIL, '"-"', '-'),
('\\Ba\\B', '-a', FAIL, '"-"', '-'),
('\\Ba\\B', '-a-', FAIL, '"-"', '-'),
('\\By\\B', 'xy', FAIL),
('\\By\\B', 'yz', FAIL),
('\\By\\b', 'xy', SUCCEED, '"-"', '-'),
('\\by\\B', 'yz', SUCCEED, '"-"', '-'),
('\\By\\B', 'xyz', SUCCEED, '"-"', '-'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('$b', 'b', FAIL),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL, 'xg1y', 'xy'),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
('(a+).\\1$', 'aaaaa', SUCCEED, 'found+"-"+g1', 'aaaaa-aa'),
('^(a+).\\1$', 'aaaa', FAIL),
('(abc)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('([a-c]+)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a+)+\\1', 'aa', SUCCEED, 'found+"-"+g1', 'aa-a'),
('(a).+\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(a)ba*\\1', 'aba', SUCCEED, 'found+"-"+g1', 'aba-a'),
('(aa|a)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a|aa)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('(a+)a\\1$', 'aaa', SUCCEED, 'found+"-"+g1', 'aaa-a'),
('([abc]*)\\1', 'abcabc', SUCCEED, 'found+"-"+g1', 'abcabc-abc'),
('(a)(b)c|ab', 'ab', SUCCEED, 'found+"-"+g1+"-"+g2', 'ab-None-None'),
('(a)+x', 'aaax', SUCCEED, 'found+"-"+g1', 'aaax-a'),
('([ac])+x', 'aacx', SUCCEED, 'found+"-"+g1', 'aacx-c'),
('([^/]*/)*sub1/', 'd:msgs/tdir/sub1/trial/away.cpp', SUCCEED, 'found+"-"+g1', 'd:msgs/tdir/sub1/-tdir/'),
('([^.]*)\\.([^:]*):[T ]+(.*)', 'track1.title:TBlah blah blah', SUCCEED, 'found+"-"+g1+"-"+g2+"-"+g3', 'track1.title:TBlah blah blah-track1-title-Blah blah blah'),
('([^N]*N)+', 'abNNxyzN', SUCCEED, 'found+"-"+g1', 'abNNxyzN-xyzN'),
('([^N]*N)+', 'abNNxyz', SUCCEED, 'found+"-"+g1', 'abNN-N'),
('([abc]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'abcx-abc'),
('([abc]*)x', 'abc', FAIL),
('([xyz]*)x', 'abcx', SUCCEED, 'found+"-"+g1', 'x-'),
('(a)+b|aac', 'aac', SUCCEED, 'found+"-"+g1', 'aac-None'),
# Test symbolic groups
('(?P<i d>aaa)a', 'aaaa', SYNTAX_ERROR),
('(?P<id>aaa)a', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aaa'),
('(?P<id>aa)(?P=id)', 'aaaa', SUCCEED, 'found+"-"+id', 'aaaa-aa'),
('(?P<id>aa)(?P=xd)', 'aaaa', SYNTAX_ERROR),
# Test octal escapes/memory references
('\\1', 'a', SYNTAX_ERROR),
('\\09', chr(0) + '9', SUCCEED, 'found', chr(0) + '9'),
('\\141', 'a', SUCCEED, 'found', 'a'),
('(a)(b)(c)(d)(e)(f)(g)(h)(i)(j)(k)(l)\\119', 'abcdefghijklk9', SUCCEED, 'found+"-"+g11', 'abcdefghijklk9-k'),
# All tests from Perl
('abc', 'abc', SUCCEED, 'found', 'abc'),
('abc', 'xbc', FAIL),
('abc', 'axc', FAIL),
('abc', 'abx', FAIL),
('abc', 'xabcy', SUCCEED, 'found', 'abc'),
('abc', 'ababc', SUCCEED, 'found', 'abc'),
('ab*c', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abc', SUCCEED, 'found', 'abc'),
('ab*bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab*bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{0,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab+bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab+bc', 'abc', FAIL),
('ab+bc', 'abq', FAIL),
('ab{1,}bc', 'abq', FAIL),
('ab+bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{1,3}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{3,4}bc', 'abbbbc', SUCCEED, 'found', 'abbbbc'),
('ab{4,5}bc', 'abbbbc', FAIL),
('ab?bc', 'abbc', SUCCEED, 'found', 'abbc'),
('ab?bc', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}bc', 'abc', SUCCEED, 'found', 'abc'),
('ab?bc', 'abbbbc', FAIL),
('ab?c', 'abc', SUCCEED, 'found', 'abc'),
('ab{0,1}c', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abc', SUCCEED, 'found', 'abc'),
('^abc$', 'abcc', FAIL),
('^abc', 'abcc', SUCCEED, 'found', 'abc'),
('^abc$', 'aabc', FAIL),
('abc$', 'aabc', SUCCEED, 'found', 'abc'),
('^', 'abc', SUCCEED, 'found', ''),
('$', 'abc', SUCCEED, 'found', ''),
('a.c', 'abc', SUCCEED, 'found', 'abc'),
('a.c', 'axc', SUCCEED, 'found', 'axc'),
('a.*c', 'axyzc', SUCCEED, 'found', 'axyzc'),
('a.*c', 'axyzd', FAIL),
('a[bc]d', 'abc', FAIL),
('a[bc]d', 'abd', SUCCEED, 'found', 'abd'),
('a[b-d]e', 'abd', FAIL),
('a[b-d]e', 'ace', SUCCEED, 'found', 'ace'),
('a[b-d]', 'aac', SUCCEED, 'found', 'ac'),
('a[-b]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-]', 'a-', SUCCEED, 'found', 'a-'),
('a[b-a]', '-', SYNTAX_ERROR),
('a[]b', '-', SYNTAX_ERROR),
('a[', '-', SYNTAX_ERROR),
('a]', 'a]', SUCCEED, 'found', 'a]'),
('a[]]b', 'a]b', SUCCEED, 'found', 'a]b'),
('a[^bc]d', 'aed', SUCCEED, 'found', 'aed'),
('a[^bc]d', 'abd', FAIL),
('a[^-b]c', 'adc', SUCCEED, 'found', 'adc'),
('a[^-b]c', 'a-c', FAIL),
('a[^]b]c', 'a]c', FAIL),
('a[^]b]c', 'adc', SUCCEED, 'found', 'adc'),
('ab|cd', 'abc', SUCCEED, 'found', 'ab'),
('ab|cd', 'abcd', SUCCEED, 'found', 'ab'),
('()ef', 'def', SUCCEED, 'found+"-"+g1', 'ef-'),
('*a', '-', SYNTAX_ERROR),
('(*)b', '-', SYNTAX_ERROR),
('$b', 'b', FAIL),
('a\\', '-', SYNTAX_ERROR),
('a\\(b', 'a(b', SUCCEED, 'found+"-"+g1', 'a(b-Error'),
('a\\(*b', 'ab', SUCCEED, 'found', 'ab'),
('a\\(*b', 'a((b', SUCCEED, 'found', 'a((b'),
('a\\\\b', 'a\\b', SUCCEED, 'found', 'a\\b'),
('abc)', '-', SYNTAX_ERROR),
('(abc', '-', SYNTAX_ERROR),
('((a))', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'a-a-a'),
('(a)b(c)', 'abc', SUCCEED, 'found+"-"+g1+"-"+g2', 'abc-a-c'),
('a+b+c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a{1,}b{1,}c', 'aabbabc', SUCCEED, 'found', 'abc'),
('a**', '-', SYNTAX_ERROR),
('a.+?c', 'abcabc', SUCCEED, 'found', 'abc'),
('(a+|b)*', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){0,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)+', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b){1,}', 'ab', SUCCEED, 'found+"-"+g1', 'ab-b'),
('(a+|b)?', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
('(a+|b){0,1}', 'ab', SUCCEED, 'found+"-"+g1', 'a-a'),
(')(', '-', SYNTAX_ERROR),
('[^ab]*', 'cde', SUCCEED, 'found', 'cde'),
('abc', '', FAIL),
('a*', '', SUCCEED, 'found', ''),
('([abc])*d', 'abbbcd', SUCCEED, 'found+"-"+g1', 'abbbcd-c'),
('([abc])*bcd', 'abcd', SUCCEED, 'found+"-"+g1', 'abcd-a'),
('a|b|c|d|e', 'e', SUCCEED, 'found', 'e'),
('(a|b|c|d|e)f', 'ef', SUCCEED, 'found+"-"+g1', 'ef-e'),
('abcd*efg', 'abcdefg', SUCCEED, 'found', 'abcdefg'),
('ab*', 'xabyabbbz', SUCCEED, 'found', 'ab'),
('ab*', 'xayabbbz', SUCCEED, 'found', 'a'),
('(ab|cd)e', 'abcde', SUCCEED, 'found+"-"+g1', 'cde-cd'),
('[abhgefdc]ij', 'hij', SUCCEED, 'found', 'hij'),
('^(ab|cd)e', 'abcde', FAIL),
('(abc|)ef', 'abcdef', SUCCEED, 'found+"-"+g1', 'ef-'),
('(a|b)c*d', 'abcd', SUCCEED, 'found+"-"+g1', 'bcd-b'),
('(ab|ab*)bc', 'abc', SUCCEED, 'found+"-"+g1', 'abc-a'),
('a([bc]*)c*', 'abc', SUCCEED, 'found+"-"+g1', 'abc-bc'),
('a([bc]*)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]+)(c*d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-bc-d'),
('a([bc]*)(c+d)', 'abcd', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcd-b-cd'),
('a[bcd]*dcdcde', 'adcdcde', SUCCEED, 'found', 'adcdcde'),
('a[bcd]+dcdcde', 'adcdcde', FAIL),
('(ab|a)b*c', 'abc', SUCCEED, 'found+"-"+g1', 'abc-ab'),
('((a)(b)c)(d)', 'abcd', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'abc-a-b-d'),
('[a-zA-Z_][a-zA-Z0-9_]*', 'alpha', SUCCEED, 'found', 'alpha'),
('^a(bc+|b[eh])g|.h$', 'abh', SUCCEED, 'found+"-"+g1', 'bh-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'effgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('(bc+d$|ef*g.|h?i(j|k))', 'ij', SUCCEED, 'found+"-"+g1+"-"+g2', 'ij-ij-j'),
('(bc+d$|ef*g.|h?i(j|k))', 'effg', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'bcdd', FAIL),
('(bc+d$|ef*g.|h?i(j|k))', 'reffgz', SUCCEED, 'found+"-"+g1+"-"+g2', 'effgz-effgz-None'),
('((((((((((a))))))))))', 'a', SUCCEED, 'g10', 'a'),
('((((((((((a))))))))))\\10', 'aa', SUCCEED, 'found', 'aa'),
# Python does not have the same rules for \\41 so this is a syntax error
# ('((((((((((a))))))))))\\41', 'aa', FAIL),
# ('((((((((((a))))))))))\\41', 'a!', SUCCEED, 'found', 'a!'),
('((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(?i)((((((((((a))))))))))\\41', '', SYNTAX_ERROR),
('(((((((((a)))))))))', 'a', SUCCEED, 'found', 'a'),
('multiple words of text', 'uh-uh', FAIL),
('multiple words', 'multiple words, yeah', SUCCEED, 'found', 'multiple words'),
('(.*)c(.*)', 'abcde', SUCCEED, 'found+"-"+g1+"-"+g2', 'abcde-ab-de'),
('\\((.*), (.*)\\)', '(a, b)', SUCCEED, 'g2+"-"+g1', 'b-a'),
('[k]', 'ab', FAIL),
('a[-]?c', 'ac', SUCCEED, 'found', 'ac'),
('(abc)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('([a-c]*)\\1', 'abcabc', SUCCEED, 'g1', 'abc'),
('(?i)abc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'XBC', FAIL),
('(?i)abc', 'AXC', FAIL),
('(?i)abc', 'ABX', FAIL),
('(?i)abc', 'XABCY', SUCCEED, 'found', 'ABC'),
('(?i)abc', 'ABABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab*bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab*?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{0,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab+?bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab+bc', 'ABC', FAIL),
('(?i)ab+bc', 'ABQ', FAIL),
('(?i)ab{1,}bc', 'ABQ', FAIL),
('(?i)ab+bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{1,3}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{3,4}?bc', 'ABBBBC', SUCCEED, 'found', 'ABBBBC'),
('(?i)ab{4,5}?bc', 'ABBBBC', FAIL),
('(?i)ab??bc', 'ABBC', SUCCEED, 'found', 'ABBC'),
('(?i)ab??bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?bc', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab??bc', 'ABBBBC', FAIL),
('(?i)ab??c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)ab{0,1}?c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'ABCC', FAIL),
('(?i)^abc', 'ABCC', SUCCEED, 'found', 'ABC'),
('(?i)^abc$', 'AABC', FAIL),
('(?i)abc$', 'AABC', SUCCEED, 'found', 'ABC'),
('(?i)^', 'ABC', SUCCEED, 'found', ''),
('(?i)$', 'ABC', SUCCEED, 'found', ''),
('(?i)a.c', 'ABC', SUCCEED, 'found', 'ABC'),
('(?i)a.c', 'AXC', SUCCEED, 'found', 'AXC'),
('(?i)a.*?c', 'AXYZC', SUCCEED, 'found', 'AXYZC'),
('(?i)a.*c', 'AXYZD', FAIL),
('(?i)a[bc]d', 'ABC', FAIL),
('(?i)a[bc]d', 'ABD', SUCCEED, 'found', 'ABD'),
('(?i)a[b-d]e', 'ABD', FAIL),
('(?i)a[b-d]e', 'ACE', SUCCEED, 'found', 'ACE'),
('(?i)a[b-d]', 'AAC', SUCCEED, 'found', 'AC'),
('(?i)a[-b]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-]', 'A-', SUCCEED, 'found', 'A-'),
('(?i)a[b-a]', '-', SYNTAX_ERROR),
('(?i)a[]b', '-', SYNTAX_ERROR),
('(?i)a[', '-', SYNTAX_ERROR),
('(?i)a]', 'A]', SUCCEED, 'found', 'A]'),
('(?i)a[]]b', 'A]B', SUCCEED, 'found', 'A]B'),
('(?i)a[^bc]d', 'AED', SUCCEED, 'found', 'AED'),
('(?i)a[^bc]d', 'ABD', FAIL),
('(?i)a[^-b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)a[^-b]c', 'A-C', FAIL),
('(?i)a[^]b]c', 'A]C', FAIL),
('(?i)a[^]b]c', 'ADC', SUCCEED, 'found', 'ADC'),
('(?i)ab|cd', 'ABC', SUCCEED, 'found', 'AB'),
('(?i)ab|cd', 'ABCD', SUCCEED, 'found', 'AB'),
('(?i)()ef', 'DEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)*a', '-', SYNTAX_ERROR),
('(?i)(*)b', '-', SYNTAX_ERROR),
('(?i)$b', 'B', FAIL),
('(?i)a\\', '-', SYNTAX_ERROR),
('(?i)a\\(b', 'A(B', SUCCEED, 'found+"-"+g1', 'A(B-Error'),
('(?i)a\\(*b', 'AB', SUCCEED, 'found', 'AB'),
('(?i)a\\(*b', 'A((B', SUCCEED, 'found', 'A((B'),
('(?i)a\\\\b', 'A\\B', SUCCEED, 'found', 'A\\B'),
('(?i)abc)', '-', SYNTAX_ERROR),
('(?i)(abc', '-', SYNTAX_ERROR),
('(?i)((a))', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'A-A-A'),
('(?i)(a)b(c)', 'ABC', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABC-A-C'),
('(?i)a+b+c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a{1,}b{1,}c', 'AABBABC', SUCCEED, 'found', 'ABC'),
('(?i)a**', '-', SYNTAX_ERROR),
('(?i)a.+?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.*?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)a.{0,5}?c', 'ABCABC', SUCCEED, 'found', 'ABC'),
('(?i)(a+|b)*', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){0,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)+', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b){1,}', 'AB', SUCCEED, 'found+"-"+g1', 'AB-B'),
('(?i)(a+|b)?', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}', 'AB', SUCCEED, 'found+"-"+g1', 'A-A'),
('(?i)(a+|b){0,1}?', 'AB', SUCCEED, 'found+"-"+g1', '-None'),
('(?i))(', '-', SYNTAX_ERROR),
('(?i)[^ab]*', 'CDE', SUCCEED, 'found', 'CDE'),
('(?i)abc', '', FAIL),
('(?i)a*', '', SUCCEED, 'found', ''),
('(?i)([abc])*d', 'ABBBCD', SUCCEED, 'found+"-"+g1', 'ABBBCD-C'),
('(?i)([abc])*bcd', 'ABCD', SUCCEED, 'found+"-"+g1', 'ABCD-A'),
('(?i)a|b|c|d|e', 'E', SUCCEED, 'found', 'E'),
('(?i)(a|b|c|d|e)f', 'EF', SUCCEED, 'found+"-"+g1', 'EF-E'),
('(?i)abcd*efg', 'ABCDEFG', SUCCEED, 'found', 'ABCDEFG'),
('(?i)ab*', 'XABYABBBZ', SUCCEED, 'found', 'AB'),
('(?i)ab*', 'XAYABBBZ', SUCCEED, 'found', 'A'),
('(?i)(ab|cd)e', 'ABCDE', SUCCEED, 'found+"-"+g1', 'CDE-CD'),
('(?i)[abhgefdc]ij', 'HIJ', SUCCEED, 'found', 'HIJ'),
('(?i)^(ab|cd)e', 'ABCDE', FAIL),
('(?i)(abc|)ef', 'ABCDEF', SUCCEED, 'found+"-"+g1', 'EF-'),
('(?i)(a|b)c*d', 'ABCD', SUCCEED, 'found+"-"+g1', 'BCD-B'),
('(?i)(ab|ab*)bc', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-A'),
('(?i)a([bc]*)c*', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-BC'),
('(?i)a([bc]*)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]+)(c*d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-BC-D'),
('(?i)a([bc]*)(c+d)', 'ABCD', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCD-B-CD'),
('(?i)a[bcd]*dcdcde', 'ADCDCDE', SUCCEED, 'found', 'ADCDCDE'),
('(?i)a[bcd]+dcdcde', 'ADCDCDE', FAIL),
('(?i)(ab|a)b*c', 'ABC', SUCCEED, 'found+"-"+g1', 'ABC-AB'),
('(?i)((a)(b)c)(d)', 'ABCD', SUCCEED, 'g1+"-"+g2+"-"+g3+"-"+g4', 'ABC-A-B-D'),
('(?i)[a-zA-Z_][a-zA-Z0-9_]*', 'ALPHA', SUCCEED, 'found', 'ALPHA'),
('(?i)^a(bc+|b[eh])g|.h$', 'ABH', SUCCEED, 'found+"-"+g1', 'BH-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'IJ', SUCCEED, 'found+"-"+g1+"-"+g2', 'IJ-IJ-J'),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'EFFG', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'BCDD', FAIL),
('(?i)(bc+d$|ef*g.|h?i(j|k))', 'REFFGZ', SUCCEED, 'found+"-"+g1+"-"+g2', 'EFFGZ-EFFGZ-None'),
('(?i)((((((((((a))))))))))', 'A', SUCCEED, 'g10', 'A'),
('(?i)((((((((((a))))))))))\\10', 'AA', SUCCEED, 'found', 'AA'),
#('(?i)((((((((((a))))))))))\\41', 'AA', FAIL),
#('(?i)((((((((((a))))))))))\\41', 'A!', SUCCEED, 'found', 'A!'),
('(?i)(((((((((a)))))))))', 'A', SUCCEED, 'found', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a))))))))))', 'A', SUCCEED, 'g1', 'A'),
('(?i)(?:(?:(?:(?:(?:(?:(?:(?:(?:(a|b|c))))))))))', 'C', SUCCEED, 'g1', 'C'),
('(?i)multiple words of text', 'UH-UH', FAIL),
('(?i)multiple words', 'MULTIPLE WORDS, YEAH', SUCCEED, 'found', 'MULTIPLE WORDS'),
('(?i)(.*)c(.*)', 'ABCDE', SUCCEED, 'found+"-"+g1+"-"+g2', 'ABCDE-AB-DE'),
('(?i)\\((.*), (.*)\\)', '(A, B)', SUCCEED, 'g2+"-"+g1', 'B-A'),
('(?i)[k]', 'AB', FAIL),
# ('(?i)abcd', 'ABCD', SUCCEED, 'found+"-"+\\found+"-"+\\\\found', 'ABCD-$&-\\ABCD'),
# ('(?i)a(bc)d', 'ABCD', SUCCEED, 'g1+"-"+\\g1+"-"+\\\\g1', 'BC-$1-\\BC'),
('(?i)a[-]?c', 'AC', SUCCEED, 'found', 'AC'),
('(?i)(abc)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('(?i)([a-c]*)\\1', 'ABCABC', SUCCEED, 'g1', 'ABC'),
('a(?!b).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?=c|d).', 'abad', SUCCEED, 'found', 'ad'),
('a(?:b|c|d)(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)*(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|c|d)+?(.)', 'ace', SUCCEED, 'g1', 'e'),
('a(?:b|(c|e){1,2}?|d)+?(.)', 'ace', SUCCEED, 'g1 + g2', 'ce'),
('^(.+)?B', 'AB', SUCCEED, 'g1', 'A'),
# lookbehind: split by : but not if it is escaped by -.
('(?<!-):(.*?)(?<!-):', 'a:bc-:de:f', SUCCEED, 'g1', 'bc-:de' ),
# escaping with \ as we know it
('(?<!\\\):(.*?)(?<!\\\):', 'a:bc\\:de:f', SUCCEED, 'g1', 'bc\\:de' ),
# terminating with ' and escaping with ? as in edifact
("(?<!\\?)'(.*?)(?<!\\?)'", "a'bc?'de'f", SUCCEED, 'g1', "bc?'de" ),
# Comments using the (?#...) syntax
('w(?# comment', 'w', SYNTAX_ERROR),
('w(?# comment 1)xy(?# comment 2)z', 'wxyz', SUCCEED, 'found', 'wxyz'),
# Check odd placement of embedded pattern modifiers
# not an error under PCRE/PRE:
('w(?i)', 'W', SUCCEED, 'found', 'W'),
# ('w(?i)', 'W', SYNTAX_ERROR),
# Comments using the x embedded pattern modifier
("""(?x)w# comment 1
x y
# comment 2
z""", 'wxyz', SUCCEED, 'found', 'wxyz'),
# using the m embedded pattern modifier
('^abc', """jkl
abc
xyz""", FAIL),
('(?m)^abc', """jkl
abc
xyz""", SUCCEED, 'found', 'abc'),
('(?m)abc$', """jkl
xyzabc
123""", SUCCEED, 'found', 'abc'),
# using the s embedded pattern modifier
('a.b', 'a\nb', FAIL),
('(?s)a.b', 'a\nb', SUCCEED, 'found', 'a\nb'),
# test \w, etc. both inside and outside character classes
('\\w+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('[\\w]+', '--ab_cd0123--', SUCCEED, 'found', 'ab_cd0123'),
('\\D+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\D]+', '1234abc5678', SUCCEED, 'found', 'abc'),
('[\\da-fA-F]+', '123abc', SUCCEED, 'found', '123abc'),
# not an error under PCRE/PRE:
# ('[\\d-x]', '-', SYNTAX_ERROR),
(r'([\s]*)([\S]*)([\s]*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'(\s*)(\S*)(\s*)', ' testing!1972', SUCCEED, 'g3+g2+g1', 'testing!1972 '),
(r'\xff', '\377', SUCCEED, 'found', chr(255)),
# new \x semantics
(r'\x00ff', '\377', FAIL),
# (r'\x00ff', '\377', SUCCEED, 'found', chr(255)),
(r'\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
('\t\n\v\r\f\a\g', '\t\n\v\r\f\ag', SUCCEED, 'found', '\t\n\v\r\f\ag'),
(r'\t\n\v\r\f\a', '\t\n\v\r\f\a', SUCCEED, 'found', chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)),
(r'[\t][\n][\v][\r][\f][\b]', '\t\n\v\r\f\b', SUCCEED, 'found', '\t\n\v\r\f\b'),
#
# post-1.5.2 additions
# xmllib problem
(r'(([a-z]+):)?([a-z]+)$', 'smil', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-smil'),
# bug 110866: reference to undefined group
(r'((.)\1+)', '', SYNTAX_ERROR),
# bug 111869: search (PRE/PCRE fails on this one, SRE doesn't)
(r'.*d', 'abc\nabd', SUCCEED, 'found', 'abd'),
# bug 112468: various expected syntax errors
(r'(', '', SYNTAX_ERROR),
(r'[\41]', '!', SUCCEED, 'found', '!'),
# bug 114033: nothing to repeat
(r'(x?)?', 'x', SUCCEED, 'found', 'x'),
# bug 115040: rescan if flags are modified inside pattern
(r' (?x)foo ', 'foo', SUCCEED, 'found', 'foo'),
# bug 115618: negative lookahead
(r'(?<!abc)(d.f)', 'abcdefdof', SUCCEED, 'found', 'dof'),
# bug 116251: character class bug
(r'[\w-]+', 'laser_beam', SUCCEED, 'found', 'laser_beam'),
# bug 123769+127259: non-greedy backtracking bug
(r'.*?\S *:', 'xx:', SUCCEED, 'found', 'xx:'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
(r'a[ ]*?\ (\d+).*', 'a 10', SUCCEED, 'found', 'a 10'),
# bug 127259: \Z shouldn't depend on multiline mode
(r'(?ms).*?x\s*\Z(.*)','xx\nx\n', SUCCEED, 'g1', ''),
# bug 128899: uppercase literals under the ignorecase flag
(r'(?i)M+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)m+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[M]+', 'MMM', SUCCEED, 'found', 'MMM'),
(r'(?i)[m]+', 'MMM', SUCCEED, 'found', 'MMM'),
# bug 130748: ^* should be an error (nothing to repeat)
(r'^*', '', SYNTAX_ERROR),
# bug 133283: minimizing repeat problem
(r'"(?:\\"|[^"])*?"', r'"\""', SUCCEED, 'found', r'"\""'),
# bug 477728: minimizing repeat problem
(r'^.*?$', 'one\ntwo\nthree\n', FAIL),
# bug 483789: minimizing repeat problem
(r'a[^>]*?b', 'a>b', FAIL),
# bug 490573: minimizing repeat problem
(r'^a*?$', 'foo', FAIL),
# bug 470582: nested groups problem
(r'^((a)c)?(ab)$', 'ab', SUCCEED, 'g1+"-"+g2+"-"+g3', 'None-None-ab'),
# another minimizing repeat problem (capturing groups in assertions)
('^([ab]*?)(?=(b)?)c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?!(b))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
('^([ab]*?)(?<!(a))c', 'abc', SUCCEED, 'g1+"-"+g2', 'ab-None'),
]
u = '\N{LATIN CAPITAL LETTER A WITH DIAERESIS}'
tests.extend([
# bug 410271: \b broken under locales
(r'\b.\b', 'a', SUCCEED, 'found', 'a'),
(r'(?u)\b.\b', u, SUCCEED, 'found', u),
(r'(?u)\w', u, SUCCEED, 'found', u),
])
| gpl-2.0 |
fkfk/sc02b_kernel | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
i = 0
while True:
buf = sys.stdin.read(4)
if len(buf) == 0:
break
elif len(buf) != 4:
sys.stdout.write("\n")
sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
sys.exit(1)
if i > 0:
sys.stdout.write(" ")
sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
i += 1
sys.stdout.write("\n")
| gpl-2.0 |
ychen820/microblog | y/google-cloud-sdk/.install/.backup/platform/gsutil/third_party/boto/tests/unit/cloudsearch2/test_exceptions.py | 4 | 1399 | import mock
from boto.compat import json
from tests.unit import unittest
from .test_search import HOSTNAME, CloudSearchSearchBaseTest
from boto.cloudsearch2.search import SearchConnection, SearchServiceException
def fake_loads_value_error(content, *args, **kwargs):
"""Callable to generate a fake ValueError"""
raise ValueError("HAHAHA! Totally not simplejson & you gave me bad JSON.")
def fake_loads_json_error(content, *args, **kwargs):
"""Callable to generate a fake JSONDecodeError"""
raise json.JSONDecodeError('Using simplejson & you gave me bad JSON.',
'', 0)
class CloudSearchJSONExceptionTest(CloudSearchSearchBaseTest):
response = '{}'
def test_no_simplejson_value_error(self):
with mock.patch.object(json, 'loads', fake_loads_value_error):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'non-json'):
search.search(q='test')
@unittest.skipUnless(hasattr(json, 'JSONDecodeError'),
'requires simplejson')
def test_simplejson_jsondecodeerror(self):
with mock.patch.object(json, 'loads', fake_loads_json_error):
search = SearchConnection(endpoint=HOSTNAME)
with self.assertRaisesRegexp(SearchServiceException, 'non-json'):
search.search(q='test')
| bsd-3-clause |
dkubiak789/odoo | addons/resource/faces/__init__.py | 448 | 1325 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from pcalendar import Calendar, WorkingDate, StartDate, EndDate, Minutes
from task import Project, BalancedProject, AdjustedProject, Task, \
STRICT, SLOPPY, SMART, Multi, YearlyMax, WeeklyMax, MonthlyMax, \
DailyMax, VariableLoad
from resource import Resource
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
ustramooner/CouchPotato | library/hachoir_core/field/generic_field_set.py | 12 | 19175 | from hachoir_core.field import (MissingField, BasicFieldSet, Field, ParserError,
createRawField, createNullField, createPaddingField, FakeArray)
from hachoir_core.dict import Dict, UniqKeyError
from hachoir_core.error import HACHOIR_ERRORS
from hachoir_core.tools import lowerBound
import hachoir_core.config as config
class GenericFieldSet(BasicFieldSet):
"""
Ordered list of fields. Use operator [] to access fields using their
name (field names are unique in a field set, but not in the whole
document).
Class attributes:
- endian: Bytes order (L{BIG_ENDIAN} or L{LITTLE_ENDIAN}). Optional if the
field set has a parent ;
- static_size: (optional) Size of FieldSet in bits. This attribute should
be used in parser of constant size.
Instance attributes/methods:
- _fields: Ordered dictionnary of all fields, may be incomplete
because feeded when a field is requested ;
- stream: Input stream used to feed fields' value
- root: The root of all field sets ;
- __len__(): Number of fields, may need to create field set ;
- __getitem__(): Get an field by it's name or it's path.
And attributes inherited from Field class:
- parent: Parent field (may be None if it's the root) ;
- name: Field name (unique in parent field set) ;
- value: The field set ;
- address: Field address (in bits) relative to parent ;
- description: A string describing the content (can be None) ;
- size: Size of field set in bits, may need to create field set.
Event handling:
- "connectEvent": Connect an handler to an event ;
- "raiseEvent": Raise an event.
To implement a new field set, you need to:
- create a class which inherite from FieldSet ;
- write createFields() method using lines like:
yield Class(self, "name", ...) ;
- and maybe set endian and static_size class attributes.
"""
_current_size = 0
def __init__(self, parent, name, stream, description=None, size=None):
"""
Constructor
@param parent: Parent field set, None for root parser
@param name: Name of the field, have to be unique in parent. If it ends
with "[]", end will be replaced with "[new_id]" (eg. "raw[]"
becomes "raw[0]", next will be "raw[1]", and then "raw[2]", etc.)
@type name: str
@param stream: Input stream from which data are read
@type stream: L{InputStream}
@param description: Optional string description
@type description: str|None
@param size: Size in bits. If it's None, size will be computed. You
can also set size with class attribute static_size
"""
BasicFieldSet.__init__(self, parent, name, stream, description, size)
self._fields = Dict()
self._field_generator = self.createFields()
self._array_cache = {}
self.__is_feeding = False
def array(self, key):
try:
return self._array_cache[key]
except KeyError:
array = FakeArray(self, key)
self._array_cache[key] = array
return self._array_cache[key]
def reset(self):
"""
Reset a field set:
* clear fields ;
* restart field generator ;
* set current size to zero ;
* clear field array count.
But keep: name, value, description and size.
"""
BasicFieldSet.reset(self)
self._fields = Dict()
self._field_generator = self.createFields()
self._current_size = 0
self._array_cache = {}
def __str__(self):
return '<%s path=%s, current_size=%s, current length=%s>' % \
(self.__class__.__name__, self.path, self._current_size, len(self._fields))
def __len__(self):
"""
Returns number of fields, may need to create all fields
if it's not done yet.
"""
if self._field_generator is not None:
self._feedAll()
return len(self._fields)
def _getCurrentLength(self):
return len(self._fields)
current_length = property(_getCurrentLength)
def _getSize(self):
if self._size is None:
self._feedAll()
return self._size
size = property(_getSize, doc="Size in bits, may create all fields to get size")
def _getCurrentSize(self):
assert not(self.done)
return self._current_size
current_size = property(_getCurrentSize)
eof = property(lambda self: self._checkSize(self._current_size + 1, True) < 0)
def _checkSize(self, size, strict):
field = self
while field._size is None:
if not field._parent:
assert self.stream.size is None
if not strict:
return None
if self.stream.sizeGe(size):
return 0
break
size += field._address
field = field._parent
return field._size - size
autofix = property(lambda self: self.root.autofix)
def _addField(self, field):
"""
Add a field to the field set:
* add it into _fields
* update _current_size
May raise a StopIteration() on error
"""
if not issubclass(field.__class__, Field):
raise ParserError("Field type (%s) is not a subclass of 'Field'!"
% field.__class__.__name__)
assert isinstance(field._name, str)
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
if config.debug:
self.info("[+] DBG: _addField(%s)" % field.name)
# required for the msoffice parser
if field._address != self._current_size:
self.warning("Fix address of %s to %s (was %s)" %
(field.path, self._current_size, field._address))
field._address = self._current_size
ask_stop = False
# Compute field size and check that there is enough place for it
self.__is_feeding = True
try:
field_size = field.size
except HACHOIR_ERRORS, err:
if field.is_field_set and field.current_length and field.eof:
self.warning("Error when getting size of '%s': %s" % (field.name, err))
field._stopFeeding()
ask_stop = True
else:
self.warning("Error when getting size of '%s': delete it" % field.name)
self.__is_feeding = False
raise
self.__is_feeding = False
# No more place?
dsize = self._checkSize(field._address + field.size, False)
if (dsize is not None and dsize < 0) or (field.is_field_set and field.size <= 0):
if self.autofix and self._current_size:
self._fixFieldSize(field, field.size + dsize)
else:
raise ParserError("Field %s is too large!" % field.path)
self._current_size += field.size
try:
self._fields.append(field._name, field)
except UniqKeyError, err:
self.warning("Duplicate field name " + unicode(err))
field._name += "[]"
self.setUniqueFieldName(field)
self._fields.append(field._name, field)
if ask_stop:
raise StopIteration()
def _fixFieldSize(self, field, new_size):
if new_size > 0:
if field.is_field_set and 0 < field.size:
field._truncate(new_size)
return
# Don't add the field <=> delete item
if self._size is None:
self._size = self._current_size + new_size
self.warning("[Autofix] Delete '%s' (too large)" % field.path)
raise StopIteration()
def _getField(self, name, const):
field = Field._getField(self, name, const)
if field is None:
if name in self._fields:
field = self._fields[name]
elif self._field_generator is not None and not const:
field = self._feedUntil(name)
return field
def getField(self, key, const=True):
if isinstance(key, (int, long)):
if key < 0:
raise KeyError("Key must be positive!")
if not const:
self.readFirstFields(key+1)
if len(self._fields.values) <= key:
raise MissingField(self, key)
return self._fields.values[key]
return Field.getField(self, key, const)
def _truncate(self, size):
assert size > 0
if size < self._current_size:
self._size = size
while True:
field = self._fields.values[-1]
if field._address < size:
break
del self._fields[-1]
self._current_size = field._address
size -= field._address
if size < field._size:
if field.is_field_set:
field._truncate(size)
else:
del self._fields[-1]
field = createRawField(self, size, "raw[]")
self._fields.append(field._name, field)
self._current_size = self._size
else:
assert size < self._size or self._size is None
self._size = size
if self._size == self._current_size:
self._field_generator = None
def _deleteField(self, index):
field = self._fields.values[index]
size = field.size
self._current_size -= size
del self._fields[index]
return field
def _fixLastField(self):
"""
Try to fix last field when we know current field set size.
Returns new added field if any, or None.
"""
assert self._size is not None
# Stop parser
message = ["stop parser"]
self._field_generator = None
# If last field is too big, delete it
while self._size < self._current_size:
field = self._deleteField(len(self._fields)-1)
message.append("delete field %s" % field.path)
assert self._current_size <= self._size
# If field size current is smaller: add a raw field
size = self._size - self._current_size
if size:
field = createRawField(self, size, "raw[]")
message.append("add padding")
self._current_size += field.size
self._fields.append(field._name, field)
else:
field = None
message = ", ".join(message)
self.warning("[Autofix] Fix parser error: " + message)
assert self._current_size == self._size
return field
def _stopFeeding(self):
new_field = None
if self._size is None:
if self._parent:
self._size = self._current_size
elif self._size != self._current_size:
if self.autofix:
new_field = self._fixLastField()
else:
raise ParserError("Invalid parser \"%s\" size!" % self.path)
self._field_generator = None
return new_field
def _fixFeedError(self, exception):
"""
Try to fix a feeding error. Returns False if error can't be fixed,
otherwise returns new field if any, or None.
"""
if self._size is None or not self.autofix:
return False
self.warning(unicode(exception))
return self._fixLastField()
def _feedUntil(self, field_name):
"""
Return the field if it was found, None else
"""
if self.__is_feeding \
or (self._field_generator and self._field_generator.gi_running):
self.warning("Unable to get %s (and generator is already running)"
% field_name)
return None
try:
while True:
field = self._field_generator.next()
self._addField(field)
if field.name == field_name:
return field
except HACHOIR_ERRORS, err:
if self._fixFeedError(err) is False:
raise
except StopIteration:
self._stopFeeding()
return None
def readMoreFields(self, number):
"""
Read more number fields, or do nothing if parsing is done.
Returns number of new added fields.
"""
if self._field_generator is None:
return 0
oldlen = len(self._fields)
try:
for index in xrange(number):
self._addField( self._field_generator.next() )
except HACHOIR_ERRORS, err:
if self._fixFeedError(err) is False:
raise
except StopIteration:
self._stopFeeding()
return len(self._fields) - oldlen
def _feedAll(self):
if self._field_generator is None:
return
try:
while True:
field = self._field_generator.next()
self._addField(field)
except HACHOIR_ERRORS, err:
if self._fixFeedError(err) is False:
raise
except StopIteration:
self._stopFeeding()
def __iter__(self):
"""
Create a generator to iterate on each field, may create new
fields when needed
"""
try:
done = 0
while True:
if done == len(self._fields):
if self._field_generator is None:
break
self._addField( self._field_generator.next() )
for field in self._fields.values[done:]:
yield field
done += 1
except HACHOIR_ERRORS, err:
field = self._fixFeedError(err)
if isinstance(field, Field):
yield field
elif hasattr(field, '__iter__'):
for f in field:
yield f
elif field is False:
raise
except StopIteration:
field = self._stopFeeding()
if isinstance(field, Field):
yield field
elif hasattr(field, '__iter__'):
for f in field:
yield f
def _isDone(self):
return (self._field_generator is None)
done = property(_isDone, doc="Boolean to know if parsing is done or not")
#
# FieldSet_SeekUtility
#
def seekBit(self, address, name="padding[]",
description=None, relative=True, null=False):
"""
Create a field to seek to specified address,
or None if it's not needed.
May raise an (ParserError) exception if address is invalid.
"""
if relative:
nbits = address - self._current_size
else:
nbits = address - (self.absolute_address + self._current_size)
if nbits < 0:
raise ParserError("Seek error, unable to go back!")
if 0 < nbits:
if null:
return createNullField(self, nbits, name, description)
else:
return createPaddingField(self, nbits, name, description)
else:
return None
def seekByte(self, address, name="padding[]", description=None, relative=True, null=False):
"""
Same as seekBit(), but with address in byte.
"""
return self.seekBit(address * 8, name, description, relative, null=null)
#
# RandomAccessFieldSet
#
def replaceField(self, name, new_fields):
# TODO: Check in self and not self.field
# Problem is that "generator is already executing"
if name not in self._fields:
raise ParserError("Unable to replace %s: field doesn't exist!" % name)
assert 1 <= len(new_fields)
old_field = self[name]
total_size = sum( (field.size for field in new_fields) )
if old_field.size != total_size:
raise ParserError("Unable to replace %s: "
"new field(s) hasn't same size (%u bits instead of %u bits)!"
% (name, total_size, old_field.size))
field = new_fields[0]
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
field._address = old_field.address
if field.name != name and field.name in self._fields:
raise ParserError(
"Unable to replace %s: name \"%s\" is already used!"
% (name, field.name))
self._fields.replace(name, field.name, field)
self.raiseEvent("field-replaced", old_field, field)
if 1 < len(new_fields):
index = self._fields.index(new_fields[0].name)+1
address = field.address + field.size
for field in new_fields[1:]:
if field._name.endswith("[]"):
self.setUniqueFieldName(field)
field._address = address
if field.name in self._fields:
raise ParserError(
"Unable to replace %s: name \"%s\" is already used!"
% (name, field.name))
self._fields.insert(index, field.name, field)
self.raiseEvent("field-inserted", index, field)
index += 1
address += field.size
def getFieldByAddress(self, address, feed=True):
"""
Only search in existing fields
"""
if feed and self._field_generator is not None:
self._feedAll()
if address < self._current_size:
i = lowerBound(self._fields.values, lambda x: x.address + x.size <= address)
if i is not None:
return self._fields.values[i]
return None
def writeFieldsIn(self, old_field, address, new_fields):
"""
Can only write in existing fields (address < self._current_size)
"""
# Check size
total_size = sum( field.size for field in new_fields )
if old_field.size < total_size:
raise ParserError( \
"Unable to write fields at address %s " \
"(too big)!" % (address))
# Need padding before?
replace = []
size = address - old_field.address
assert 0 <= size
if 0 < size:
padding = createPaddingField(self, size)
padding._address = old_field.address
replace.append(padding)
# Set fields address
for field in new_fields:
field._address = address
address += field.size
replace.append(field)
# Need padding after?
size = (old_field.address + old_field.size) - address
assert 0 <= size
if 0 < size:
padding = createPaddingField(self, size)
padding._address = address
replace.append(padding)
self.replaceField(old_field.name, replace)
def nextFieldAddress(self):
return self._current_size
def getFieldIndex(self, field):
return self._fields.index(field._name)
| gpl-3.0 |
witgo/spark | python/pyspark/testing/utils.py | 8 | 5372 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import glob
import os
import struct
import sys
import unittest
from time import time, sleep
from pyspark import SparkContext, SparkConf
have_scipy = False
have_numpy = False
try:
import scipy.sparse # noqa: F401
have_scipy = True
except:
# No SciPy, but that's okay, we'll skip those tests
pass
try:
import numpy as np # noqa: F401
have_numpy = True
except:
# No NumPy, but that's okay, we'll skip those tests
pass
SPARK_HOME = os.environ["SPARK_HOME"]
def read_int(b):
return struct.unpack("!i", b)[0]
def write_int(i):
return struct.pack("!i", i)
def eventually(condition, timeout=30.0, catch_assertions=False):
"""
Wait a given amount of time for a condition to pass, else fail with an error.
This is a helper utility for PySpark tests.
Parameters
----------
condition : function
Function that checks for termination conditions. condition() can return:
- True: Conditions met. Return without error.
- other value: Conditions not met yet. Continue. Upon timeout,
include last such value in error message.
Note that this method may be called at any time during
streaming execution (e.g., even before any results
have been created).
timeout : int
Number of seconds to wait. Default 30 seconds.
catch_assertions : bool
If False (default), do not catch AssertionErrors.
If True, catch AssertionErrors; continue, but save
error to throw upon timeout.
"""
start_time = time()
lastValue = None
while time() - start_time < timeout:
if catch_assertions:
try:
lastValue = condition()
except AssertionError as e:
lastValue = e
else:
lastValue = condition()
if lastValue is True:
return
sleep(0.01)
if isinstance(lastValue, AssertionError):
raise lastValue
else:
raise AssertionError(
"Test failed due to timeout after %g sec, with last condition returning: %s"
% (timeout, lastValue))
class QuietTest(object):
def __init__(self, sc):
self.log4j = sc._jvm.org.apache.log4j
def __enter__(self):
self.old_level = self.log4j.LogManager.getRootLogger().getLevel()
self.log4j.LogManager.getRootLogger().setLevel(self.log4j.Level.FATAL)
def __exit__(self, exc_type, exc_val, exc_tb):
self.log4j.LogManager.getRootLogger().setLevel(self.old_level)
class PySparkTestCase(unittest.TestCase):
def setUp(self):
self._old_sys_path = list(sys.path)
class_name = self.__class__.__name__
self.sc = SparkContext('local[4]', class_name)
def tearDown(self):
self.sc.stop()
sys.path = self._old_sys_path
class ReusedPySparkTestCase(unittest.TestCase):
@classmethod
def conf(cls):
"""
Override this in subclasses to supply a more specific conf
"""
return SparkConf()
@classmethod
def setUpClass(cls):
cls.sc = SparkContext('local[4]', cls.__name__, conf=cls.conf())
@classmethod
def tearDownClass(cls):
cls.sc.stop()
class ByteArrayOutput(object):
def __init__(self):
self.buffer = bytearray()
def write(self, b):
self.buffer += b
def close(self):
pass
def search_jar(project_relative_path, sbt_jar_name_prefix, mvn_jar_name_prefix):
# Note that 'sbt_jar_name_prefix' and 'mvn_jar_name_prefix' are used since the prefix can
# vary for SBT or Maven specifically. See also SPARK-26856
project_full_path = os.path.join(
os.environ["SPARK_HOME"], project_relative_path)
# We should ignore the following jars
ignored_jar_suffixes = ("javadoc.jar", "sources.jar", "test-sources.jar", "tests.jar")
# Search jar in the project dir using the jar name_prefix for both sbt build and maven
# build because the artifact jars are in different directories.
sbt_build = glob.glob(os.path.join(
project_full_path, "target/scala-*/%s*.jar" % sbt_jar_name_prefix))
maven_build = glob.glob(os.path.join(
project_full_path, "target/%s*.jar" % mvn_jar_name_prefix))
jar_paths = sbt_build + maven_build
jars = [jar for jar in jar_paths if not jar.endswith(ignored_jar_suffixes)]
if not jars:
return None
elif len(jars) > 1:
raise Exception("Found multiple JARs: %s; please remove all but one" % (", ".join(jars)))
else:
return jars[0]
| apache-2.0 |
mettekou/ghc | testsuite/driver/testlib.py | 1 | 68147 | # coding=utf8
#
# (c) Simon Marlow 2002
#
from __future__ import print_function
import io
import shutil
import os
import errno
import string
import re
import traceback
import time
import datetime
import copy
import glob
from math import ceil, trunc
import collections
import subprocess
from testglobals import *
from testutil import *
from extra_files import extra_src_files
try:
basestring
except: # Python 3
basestring = (str,bytes)
if config.use_threads:
import threading
try:
import thread
except ImportError: # Python 3
import _thread as thread
global wantToStop
wantToStop = False
global pool_sema
if config.use_threads:
pool_sema = threading.BoundedSemaphore(value=config.threads)
def stopNow():
global wantToStop
wantToStop = True
def stopping():
return wantToStop
# Options valid for the current test only (these get reset to
# testdir_testopts after each test).
global testopts_local
if config.use_threads:
testopts_local = threading.local()
else:
class TestOpts_Local:
pass
testopts_local = TestOpts_Local()
def getTestOpts():
return testopts_local.x
def setLocalTestOpts(opts):
global testopts_local
testopts_local.x=opts
def isStatsTest():
opts = getTestOpts()
return bool(opts.compiler_stats_range_fields or opts.stats_range_fields)
# This can be called at the top of a file of tests, to set default test options
# for the following tests.
def setTestOpts( f ):
global thisdir_settings
thisdir_settings = [thisdir_settings, f]
# -----------------------------------------------------------------------------
# Canned setup functions for common cases. eg. for a test you might say
#
# test('test001', normal, compile, [''])
#
# to run it without any options, but change it to
#
# test('test001', expect_fail, compile, [''])
#
# to expect failure for this test.
def normal( name, opts ):
return;
def skip( name, opts ):
opts.skip = 1
def expect_fail( name, opts ):
# The compiler, testdriver, OS or platform is missing a certain
# feature, and we don't plan to or can't fix it now or in the
# future.
opts.expect = 'fail';
def reqlib( lib ):
return lambda name, opts, l=lib: _reqlib (name, opts, l )
def stage1(name, opts):
# See Note [Why is there no stage1 setup function?]
framework_fail(name, 'stage1 setup function does not exist',
'add your test to testsuite/tests/stage1 instead')
# Note [Why is there no stage1 setup function?]
#
# Presumably a stage1 setup function would signal that the stage1
# compiler should be used to compile a test.
#
# Trouble is, the path to the compiler + the `ghc --info` settings for
# that compiler are currently passed in from the `make` part of the
# testsuite driver.
#
# Switching compilers in the Python part would be entirely too late, as
# all ghc_with_* settings would be wrong. See config/ghc for possible
# consequences (for example, config.run_ways would still be
# based on the default compiler, quite likely causing ./validate --slow
# to fail).
#
# It would be possible to let the Python part of the testsuite driver
# make the call to `ghc --info`, but doing so would require quite some
# work. Care has to be taken to not affect the run_command tests for
# example, as they also use the `ghc --info` settings:
# quasiquotation/qq007/Makefile:ifeq "$(GhcDynamic)" "YES"
#
# If you want a test to run using the stage1 compiler, add it to the
# testsuite/tests/stage1 directory. Validate runs the tests in that
# directory with `make stage=1`.
# Cache the results of looking to see if we have a library or not.
# This makes quite a difference, especially on Windows.
have_lib = {}
def _reqlib( name, opts, lib ):
if lib in have_lib:
got_it = have_lib[lib]
else:
cmd = strip_quotes(config.ghc_pkg)
p = subprocess.Popen([cmd, '--no-user-package-db', 'describe', lib],
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
# read from stdout and stderr to avoid blocking due to
# buffers filling
p.communicate()
r = p.wait()
got_it = r == 0
have_lib[lib] = got_it
if not got_it:
opts.expect = 'missing-lib'
def req_haddock( name, opts ):
if not config.haddock:
opts.expect = 'missing-lib'
def req_profiling( name, opts ):
'''Require the profiling libraries (add 'GhcLibWays += p' to mk/build.mk)'''
if not config.have_profiling:
opts.expect = 'fail'
def req_shared_libs( name, opts ):
if not config.have_shared_libs:
opts.expect = 'fail'
def req_interp( name, opts ):
if not config.have_interp:
opts.expect = 'fail'
def req_smp( name, opts ):
if not config.have_smp:
opts.expect = 'fail'
def ignore_stdout(name, opts):
opts.ignore_stdout = True
def ignore_stderr(name, opts):
opts.ignore_stderr = True
def combined_output( name, opts ):
opts.combined_output = True
# -----
def expect_fail_for( ways ):
return lambda name, opts, w=ways: _expect_fail_for( name, opts, w )
def _expect_fail_for( name, opts, ways ):
opts.expect_fail_for = ways
def expect_broken( bug ):
# This test is a expected not to work due to the indicated trac bug
# number.
return lambda name, opts, b=bug: _expect_broken (name, opts, b )
def _expect_broken( name, opts, bug ):
record_broken(name, opts, bug)
opts.expect = 'fail';
def expect_broken_for( bug, ways ):
return lambda name, opts, b=bug, w=ways: _expect_broken_for( name, opts, b, w )
def _expect_broken_for( name, opts, bug, ways ):
record_broken(name, opts, bug)
opts.expect_fail_for = ways
def record_broken(name, opts, bug):
global brokens
me = (bug, opts.testdir, name)
if not me in brokens:
brokens.append(me)
def _expect_pass(way):
# Helper function. Not intended for use in .T files.
opts = getTestOpts()
return opts.expect == 'pass' and way not in opts.expect_fail_for
# -----
def omit_ways( ways ):
    """Setup function: never run the test in the given ways."""
    def setter(name, opts, w=ways):
        _omit_ways(name, opts, w)
    return setter

def _omit_ways( name, opts, ways ):
    opts.omit_ways = ways

# -----
def only_ways( ways ):
    """Setup function: run the test only in the given ways."""
    def setter(name, opts, w=ways):
        _only_ways(name, opts, w)
    return setter

def _only_ways( name, opts, ways ):
    opts.only_ways = ways

# -----
def extra_ways( ways ):
    """Setup function: additionally run the test in the given ways."""
    def setter(name, opts, w=ways):
        _extra_ways(name, opts, w)
    return setter

def _extra_ways( name, opts, ways ):
    opts.extra_ways = ways

# -----
def set_stdin( file ):
    """Setup function: feed the program's stdin from the given file."""
    def setter(name, opts, f=file):
        _set_stdin(name, opts, f)
    return setter

def _set_stdin( name, opts, f ):
    opts.stdin = f

# -----
def exit_code( val ):
    """Setup function: the expected exit code of the program."""
    def setter(name, opts, v=val):
        _exit_code(name, opts, v)
    return setter

def _exit_code( name, opts, v ):
    opts.exit_code = v
def signal_exit_code( val ):
    """Setup function: expect the program to die from signal `val`.
    When application running on Linux receives fatal error
    signal, then its exit code is encoded as 128 + signal
    value. See http://www.tldp.org/LDP/abs/html/exitcodes.html
    I assume that Mac OS X behaves in the same way at least Mac
    OS X builder behavior suggests this. Solaris reports the
    plain signal value instead."""
    if opsys('solaris2'):
        return exit_code(val)
    return exit_code(val + 128)

# -----
def compile_timeout_multiplier( val ):
    """Setup function: scale the compile timeout by `val`."""
    def setter(name, opts, v=val):
        _compile_timeout_multiplier(name, opts, v)
    return setter

def _compile_timeout_multiplier( name, opts, v ):
    opts.compile_timeout_multiplier = v

def run_timeout_multiplier( val ):
    """Setup function: scale the run timeout by `val`."""
    def setter(name, opts, v=val):
        _run_timeout_multiplier(name, opts, v)
    return setter

def _run_timeout_multiplier( name, opts, v ):
    opts.run_timeout_multiplier = v

# -----
def extra_run_opts( val ):
    """Setup function: extra command-line options when running the program."""
    def setter(name, opts, v=val):
        _extra_run_opts(name, opts, v)
    return setter

def _extra_run_opts( name, opts, v ):
    opts.extra_run_opts = v

# -----
def extra_hc_opts( val ):
    """Setup function: extra options for the compiler invocation."""
    def setter(name, opts, v=val):
        _extra_hc_opts(name, opts, v)
    return setter

def _extra_hc_opts( name, opts, v ):
    opts.extra_hc_opts = v

# -----
def extra_clean( files ):
    # TODO. Remove all calls to extra_clean.
    def noop(_name, _opts):
        return None
    return noop

def extra_files(files):
    """Setup function: additional source files the test depends on."""
    def setter(name, opts):
        _extra_files(name, opts, files)
    return setter

def _extra_files(name, opts, files):
    opts.extra_files.extend(files)

# -----
def stats_num_field( field, expecteds ):
return lambda name, opts, f=field, e=expecteds: _stats_num_field(name, opts, f, e);
def _stats_num_field( name, opts, field, expecteds ):
if field in opts.stats_range_fields:
framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
if type(expecteds) is list:
for (b, expected, dev) in expecteds:
if b:
opts.stats_range_fields[field] = (expected, dev)
return
framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
else:
(expected, dev) = expecteds
opts.stats_range_fields[field] = (expected, dev)
def compiler_stats_num_field( field, expecteds ):
return lambda name, opts, f=field, e=expecteds: _compiler_stats_num_field(name, opts, f, e);
def _compiler_stats_num_field( name, opts, field, expecteds ):
if field in opts.compiler_stats_range_fields:
framework_fail(name, 'duplicate-numfield', 'Duplicate ' + field + ' num_field check')
# Compiler performance numbers change when debugging is on, making the results
# useless and confusing. Therefore, skip if debugging is on.
if compiler_debugged():
skip(name, opts)
for (b, expected, dev) in expecteds:
if b:
opts.compiler_stats_range_fields[field] = (expected, dev)
return
framework_fail(name, 'numfield-no-expected', 'No expected value found for ' + field + ' in num_field check')
# -----
def when(b, f):
    """Apply setup function `f` only when `b` holds, else `normal`.
    When list_brokens is on, we want to see all expect_broken calls,
    so we always do f."""
    return f if (b or config.list_broken) else normal

def unless(b, f):
    """Apply setup function `f` unless `b` holds."""
    return when(not b, f)
def doing_ghci():
    # True when the 'ghci' way is among the ways being run.
    return 'ghci' in config.run_ways
def ghc_dynamic():
    # True when GHC itself was linked dynamically.
    return config.ghc_dynamic
def fast():
    # True in the fastest testsuite speed setting (speed == 2).
    return config.speed == 2
def platform( plat ):
    # True when the configured platform string equals `plat`.
    return config.platform == plat
def opsys( os ):
    # True when the configured operating system equals `os`.
    return config.os == os
def arch( arch ):
    # True when the configured architecture equals `arch`.
    return config.arch == arch
def wordsize( ws ):
    # True when the configured word size equals `ws` (compared as strings).
    return config.wordsize == str(ws)
def msys( ):
    # True when running under MSYS (Windows).
    return config.msys
def cygwin( ):
    # True when running under Cygwin (Windows).
    return config.cygwin
def have_vanilla( ):
    # Vanilla (non-profiled, non-dynamic) libraries are available.
    return config.have_vanilla
def have_dynamic( ):
    # Dynamic libraries are available.
    return config.have_dynamic
def have_profiling( ):
    # Profiling libraries are available.
    return config.have_profiling
def in_tree_compiler( ):
    # The compiler under test is the in-tree (freshly built) one.
    return config.in_tree_compiler
def unregisterised( ):
    # The compiler is an unregisterised build.
    return config.unregisterised
def compiler_profiled( ):
    # The compiler itself was built with profiling.
    return config.compiler_profiled
def compiler_debugged( ):
    # The compiler itself was built with debugging (-DDEBUG).
    return config.compiler_debugged
# ---
def high_memory_usage(name, opts):
    """Run this test with the testsuite to itself: it needs a lot of memory."""
    opts.alone = True

# If a test is for a multi-CPU race, then running the test alone
# increases the chance that we'll actually see it.
def multi_cpu_race(name, opts):
    opts.alone = True

# ---
def literate( name, opts ):
    """The test source is a literate (.lhs) file."""
    opts.literate = 1

def c_src( name, opts ):
    """The test source is a C file."""
    opts.c_src = 1

def objc_src( name, opts ):
    """The test source is an Objective-C file."""
    opts.objc_src = 1

def objcpp_src( name, opts ):
    """The test source is an Objective-C++ file."""
    opts.objcpp_src = 1

def cmm_src( name, opts ):
    """The test source is a C-- (.cmm) file."""
    opts.cmm_src = 1
def outputdir( odir ):
return lambda name, opts, d=odir: _outputdir(name, opts, d)
def _outputdir( name, opts, odir ):
opts.outputdir = odir;
# ----
def pre_cmd( cmd ):
return lambda name, opts, c=cmd: _pre_cmd(name, opts, cmd)
def _pre_cmd( name, opts, cmd ):
opts.pre_cmd = cmd
# ----
def clean_cmd( cmd ):
# TODO. Remove all calls to clean_cmd.
return lambda _name, _opts: None
# ----
def cmd_prefix( prefix ):
return lambda name, opts, p=prefix: _cmd_prefix(name, opts, prefix)
def _cmd_prefix( name, opts, prefix ):
opts.cmd_wrapper = lambda cmd, p=prefix: p + ' ' + cmd;
# ----
def cmd_wrapper( fun ):
return lambda name, opts, f=fun: _cmd_wrapper(name, opts, fun)
def _cmd_wrapper( name, opts, fun ):
opts.cmd_wrapper = fun
# ----
def compile_cmd_prefix( prefix ):
return lambda name, opts, p=prefix: _compile_cmd_prefix(name, opts, prefix)
def _compile_cmd_prefix( name, opts, prefix ):
opts.compile_cmd_prefix = prefix
# ----
def check_stdout( f ):
return lambda name, opts, f=f: _check_stdout(name, opts, f)
def _check_stdout( name, opts, f ):
opts.check_stdout = f
def no_check_hp(name, opts):
opts.check_hp = False
# ----
def normalise_slashes( name, opts ):
    """Normalise path separators in the output before comparing."""
    _normalise_fun(name, opts, normalise_slashes_)

def normalise_exe( name, opts ):
    """Normalise executable-file suffixes in the output before comparing."""
    _normalise_fun(name, opts, normalise_exe_)

def normalise_fun( *fs ):
    """Setup function: extra normalisers to apply to the output."""
    def setter(name, opts):
        _normalise_fun(name, opts, fs)
    return setter

def _normalise_fun( name, opts, *fs ):
    opts.extra_normaliser = join_normalisers(opts.extra_normaliser, fs)

def normalise_errmsg_fun( *fs ):
    """Setup function: extra normalisers to apply to error messages."""
    def setter(name, opts):
        _normalise_errmsg_fun(name, opts, fs)
    return setter

def _normalise_errmsg_fun( name, opts, *fs ):
    opts.extra_errmsg_normaliser = join_normalisers(opts.extra_errmsg_normaliser, fs)

def normalise_version_( *pkgs ):
    """Return a normaliser replacing versions of `pkgs` with <VERSION>."""
    pattern = '(' + '|'.join(map(re.escape, pkgs)) + ')-[0-9.]+'
    def normaliser(text):
        return re.sub(pattern, '\\1-<VERSION>', text)
    return normaliser

def normalise_version( *pkgs ):
    """Setup function: hide versions of `pkgs` in stdout and stderr."""
    def setter(name, opts):
        _normalise_fun(name, opts, normalise_version_(*pkgs))
        _normalise_errmsg_fun(name, opts, normalise_version_(*pkgs))
    return setter

def normalise_drive_letter(name, opts):
    # Windows only. Change D:\\ to C:\\.
    _normalise_fun(name, opts, lambda s: re.sub(r'[A-Z]:\\', r'C:\\', s))

def keep_prof_callstacks(name, opts):
    """Keep profiling callstacks.

    Use together with `only_ways(prof_ways)`.
    """
    opts.keep_prof_callstacks = True
def join_normalisers(*a):
    """
    Compose functions, flattening sequences.

       join_normalisers(f1,[f2,f3],f4)

    is the same as

       lambda x: f1(f2(f3(f4(x))))
    """
    def flatten(l):
        """
        Yield the elements of arbitrarily nested iterables, leaving
        strings intact. Taken from http://stackoverflow.com/a/2158532/946226
        """
        for el in l:
            # NOTE(review): `collections.Iterable` is deprecated (it moved
            # to `collections.abc`) and `basestring` exists only on
            # Python 2 — presumably a Py2/Py3 compatibility shim elsewhere
            # in this file defines it. Confirm before running on Python 3.
            if isinstance(el, collections.Iterable) and not isinstance(el, basestring):
                for sub in flatten(el):
                    yield sub
            else:
                yield el
    a = flatten(a)
    # Fold the flattened functions into a single composed function.
    # Note: composition is built outside-in, so the FIRST function in
    # `a` is applied LAST (matching the docstring above).
    fn = lambda x:x # identity function
    for f in a:
        assert callable(f)
        fn = lambda x,f=f,fn=fn: fn(f(x))
    return fn
# ----
# Function for composing two opt-fns together
def executeSetups(fs, name, opts):
    """Apply a setup function — or, recursively, a (nested) list of
    setup functions — to the test options `opts`."""
    if type(fs) is not list:
        # A single setup function: just apply it.
        fs(name, opts)
        return
    # A list of setups: execute each one in order.
    for setup in fs:
        executeSetups(setup, name, opts)
# -----------------------------------------------------------------------------
# The current directory of tests
def newTestDir(tempdir, dir):
    """Install the per-directory default settings applied to every test
    subsequently registered from this directory's .T file."""
    global thisdir_settings
    # reset the options for this test directory
    thisdir_settings = lambda name, opts, td=tempdir, d=dir: _newTestDir(name, opts, td, d)

# Should be equal to entry in toplevel .gitignore.
testdir_suffix = '.run'

def _newTestDir(name, opts, tempdir, dir):
    """Point opts at the source directory and at a fresh scratch
    directory for this test."""
    opts.srcdir = os.path.join(os.getcwd(), dir)
    opts.testdir = os.path.join(tempdir, dir, name + testdir_suffix)
    opts.compiler_always_flags = config.compiler_always_flags
# -----------------------------------------------------------------------------
# Actually doing tests
# Tests flagged opts.alone get the testsuite to themselves; the rest run
# in parallel. allTestNames is used to reject duplicate test names.
parallelTests = []
aloneTests = []
allTestNames = set([])
def runTest(watcher, opts, name, func, args):
    """Run one test, either on a worker thread (when config.use_threads)
    or synchronously on this thread."""
    if config.use_threads:
        # pool_sema bounds the number of concurrently running tests; it
        # is released by test_common_thread when the test finishes.
        pool_sema.acquire()
        # NOTE(review): local `t` shadows the module-level stats object
        # `t` used elsewhere (e.g. t.total_tests) — harmless here, but
        # confusing.
        t = threading.Thread(target=test_common_thread,
                             name=name,
                             args=(watcher, name, opts, func, args))
        t.daemon = False
        t.start()
    else:
        test_common_work(watcher, name, opts, func, args)
# name :: String
# setup :: TestOpts -> IO ()
def test(name, setup, func, args):
    """Register a single test.

    name  -- unique, well-formed test name
    setup -- a setup function or (nested) list of setup functions applied
             to a fresh copy of the default options
    func  -- the driver that actually runs the test (compile,
             compile_and_run, run_command, ...)
    args  -- extra arguments passed through to `func`
    """
    global aloneTests
    global parallelTests
    global allTestNames
    global thisdir_settings
    if name in allTestNames:
        framework_fail(name, 'duplicate', 'There are multiple tests with this name')
    if not re.match('^[0-9]*[a-zA-Z][a-zA-Z0-9._-]*$', name):
        framework_fail(name, 'bad_name', 'This test has an invalid name')
    if config.run_only_some_tests:
        if name not in config.only:
            return
        else:
            # Note [Mutating config.only]
            # config.only is initially the set of tests requested by
            # the user (via 'make TEST='). We then remove all tests that
            # we've already seen (in .T files), so that we can later
            # report on any tests we couldn't find and error out.
            config.only.remove(name)
    # Make a deep copy of the default_testopts, as we need our own copy
    # of any dictionaries etc inside it. Otherwise, if one test modifies
    # them, all tests will see the modified version!
    myTestOpts = copy.deepcopy(default_testopts)
    # Apply the per-directory settings first, then the test's own setup.
    executeSetups([thisdir_settings, setup], name, myTestOpts)
    thisTest = lambda watcher: runTest(watcher, myTestOpts, name, func, args)
    if myTestOpts.alone:
        aloneTests.append(thisTest)
    else:
        parallelTests.append(thisTest)
    allTestNames.add(name)
if config.use_threads:
    def test_common_thread(watcher, name, opts, func, args):
        # Worker-thread entry point: run the test, then release the pool
        # slot acquired in runTest — even if the test raised.
        try:
            test_common_work(watcher, name, opts, func, args)
        finally:
            pool_sema.release()
def get_package_cache_timestamp():
    """Return the mtime of the package-db cache file, or 0.0 when it is
    unconfigured or unreadable.

    Used around each test to detect tests that mutate the package
    database."""
    if config.package_conf_cache_file == '':
        return 0.0
    try:
        return os.stat(config.package_conf_cache_file).st_mtime
    except OSError:
        # Missing or unreadable cache file: treat as "no cache".
        # (Narrowed from a bare `except:` so unrelated bugs are not
        # silently swallowed.)
        return 0.0
# File extensions never copied into the scratch test directory (#12112).
do_not_copy = ('.hi', '.o', '.dyn_hi', '.dyn_o', '.out') # 12112
def test_common_work(watcher, name, opts, func, args):
    """Run test `name` in every applicable "way", recording results in
    the global stats object `t`. Always notifies `watcher` on exit."""
    try:
        t.total_tests += 1
        setLocalTestOpts(opts)
        package_conf_cache_file_start_timestamp = get_package_cache_timestamp()
        # All the ways we might run this test
        if func == compile or func == multimod_compile:
            all_ways = config.compile_ways
        elif func == compile_and_run or func == multimod_compile_and_run:
            all_ways = config.run_ways
        elif func == ghci_script:
            if 'ghci' in config.run_ways:
                all_ways = ['ghci']
            else:
                all_ways = []
        else:
            all_ways = ['normal']
        # A test itself can request extra ways by setting opts.extra_ways
        all_ways = all_ways + [way for way in opts.extra_ways if way not in all_ways]
        t.total_test_cases += len(all_ways)
        # A way runs unless the test is skipped, the way is omitted, or
        # it is filtered out by only_ways / the command line.
        ok_way = lambda way: \
            not getTestOpts().skip \
            and (getTestOpts().only_ways == None or way in getTestOpts().only_ways) \
            and (config.cmdline_ways == [] or way in config.cmdline_ways) \
            and (not (config.skip_perf_tests and isStatsTest())) \
            and way not in getTestOpts().omit_ways
        # Which ways we are asked to skip
        do_ways = list(filter (ok_way,all_ways))
        # Only run all ways in slow mode.
        # See Note [validate and testsuite speed] in toplevel Makefile.
        if config.accept:
            # Only ever run one way
            do_ways = do_ways[:1]
        elif config.speed > 0:
            # However, if we EXPLICITLY asked for a way (with extra_ways)
            # please test it!
            explicit_ways = list(filter(lambda way: way in opts.extra_ways, do_ways))
            other_ways = list(filter(lambda way: way not in opts.extra_ways, do_ways))
            do_ways = other_ways[:1] + explicit_ways
        # Find all files in the source directory that this test
        # depends on. Do this only once for all ways.
        # Generously add all filenames that start with the name of
        # the test to this set, as a convenience to test authors.
        # They will have to use the `extra_files` setup function to
        # specify all other files that their test depends on (but
        # this seems to be necessary for only about 10% of all
        # tests).
        files = set(f for f in os.listdir(opts.srcdir)
                       if f.startswith(name) and not f == name and
                          not f.endswith(testdir_suffix) and
                          not os.path.splitext(f)[1] in do_not_copy)
        for filename in (opts.extra_files + extra_src_files.get(name, [])):
            if filename.startswith('/'):
                framework_fail(name, 'whole-test',
                    'no absolute paths in extra_files please: ' + filename)
            elif '*' in filename:
                # Don't use wildcards in extra_files too much, as
                # globbing is slow.
                files.update((os.path.relpath(f, opts.srcdir)
                            for f in glob.iglob(in_srcdir(filename))))
            elif filename:
                files.add(filename)
            else:
                framework_fail(name, 'whole-test', 'extra_file is empty string')
        # Run the required tests...
        for way in do_ways:
            if stopping():
                break
            try:
                do_test(name, way, func, args, files)
            except KeyboardInterrupt:
                stopNow()
            except Exception as e:
                framework_fail(name, way, str(e))
                traceback.print_exc()
        t.n_tests_skipped += len(set(all_ways) - set(do_ways))
        if config.cleanup and do_ways:
            cleanup()
        # A test must not leave the package database cache modified.
        package_conf_cache_file_end_timestamp = get_package_cache_timestamp();
        if package_conf_cache_file_start_timestamp != package_conf_cache_file_end_timestamp:
            framework_fail(name, 'whole-test', 'Package cache timestamps do not match: ' + str(package_conf_cache_file_start_timestamp) + ' ' + str(package_conf_cache_file_end_timestamp))
    except Exception as e:
        framework_fail(name, 'runTest', 'Unhandled exception: ' + str(e))
    finally:
        watcher.notify()
def do_test(name, way, func, args, files):
    """Run test `name` in one `way`: set up a fresh scratch directory,
    link/copy the test's `files` into it, run any pre_cmd, invoke the
    driver `func`, and classify its result against the expectations."""
    opts = getTestOpts()
    full_name = name + '(' + way + ')'
    if_verbose(2, "=====> {0} {1} of {2} {3}".format(
        full_name, t.total_tests, len(allTestNames),
        [len(t.unexpected_passes),
         len(t.unexpected_failures),
         len(t.framework_failures)]))
    # Clean up prior to the test, so that we can't spuriously conclude
    # that it passed on the basis of old run outputs.
    cleanup()
    os.makedirs(opts.testdir)
    # Link all source files for this test into a new directory in
    # /tmp, and run the test in that directory. This makes it
    # possible to run tests in parallel, without modification, that
    # would otherwise (accidentally) write to the same output file.
    # It also makes it easier to keep the testsuite clean.
    for extra_file in files:
        src = in_srcdir(extra_file)
        dst = in_testdir(os.path.basename(extra_file.rstrip('/\\')))
        if os.path.isfile(src):
            link_or_copy_file(src, dst)
        elif os.path.isdir(src):
            os.mkdir(dst)
            lndir(src, dst)
        else:
            if not config.haddock and os.path.splitext(extra_file)[1] == '.t':
                # When using a ghc built without haddock support, .t
                # files are rightfully missing. Don't
                # framework_fail. Test will be skipped later.
                pass
            else:
                framework_fail(name, way,
                    'extra_file does not exist: ' + extra_file)
    if func.__name__ == 'run_command' or opts.pre_cmd:
        # When running 'MAKE' make sure 'TOP' still points to the
        # root of the testsuite.
        src_makefile = in_srcdir('Makefile')
        dst_makefile = in_testdir('Makefile')
        if os.path.exists(src_makefile):
            with io.open(src_makefile, 'r', encoding='utf8') as src:
                makefile = re.sub('TOP=.*', 'TOP=' + config.top, src.read(), 1)
            with io.open(dst_makefile, 'w', encoding='utf8') as dst:
                dst.write(makefile)
    if opts.pre_cmd:
        exit_code = runCmd('cd "{0}" && {1}'.format(opts.testdir, opts.pre_cmd))
        if exit_code != 0:
            framework_fail(name, way, 'pre_cmd failed: {0}'.format(exit_code))
    try:
        result = func(*[name,way] + args)
    except:
        # NOTE(review): this bare except silently swallows the real
        # exception (and its traceback); `result` is then unbound, so the
        # lookup below reports 'No passFail found' instead of the cause.
        # Consider letting the exception propagate to test_common_work,
        # which already catches and reports exceptions per way.
        pass
    if opts.expect not in ['pass', 'fail', 'missing-lib']:
        framework_fail(name, way, 'bad expected ' + opts.expect)
    try:
        passFail = result['passFail']
    except:
        passFail = 'No passFail found'
    # Strip a leading './' (or '.\') from the test directory for reporting.
    directory = re.sub('^\\.[/\\\\]', '', opts.testdir)
    if passFail == 'pass':
        if _expect_pass(way):
            t.n_expected_passes += 1
        else:
            if_verbose(1, '*** unexpected pass for %s' % full_name)
            t.unexpected_passes.append((directory, name, 'unexpected', way))
    elif passFail == 'fail':
        if _expect_pass(way):
            reason = result['reason']
            tag = result.get('tag')
            if tag == 'stat':
                if_verbose(1, '*** unexpected stat test failure for %s' % full_name)
                t.unexpected_stat_failures.append((directory, name, reason, way))
            else:
                if_verbose(1, '*** unexpected failure for %s' % full_name)
                t.unexpected_failures.append((directory, name, reason, way))
        else:
            if opts.expect == 'missing-lib':
                t.missing_libs.append((directory, name, 'missing-lib', way))
            else:
                # An expected failure.
                t.n_expected_failures += 1
    else:
        framework_fail(name, way, 'bad result ' + passFail)
def framework_fail(name, way, reason):
    """Record a failure of the test framework itself (not of the test)."""
    opts = getTestOpts()
    label = name + '(' + way + ')'
    if_verbose(1, '*** framework failure for %s %s ' % (label, reason))
    stripped_dir = re.sub('^\\.[/\\\\]', '', opts.testdir)
    t.framework_failures.append((stripped_dir, name, way, reason))
def badResult(result):
    """Return True unless `result` is a well-formed pass result.

    Failures and malformed results (not a dict, or missing 'passFail')
    both count as bad."""
    try:
        return result['passFail'] != 'pass'
    except (TypeError, KeyError):
        # Not subscriptable, or no 'passFail' key: treat as bad rather
        # than crash. (Narrowed from a bare `except:`.)
        return True
def passed():
    """A passing test result."""
    return {'passFail': 'pass'}
def failBecause(reason, tag=None):
    """A failing test result with a human-readable `reason` and an
    optional machine-readable `tag` (e.g. 'stat')."""
    return {'passFail': 'fail', 'reason': reason, 'tag': tag}
# -----------------------------------------------------------------------------
# Generic command tests
# A generic command test is expected to run and exit successfully.
#
# The expected exit code can be changed via exit_code() as normal, and
# the expected stdout/stderr are stored in <testname>.stdout and
# <testname>.stderr. The output of the command can be ignored
# altogether by using the setup function ignore_stdout instead of
# run_command.
def run_command( name, way, cmd ):
    """Run `cmd` via simple_run with no way flags and no extra options."""
    return simple_run( name, '', cmd, '' )
# -----------------------------------------------------------------------------
# GHCi tests
def ghci_script( name, way, script):
    # Run `script` through GHCi (with the test's compiler flags plus the
    # way-specific flags), comparing its output via simple_run.
    flags = ' '.join(get_compiler_flags())
    way_flags = ' '.join(config.way_flags[way])
    # We pass HC and HC_OPTS as environment variables, so that the
    # script can invoke the correct compiler by using ':! $HC $HC_OPTS'
    # (the doubled {{compiler}} survives .format as a literal {compiler}
    # placeholder — presumably substituted later in the runCmd pipeline;
    # confirm against runCmd).
    cmd = ('HC={{compiler}} HC_OPTS="{flags}" {{compiler}} {flags} {way_flags}'
          ).format(flags=flags, way_flags=way_flags)
    getTestOpts().stdin = script
    return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
# -----------------------------------------------------------------------------
# Compile-only tests
def compile( name, way, extra_hc_opts ):
    # Expect compilation to succeed.
    return do_compile( name, way, 0, '', [], extra_hc_opts )
def compile_fail( name, way, extra_hc_opts ):
    # Expect compilation to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts )
def backpack_typecheck( name, way, extra_hc_opts ):
    # Typecheck a Backpack (.bkp) unit without generating code.
    return do_compile( name, way, 0, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=1 )
def backpack_typecheck_fail( name, way, extra_hc_opts ):
    # As backpack_typecheck, but expect it to fail.
    return do_compile( name, way, 1, '', [], "-fno-code -fwrite-interface " + extra_hc_opts, backpack=1 )
def backpack_compile( name, way, extra_hc_opts ):
    # Compile a Backpack (.bkp) unit.
    return do_compile( name, way, 0, '', [], extra_hc_opts, backpack=1 )
def backpack_compile_fail( name, way, extra_hc_opts ):
    # As backpack_compile, but expect it to fail.
    return do_compile( name, way, 1, '', [], extra_hc_opts, backpack=1 )
def backpack_run( name, way, extra_hc_opts ):
    # Compile and run a Backpack (.bkp) unit.
    return compile_and_run__( name, way, '', [], extra_hc_opts, backpack=1 )
def multimod_compile( name, way, top_mod, extra_hc_opts ):
    # --make compilation rooted at top_mod.
    return do_compile( name, way, 0, top_mod, [], extra_hc_opts )
def multimod_compile_fail( name, way, top_mod, extra_hc_opts ):
    # As multimod_compile, but expect it to fail.
    return do_compile( name, way, 1, top_mod, [], extra_hc_opts )
def multi_compile( name, way, top_mod, extra_mods, extra_hc_opts ):
    # Build extra_mods separately first, then compile rooted at top_mod.
    return do_compile( name, way, 0, top_mod, extra_mods, extra_hc_opts)
def multi_compile_fail( name, way, top_mod, extra_mods, extra_hc_opts ):
    # As multi_compile, but expect the main compilation to fail.
    return do_compile( name, way, 1, top_mod, extra_mods, extra_hc_opts)
def do_compile(name, way, should_fail, top_mod, extra_mods, extra_hc_opts, **kwargs):
    """Compile-only driver: build any extra modules, compile the test
    itself, then compare the compiler's stderr with the expected file."""
    # print 'Compile only, extra args = ', extra_hc_opts
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']
    result = simple_build(name, way, extra_hc_opts, should_fail, top_mod, 0, 1, **kwargs)
    if badResult(result):
        return result
    # the actual stderr should always match the expected, regardless
    # of whether we expected the compilation to fail or not (successful
    # compilations may generate warnings).
    expected_stderr_file = find_expected_file(name, 'stderr')
    actual_stderr_file = add_suffix(name, 'comp.stderr')
    if not compare_outputs(way, 'stderr',
                           join_normalisers(getTestOpts().extra_errmsg_normaliser,
                                            normalise_errmsg),
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace):
        return failBecause('stderr mismatch')
    # no problems found, this test passed
    return passed()
def compile_cmp_asm( name, way, extra_hc_opts ):
    """Compile a .cmm file with -keep-s-files and compare the generated
    assembly against the expected <name>.asm file."""
    print('Compile only, extra args = ', extra_hc_opts)
    result = simple_build(name + '.cmm', way, '-keep-s-files -O ' + extra_hc_opts, 0, '', 0, 0)
    if badResult(result):
        return result
    # compilation is expected to succeed; what we check here is the
    # generated assembly against the expected .asm file.
    expected_asm_file = find_expected_file(name, 'asm')
    actual_asm_file = add_suffix(name, 's')
    if not compare_outputs(way, 'asm',
                           join_normalisers(normalise_errmsg, normalise_asm),
                           expected_asm_file, actual_asm_file):
        return failBecause('asm mismatch')
    # no problems found, this test passed
    return passed()
# -----------------------------------------------------------------------------
# Compile-and-run tests
def compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts, backpack=0 ):
    """Compile the test (or load it into GHCi for ghci ways), then run it."""
    # print 'Compile and run, extra args = ', extra_hc_opts
    result = extras_build( way, extra_mods, extra_hc_opts )
    if badResult(result):
        return result
    extra_hc_opts = result['hc_opts']
    if way.startswith('ghci'): # interpreted...
        return interpreter_run(name, way, extra_hc_opts, top_mod)
    else: # compiled...
        result = simple_build(name, way, extra_hc_opts, 0, top_mod, 1, 1, backpack = backpack)
        if badResult(result):
            return result
        cmd = './' + name;
        # we don't check the compiler's stderr for a compile-and-run test
        return simple_run( name, way, cmd, getTestOpts().extra_run_opts )
def compile_and_run( name, way, extra_hc_opts ):
    # Single-module compile-and-run.
    return compile_and_run__( name, way, '', [], extra_hc_opts)
def multimod_compile_and_run( name, way, top_mod, extra_hc_opts ):
    # --make build rooted at top_mod, then run.
    return compile_and_run__( name, way, top_mod, [], extra_hc_opts)
def multi_compile_and_run( name, way, top_mod, extra_mods, extra_hc_opts ):
    # As multimod_compile_and_run, with extra modules built separately first.
    return compile_and_run__( name, way, top_mod, extra_mods, extra_hc_opts)
def stats( name, way, stats_file ):
    # Check an existing stats file against the test's expected ranges.
    opts = getTestOpts()
    return checkStats(name, way, stats_file, opts.stats_range_fields)
# -----------------------------------------------------------------------------
# Check -t stats info
def checkStats(name, way, stats_file, range_fields):
    """Check `-t --machine-readable` stats in `stats_file`.

    `range_fields` maps a field name to (expected, deviation-in-percent).
    Returns passed() when every field is present and within
    [expected*(1-dev%), expected*(1+dev%)]; otherwise a failure (tagged
    'stat' for out-of-range values)."""
    full_name = name + '(' + way + ')'
    result = passed()
    if range_fields:
        try:
            with open(in_testdir(stats_file)) as f:
                contents = f.read()
        except IOError as e:
            return failBecause(str(e))
        for (field, (expected, dev)) in range_fields.items():
            # Stats lines look like: ("bytes allocated", "8675309")
            m = re.search(r'\("' + field + r'", "([0-9]+)"\)', contents)
            if m is None:
                print('Failed to find field: ', field)
                result = failBecause('no such stats field')
                # Bug fix: skip to the next field instead of falling
                # through and crashing on m.group(1) below.
                continue
            val = int(m.group(1))
            lowerBound = trunc(           expected * ((100 - float(dev))/100))
            upperBound = trunc(0.5 + ceil(expected * ((100 + float(dev))/100)))
            deviation = round(((float(val) * 100)/ expected) - 100, 1)
            if val < lowerBound:
                print(field, 'value is too low:')
                print('(If this is because you have improved GHC, please')
                print('update the test so that GHC doesn\'t regress again)')
                result = failBecause('stat too good', tag='stat')
            if val > upperBound:
                print(field, 'value is too high:')
                result = failBecause('stat not good enough', tag='stat')
            # Print the numbers when out of range, or at high verbosity.
            if val < lowerBound or val > upperBound or config.verbose >= 4:
                length = max(len(str(x)) for x in [expected, lowerBound, upperBound, val])
                def display(descr, val, extra):
                    print(descr, str(val).rjust(length), extra)
                display(' Expected ' + full_name + ' ' + field + ':', expected, '+/-' + str(dev) + '%')
                display(' Lower bound ' + full_name + ' ' + field + ':', lowerBound, '')
                display(' Upper bound ' + full_name + ' ' + field + ':', upperBound, '')
                display(' Actual ' + full_name + ' ' + field + ':', val, '')
                if val != expected:
                    display(' Deviation ' + full_name + ' ' + field + ':', deviation, '%')
    return result
# -----------------------------------------------------------------------------
# Build a single-module program
def extras_build( way, extra_mods, extra_hc_opts ):
    # Build each (module, flags) pair in extra_mods; on success, return a
    # pass result carrying the accumulated 'hc_opts' for the main build.
    for mod, opts in extra_mods:
        result = simple_build(mod, way, opts + ' ' + extra_hc_opts, 0, '', 0, 0)
        # Non-Haskell sources contribute their object file to later builds.
        if not (mod.endswith('.hs') or mod.endswith('.lhs')):
            extra_hc_opts += ' ' + replace_suffix(mod, 'o')
        if badResult(result):
            return result
    return {'passFail' : 'pass', 'hc_opts' : extra_hc_opts}
def simple_build(name, way, extra_hc_opts, should_fail, top_mod, link, addsuf, backpack = False):
    """Compile one unit and check the exit code.

    should_fail -- expect compilation to fail (non-zero exit)
    top_mod     -- root module for a --make build ('' for single-module)
    link        -- also link an executable
    addsuf      -- derive the source name by adding a suffix to `name`
    backpack    -- compile with --backpack (uses a .bkp source)
    """
    opts = getTestOpts()
    # Redirect stdout and stderr to the same file
    stdout = in_testdir(name, 'comp.stderr')
    stderr = subprocess.STDOUT
    # Work out the source file name...
    if top_mod != '':
        srcname = top_mod
    elif addsuf:
        if backpack:
            srcname = add_suffix(name, 'bkp')
        else:
            srcname = add_hs_lhs_suffix(name)
    else:
        srcname = name
    # ...and the compilation-mode flags.
    if top_mod != '':
        to_do = '--make '
        if link:
            to_do = to_do + '-o ' + name
    elif backpack:
        if link:
            to_do = '-o ' + name + ' '
        else:
            to_do = ''
        to_do = to_do + '--backpack '
    elif link:
        to_do = '-o ' + name
    else:
        to_do = '-c' # just compile
    # Collect compiler stats when the test checks them.
    stats_file = name + '.comp.stats'
    if opts.compiler_stats_range_fields:
        extra_hc_opts += ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    if backpack:
        extra_hc_opts += ' -outputdir ' + name + '.out'
    # Required by GHC 7.3+, harmless for earlier versions:
    if (getTestOpts().c_src or
        getTestOpts().objc_src or
        getTestOpts().objcpp_src or
        getTestOpts().cmm_src):
        extra_hc_opts += ' -no-hs-main '
    if getTestOpts().compile_cmd_prefix == '':
        cmd_prefix = ''
    else:
        cmd_prefix = getTestOpts().compile_cmd_prefix + ' '
    flags = ' '.join(get_compiler_flags() + config.way_flags[way])
    cmd = ('cd "{opts.testdir}" && {cmd_prefix} '
           '{{compiler}} {to_do} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())
    exit_code = runCmd(cmd, None, stdout, stderr, opts.compile_timeout_multiplier)
    if exit_code != 0 and not should_fail:
        if config.verbose >= 1 and _expect_pass(way):
            print('Compile failed (exit code {0}) errors were:'.format(exit_code))
            actual_stderr_path = in_testdir(name, 'comp.stderr')
            if_verbose_dump(1, actual_stderr_path)
    # ToDo: if the sub-shell was killed by ^C, then exit
    statsResult = checkStats(name, way, stats_file, opts.compiler_stats_range_fields)
    if badResult(statsResult):
        return statsResult
    # The exit code must agree with should_fail.
    if should_fail:
        if exit_code == 0:
            return failBecause('exit code 0')
    else:
        if exit_code != 0:
            return failBecause('exit code non-0')
    return passed()
# -----------------------------------------------------------------------------
# Run a program and check its output
#
# If testname.stdin exists, route input from that, else
# from /dev/null. Route output to testname.run.stdout and
# testname.run.stderr. Returns the exit code of the run.
def simple_run(name, way, prog, extra_run_opts):
    """Run `prog` with the way's RTS flags and the test's extra options,
    then check exit code, stdout/stderr, and any heap/time profiles."""
    opts = getTestOpts()
    # figure out what to use for stdin
    if opts.stdin:
        stdin = in_testdir(opts.stdin)
    elif os.path.exists(in_testdir(name, 'stdin')):
        stdin = in_testdir(name, 'stdin')
    else:
        stdin = None
    stdout = in_testdir(name, 'run.stdout')
    if opts.combined_output:
        stderr = subprocess.STDOUT
    else:
        stderr = in_testdir(name, 'run.stderr')
    my_rts_flags = rts_flags(way)
    # Collect runtime stats when the test checks them.
    stats_file = name + '.stats'
    if opts.stats_range_fields:
        stats_args = ' +RTS -V0 -t' + stats_file + ' --machine-readable -RTS'
    else:
        stats_args = ''
    # Put extra_run_opts last: extra_run_opts('+RTS foo') should work.
    cmd = prog + stats_args + ' ' + my_rts_flags + ' ' + extra_run_opts
    if opts.cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd)
    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
    # run the command
    exit_code = runCmd(cmd, stdin, stdout, stderr, opts.run_timeout_multiplier)
    # check the exit code
    if exit_code != opts.exit_code:
        if config.verbose >= 1 and _expect_pass(way):
            print('Wrong exit code (expected', opts.exit_code, ', actual', exit_code, ')')
            dump_stdout(name)
            dump_stderr(name)
        return failBecause('bad exit code')
    if not (opts.ignore_stderr or stderr_ok(name, way) or opts.combined_output):
        return failBecause('bad stderr')
    if not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    check_hp = '-h' in my_rts_flags and opts.check_hp
    check_prof = '-p' in my_rts_flags
    # exit_code > 127 probably indicates a crash, so don't try to run hp2ps.
    if check_hp and (exit_code <= 127 or exit_code == 251) and not check_hp_ok(name):
        return failBecause('bad heap profile')
    if check_prof and not check_prof_ok(name, way):
        return failBecause('bad profile')
    return checkStats(name, way, stats_file, opts.stats_range_fields)
def rts_flags(way):
    """Render the way's RTS options as a '+RTS ... -RTS' chunk ('' if none)."""
    args = config.way_rts_flags.get(way, [])
    if not args:
        return ''
    return '+RTS {0} -RTS'.format(' '.join(args))
# -----------------------------------------------------------------------------
# Run a program in the interpreter and check its output
def interpreter_run(name, way, extra_hc_opts, top_mod):
    """Run the test inside GHCi, driving it with a generated script, then
    split GHCi's own output from the program's output and check both."""
    opts = getTestOpts()
    stdout = in_testdir(name, 'interp.stdout')
    stderr = in_testdir(name, 'interp.stderr')
    script = in_testdir(name, 'genscript')
    if opts.combined_output:
        framework_fail(name, 'unsupported',
                       'WAY=ghci and combined_output together is not supported')
    if (top_mod == ''):
        srcname = add_hs_lhs_suffix(name)
    else:
        srcname = top_mod
    delimiter = '===== program output begins here\n'
    with io.open(script, 'w', encoding='utf8') as f:
        # set the prog name and command-line args to match the compiled
        # environment.
        f.write(':set prog ' + name + '\n')
        f.write(':set args ' + opts.extra_run_opts + '\n')
        # Add marker lines to the stdout and stderr output files, so we
        # can separate GHCi's output from the program's.
        f.write(':! echo ' + delimiter)
        f.write(':! echo 1>&2 ' + delimiter)
        # Set stdout to be line-buffered to match the compiled environment.
        f.write('System.IO.hSetBuffering System.IO.stdout System.IO.LineBuffering\n')
        # wrapping in GHC.TopHandler.runIO ensures we get the same output
        # in the event of an exception as for the compiled program.
        f.write('GHC.TopHandler.runIOFastExit Main.main Prelude.>> Prelude.return ()\n')
    # Append the test's stdin (if any) to the script, after the commands.
    stdin = in_testdir(opts.stdin if opts.stdin else add_suffix(name, 'stdin'))
    if os.path.exists(stdin):
        os.system('cat "{0}" >> "{1}"'.format(stdin, script))
    flags = ' '.join(get_compiler_flags() + config.way_flags[way])
    cmd = ('{{compiler}} {srcname} {flags} {extra_hc_opts}'
          ).format(**locals())
    if getTestOpts().cmd_wrapper != None:
        cmd = opts.cmd_wrapper(cmd);
    cmd = 'cd "{opts.testdir}" && {cmd}'.format(**locals())
    exit_code = runCmd(cmd, script, stdout, stderr, opts.run_timeout_multiplier)
    # split the stdout into compilation/program output
    split_file(stdout, delimiter,
               in_testdir(name, 'comp.stdout'),
               in_testdir(name, 'run.stdout'))
    split_file(stderr, delimiter,
               in_testdir(name, 'comp.stderr'),
               in_testdir(name, 'run.stderr'))
    # check the exit code
    if exit_code != getTestOpts().exit_code:
        print('Wrong exit code (expected', getTestOpts().exit_code, ', actual', exit_code, ')')
        dump_stdout(name)
        dump_stderr(name)
        return failBecause('bad exit code')
    # ToDo: if the sub-shell was killed by ^C, then exit
    if not (opts.ignore_stderr or stderr_ok(name, way)):
        return failBecause('bad stderr')
    elif not (opts.ignore_stdout or stdout_ok(name, way)):
        return failBecause('bad stdout')
    else:
        return passed()
def split_file(in_fn, delimiter, out1_fn, out2_fn):
    """Split *in_fn* into two files at the first line that, after
    stripping leading whitespace, equals *delimiter*.

    Lines before the delimiter go to *out1_fn*; lines after it go to
    *out2_fn*.  The delimiter line itself is discarded.  If the
    delimiter never appears, all input ends up in *out1_fn*.
    """
    # See Note [Universal newlines].
    with io.open(in_fn, 'r', encoding='utf8', errors='replace', newline=None) as infile:
        with io.open(out1_fn, 'w', encoding='utf8', newline='') as out1:
            with io.open(out2_fn, 'w', encoding='utf8', newline='') as out2:
                line = infile.readline()
                # lstrip() replaces the old re.sub('^\\s*', '', line):
                # same behaviour, no regex, and no un-raw '\s' escape
                # (a SyntaxWarning on modern Pythons).
                while line.lstrip() != delimiter and line != '':
                    out1.write(line)
                    line = infile.readline()
                # Skip the delimiter line itself.
                line = infile.readline()
                while line != '':
                    out2.write(line)
                    line = infile.readline()
# -----------------------------------------------------------------------------
# Utils
def get_compiler_flags():
    """Return the compiler flags for the current test: the always-on
    flags, the test's extra flags, and -outputdir when configured."""
    opts = getTestOpts()

    flags = copy.copy(opts.compiler_always_flags)
    flags.append(opts.extra_hc_opts)

    # Identity test against the None sentinel ('is not None', not '!= None').
    if opts.outputdir is not None:
        flags.extend(["-outputdir", opts.outputdir])

    return flags
def stdout_ok(name, way):
    """Compare the program's stdout against the expected sample.

    If the test supplied a custom check_stdout hook, delegate to it;
    otherwise fall back to the standard expected/actual comparison.
    """
    actual_stdout_file = add_suffix(name, 'run.stdout')
    expected_stdout_file = find_expected_file(name, 'stdout')
    extra_norm = join_normalisers(normalise_output, getTestOpts().extra_normaliser)

    check_stdout = getTestOpts().check_stdout
    if check_stdout:
        actual_stdout_path = in_testdir(actual_stdout_file)
        return check_stdout(actual_stdout_path, extra_norm)

    return compare_outputs(way, 'stdout', extra_norm,
                           expected_stdout_file, actual_stdout_file)
def dump_stdout( name ):
    """Print the test's captured run.stdout file to the console."""
    print('Stdout:')
    path = in_testdir(name, 'run.stdout')
    with open(path) as handle:
        print(handle.read())
def stderr_ok(name, way):
    """Compare the program's stderr against the expected sample,
    normalising error messages and collapsing whitespace before the
    comparison (see compare_outputs)."""
    actual_stderr_file = add_suffix(name, 'run.stderr')
    expected_stderr_file = find_expected_file(name, 'stderr')

    return compare_outputs(way, 'stderr',
                           join_normalisers(normalise_errmsg, getTestOpts().extra_errmsg_normaliser), \
                           expected_stderr_file, actual_stderr_file,
                           whitespace_normaliser=normalise_whitespace)
def dump_stderr( name ):
    """Print the test's captured run.stderr file to the console."""
    print("Stderr:")
    path = in_testdir(name, 'run.stderr')
    with open(path) as handle:
        print(handle.read())
def read_no_crs(file):
    """Read *file* as UTF-8 text with Windows line endings translated
    to '\\n'; return '' if the file cannot be opened.

    Undecodable bytes are replaced rather than raising (some sample
    files are deliberately not valid UTF-8).
    """
    contents = ''
    try:
        # See Note [Universal newlines].
        with io.open(file, 'r', encoding='utf8', errors='replace', newline=None) as h:
            contents = h.read()
    except OSError:
        # On Windows, if the program fails very early, the file that
        # stdout/stderr were redirected to may never get created.
        # OSError (not a bare 'except:') covers that case without also
        # swallowing KeyboardInterrupt etc.
        pass
    return contents
def write_file(file, str):
    """Write *str* to *file* as UTF-8 without translating '\\n'."""
    # See Note [Universal newlines].
    handle = io.open(file, 'w', encoding='utf8', newline='')
    try:
        handle.write(str)
    finally:
        handle.close()
# Note [Universal newlines]
#
# We don't want to write any Windows style line endings ever, because
# it would mean that `make accept` would touch every line of the file
# when switching between Linux and Windows.
#
# Furthermore, when reading a file, it is convenient to translate all
# Windows style endings to '\n', as it simplifies searching or massaging
# the content.
#
# Solution: use `io.open` instead of `open`
# * when reading: use newline=None to translate '\r\n' to '\n'
# * when writing: use newline='' to not translate '\n' to '\r\n'
#
# See https://docs.python.org/2/library/io.html#io.open.
#
# This should work with both python2 and python3, and with both mingw*
# and msys2 style Python.
#
# Do note that io.open returns unicode strings. So we have to specify
# the expected encoding. But there is at least one file which is not
# valid utf8 (decodingerror002.stdout). Solution: use errors='replace'.
# Another solution would be to open files in binary mode always, and
# operate on bytes.
def check_hp_ok(name):
    """Run hp2ps over the test's heap profile and, when ghostscript is
    available, validate that the generated PostScript is well-formed.

    Returns True on success (or when ghostscript is unavailable and the
    .ps file at least exists), False otherwise.
    """
    opts = getTestOpts()

    # do not qualify for hp2ps because we should be in the right directory
    hp2psCmd = 'cd "{opts.testdir}" && {{hp2ps}} {name}'.format(**locals())

    hp2psResult = runCmd(hp2psCmd)

    actual_ps_path = in_testdir(name, 'ps')

    if hp2psResult == 0:
        if os.path.exists(actual_ps_path):
            if gs_working:
                gsResult = runCmd(genGSCmd(actual_ps_path))
                if (gsResult == 0):
                    return (True)
                else:
                    # NOTE: message is missing a space before "is".
                    print("hp2ps output for " + name + "is not valid PostScript")
            else: return (True) # assume postscript is valid without ghostscript
        else:
            print("hp2ps did not generate PostScript for " + name)
            return (False)
    else:
        print("hp2ps error when processing heap profile for " + name)
        return(False)
def check_prof_ok(name, way):
    """Check the test's .prof output against the sample, if any.

    Vacuously True when no expected prof sample exists; False when the
    actual .prof file is missing or empty.
    """
    expected_prof_file = find_expected_file(name, 'prof.sample')
    expected_prof_path = in_testdir(expected_prof_file)

    # Check actual prof file only if we have an expected prof file to
    # compare it with.
    if not os.path.exists(expected_prof_path):
        return True

    actual_prof_file = add_suffix(name, 'prof')
    actual_prof_path = in_testdir(actual_prof_file)

    if not os.path.exists(actual_prof_path):
        print(actual_prof_path + " does not exist")
        return(False)

    if os.path.getsize(actual_prof_path) == 0:
        print(actual_prof_path + " is empty")
        return(False)

    return compare_outputs(way, 'prof', normalise_prof,
                           expected_prof_file, actual_prof_file,
                           whitespace_normaliser=normalise_whitespace)
# Compare expected output to actual output, and optionally accept the
# new output. Returns true if output matched or was accepted, false
# otherwise. See Note [Output comparison] for the meaning of the
# normaliser and whitespace_normaliser parameters.
def compare_outputs(way, kind, normaliser, expected_file, actual_file,
                    whitespace_normaliser=lambda x:x):
    """Compare the expected sample (in the source dir) against the
    actual output (in the test dir), after normalisation.

    Returns 1 on match, or when the new output was accepted under
    config.accept; 0 otherwise.  *kind* (e.g. 'stdout', 'stderr',
    'prof') is used only in diagnostics.  See Note [Output comparison]
    for the role of the two normalisers.
    """
    expected_path = in_srcdir(expected_file)
    actual_path = in_testdir(actual_file)
    if os.path.exists(expected_path):
        expected_str = normaliser(read_no_crs(expected_path))
        # Create the .normalised file in the testdir, not in the srcdir.
        expected_normalised_file = add_suffix(expected_file, 'normalised')
        expected_normalised_path = in_testdir(expected_normalised_file)
    else:
        # No sample file: treat the expected output as empty.
        expected_str = ''
        expected_normalised_path = '/dev/null'

    actual_raw = read_no_crs(actual_path)
    actual_str = normaliser(actual_raw)

    # See Note [Output comparison].
    if whitespace_normaliser(expected_str) == whitespace_normaliser(actual_str):
        return 1
    else:
        if config.verbose >= 1 and _expect_pass(way):
            print('Actual ' + kind + ' output differs from expected:')

        if expected_normalised_path != '/dev/null':
            write_file(expected_normalised_path, expected_str)

        actual_normalised_path = add_suffix(actual_path, 'normalised')
        write_file(actual_normalised_path, actual_str)

        if config.verbose >= 1 and _expect_pass(way):
            # See Note [Output comparison].
            r = os.system('diff -uw "{0}" "{1}"'.format(expected_normalised_path,
                                                        actual_normalised_path))

            # If for some reason there were no non-whitespace differences,
            # then do a full diff
            if r == 0:
                r = os.system('diff -u "{0}" "{1}"'.format(expected_normalised_path,
                                                           actual_normalised_path))

        if config.accept and (getTestOpts().expect == 'fail' or
                              way in getTestOpts().expect_fail_for):
            # Never overwrite a sample from a test that was expected to fail.
            if_verbose(1, 'Test is expected to fail. Not accepting new output.')
            return 0
        elif config.accept and actual_raw:
            if_verbose(1, 'Accepting new output.')
            write_file(expected_path, actual_raw)
            return 1
        elif config.accept:
            # Empty new output means the sample file is obsolete: delete it.
            if_verbose(1, 'No output. Deleting "{0}".'.format(expected_path))
            os.remove(expected_path)
            return 1
        else:
            return 0
# Note [Output comparison]
#
# We do two types of output comparison:
#
# 1. To decide whether a test has failed. We apply a `normaliser` and an
# optional `whitespace_normaliser` to the expected and the actual
# output, before comparing the two.
#
# 2. To show as a diff to the user when the test indeed failed. We apply
# the same `normaliser` function to the outputs, to make the diff as
# small as possible (only showing the actual problem). But we don't
# apply the `whitespace_normaliser` here, because it might completely
# squash all whitespace, making the diff unreadable. Instead we rely
# on the `diff` program to ignore whitespace changes as much as
# possible (#10152).
def normalise_whitespace( str ):
    """Collapse every run of whitespace into a single space and strip
    leading/trailing whitespace."""
    return ' '.join(str.split())
# Matches the ", called at FILE:LINE:COL in PACKAGE:" suffix of a
# call-stack entry, capturing the file name.
callSite_re = re.compile(r', called at (.+):[\d]+:[\d]+ in [\w\-\.]+:')

def normalise_callstacks(s):
    """Normalise GHC call-stack output: mask line/column numbers and
    package ids, and (unless the test opts say otherwise) strip
    -prof call stacks entirely."""
    opts = getTestOpts()
    def repl(matches):
        location = matches.group(1)
        location = normalise_slashes_(location)
        return ', called at {0}:<line>:<column> in <package-id>:'.format(location)

    # Ignore line number differences in call stacks (#10834).
    s = re.sub(callSite_re, repl, s)
    # Ignore the change in how we identify implicit call-stacks
    s = s.replace('from ImplicitParams', 'from HasCallStack')

    if not opts.keep_prof_callstacks:
        # Don't output prof callstacks. Test output should be
        # independent from the WAY we run the test.
        s = re.sub(r'CallStack \(from -prof\):(\n .*)*\n?', '', s)

    return s
# Matches the two fingerprint words of a Typeable TyCon representation,
# e.g. "TyCon 123## 456## ".
tyCon_re = re.compile(r'TyCon\s*\d+L?\#\#\s*\d+L?\#\#\s*', flags=re.MULTILINE)

def normalise_type_reps(str):
    """Normalise out fingerprints from Typeable TyCon representations."""
    return tyCon_re.sub('TyCon FINGERPRINT FINGERPRINT ', str)
def normalise_errmsg( str ):
    """Normalise error-messages emitted via stderr"""
    # IBM AIX's `ld` is a bit chatty
    if opsys('aix'):
        str = str.replace('ld: 0706-027 The -x flag is ignored.\n', '')

    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)

    # If somefile ends in ".exe" or ".exe:", zap ".exe" (for Windows)
    # the colon is there because it appears in error messages; this
    # hacky solution is used in place of more sophisticated filename
    # mangling
    str = re.sub('([^\\s])\\.exe', '\\1', str)

    # normalise slashes, minimise Windows/Unix filename differences
    str = re.sub('\\\\', '/', str)

    # The inplace ghc's are called ghc-stage[123] to avoid filename
    # collisions, so we need to normalise that to just "ghc"
    str = re.sub('ghc-stage[123]', 'ghc', str)

    # Error messages sometimes mention the integer implementation package;
    # normalise its name and version away.
    str = re.sub('integer-(gmp|simple)-[0-9.]+', 'integer-<IMPL>-<VERSION>', str)

    # Also filter out bullet characters. This is because bullets are used to
    # separate error sections, and tests shouldn't be sensitive to how the
    # division happens.
    bullet = '•'.encode('utf8') if isinstance(str, bytes) else '•'
    str = str.replace(bullet, '')
    return str
# normalise a .prof file, so that we can reasonably compare it against
# a sample. This doesn't compare any of the actual profiling data,
# only the shape of the profile and the number of entries.
def normalise_prof (str):
    """Normalise a .prof file so it can be compared against a sample:
    keep only the profile's shape (cost centre, module, src, entries)
    and drop all timing/allocation figures.
    """
    # strip everything up to the line beginning "COST CENTRE"
    str = re.sub('^(.*\n)*COST CENTRE[^\n]*\n','',str)

    # strip results for CAFs, these tend to change unpredictably
    str = re.sub('[ \t]*(CAF|IDLE).*\n','',str)

    # XXX Ignore Main.main.  Sometimes this appears under CAF, and
    # sometimes under MAIN.
    str = re.sub('[ \t]*main[ \t]+Main.*\n','',str)

    # Each remaining row has nine whitespace-separated columns, e.g.
    #
    #   MAIN  MAIN <built-in>  53  0  0.0  0.2  0.0 100.0
    #   main  Main Main_1.hs:(10,1)-(20,20) 106 1 0.0 20.2 0.0 61.0
    #
    # i.e. name, module, src, no., entries, and four %time/%alloc
    # figures.  We keep only columns 1 (cost centre), 2 (module),
    # 3 (src) and 5 (entries), giving e.g.
    #
    #   MAIN  MAIN <built-in>  0
    #   main  Main Main_1.hs:(10,1)-(20,20)  1
    #
    # SCC names can't have whitespace, so this works fine.
    str = re.sub(r'\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*(\S+)\s*',
            '\\1 \\2 \\3 \\5\n', str)
    return str
def normalise_slashes_( str ):
    """Convert backslashes to forward slashes (Windows path normalisation)."""
    return str.replace('\\', '/')
def normalise_exe_( str ):
    """Strip ".exe" suffixes (Windows executable names)."""
    # Raw string: the original '\.exe' relied on Python preserving the
    # unknown escape, which raises a SyntaxWarning on recent Pythons.
    str = re.sub(r'\.exe', '', str)
    return str
def normalise_output( str ):
    """Normalise program output (stdout/stderr of the compiled test)."""
    # remove " error:" and lower-case " Warning:" to make patch for
    # trac issue #10021 smaller
    str = modify_lines(str, lambda l: re.sub(' error:', '', l))
    str = modify_lines(str, lambda l: re.sub(' Warning:', ' warning:', l))
    # Remove a .exe extension (for Windows)
    # This can occur in error messages generated by the program.
    str = re.sub('([^\\s])\\.exe', '\\1', str)
    str = normalise_callstacks(str)
    str = normalise_type_reps(str)
    return str
def normalise_asm( str ):
    """Normalise assembly output: drop assembler directives and blank
    lines, strip '@plt' markers, and reduce each instruction to its
    mnemonic (keeping the target operand for 'call' instructions)."""
    # Lines starting (after indentation) with a dot are metadata
    # directives such as ".type" -- drop them.
    directive = re.compile('^[ \t]*\\..*$')
    kept = []
    for raw_line in str.split('\n'):
        if directive.match(raw_line):
            continue
        tokens = raw_line.replace('@plt', '').lstrip().split()
        if not tokens:
            # Blank line.
            continue
        if tokens[0] == 'call':
            # Keep the call target; drop operands everywhere else.
            kept.append(tokens[0] + ' ' + tokens[1])
        else:
            kept.append(tokens[0])
    return '\n'.join(kept)
def if_verbose( n, s ):
    """Print *s* only when the configured verbosity is at least *n*."""
    if config.verbose < n:
        return
    print(s)
def if_verbose_dump( n, f ):
    """At verbosity >= *n*, print the contents of file *f*; print an
    empty line instead if the file cannot be read."""
    if config.verbose >= n:
        try:
            with io.open(f) as file:
                print(file.read())
        # 'except Exception' rather than a bare 'except:', so that
        # KeyboardInterrupt/SystemExit still propagate.
        except Exception:
            print('')
def runCmd(cmd, stdin=None, stdout=None, stderr=None, timeout_multiplier=1.0):
    """Run *cmd* under the timeout program, feeding it the contents of
    the *stdin* file (if any) and appending the captured output to the
    *stdout*/*stderr* files (if given).

    Returns the child's exit code.  By convention of the timeout
    program, 98 means ^C was pressed and 99 means the timeout fired.
    """
    timeout_prog = strip_quotes(config.timeout_prog)
    timeout = str(int(ceil(config.timeout * timeout_multiplier)))

    # Format cmd using config. Example: cmd='{hpc} report A.tix'
    cmd = cmd.format(**config.__dict__)
    if_verbose(3, cmd + ('< ' + os.path.basename(stdin) if stdin else ''))

    # declare the buffers to a default
    stdin_buffer = None

    # ***** IMPORTANT *****
    # We have to treat input and output as
    # just binary data here. Don't try to decode
    # it to a string, since we have tests that actually
    # feed malformed utf-8 to see how GHC handles it.
    if stdin:
        with io.open(stdin, 'rb') as f:
            stdin_buffer = f.read()

    stdout_buffer = ''
    stderr_buffer = ''

    hStdErr = subprocess.PIPE
    if stderr is subprocess.STDOUT:
        hStdErr = subprocess.STDOUT

    try:
        # cmd is a complex command in Bourne-shell syntax
        # e.g (cd . && 'C:/users/simonpj/HEAD/inplace/bin/ghc-stage2' ...etc)
        # Hence it must ultimately be run by a Bourne shell. It's timeout's job
        # to invoke the Bourne shell
        r = subprocess.Popen([timeout_prog, timeout, cmd],
                             stdin=subprocess.PIPE,
                             stdout=subprocess.PIPE,
                             stderr=hStdErr)
        stdout_buffer, stderr_buffer = r.communicate(stdin_buffer)
    except Exception as e:
        traceback.print_exc()
        # NOTE(review): 'name' and 'way' are not defined in this scope,
        # so this call would itself raise NameError -- confirm and fix.
        framework_fail(name, way, str(e))
    finally:
        try:
            if stdout:
                with io.open(stdout, 'ab') as f:
                    f.write(stdout_buffer)
            if stderr:
                if stderr is not subprocess.STDOUT:
                    with io.open(stderr, 'ab') as f:
                        f.write(stderr_buffer)
        except Exception as e:
            traceback.print_exc()
            # NOTE(review): same undefined 'name'/'way' issue as above.
            framework_fail(name, way, str(e))

    # NOTE(review): if Popen itself raised, 'r' is unbound here (NameError).
    if r.returncode == 98:
        # The python timeout program uses 98 to signal that ^C was pressed
        stopNow()
    if r.returncode == 99 and getTestOpts().exit_code != 99:
        # Only print a message when timeout killed the process unexpectedly.
        if_verbose(1, 'Timeout happened...killed process "{0}"...\n'.format(cmd))
    return r.returncode
# -----------------------------------------------------------------------------
# checking if ghostscript is available for checking the output of hp2ps
def genGSCmd(psfile):
    """Build the ghostscript command line used to validate *psfile*.

    The doubled braces leave a literal '{gs}' placeholder for the later
    cmd.format(**config.__dict__) expansion performed by runCmd."""
    options = '-dNODISPLAY -dBATCH -dQUIET -dNOPAUSE'
    return '{{gs}} {0} "{1}"'.format(options, psfile)
def gsNotWorking():
    """Record, via the module-global gs_working flag, that ghostscript
    is unavailable so hp2ps output cannot be validated."""
    # The original declared 'global gs_working' twice; once suffices.
    global gs_working
    print("GhostScript not available for hp2ps tests")
    gs_working = 0
# Probe once at module load whether ghostscript can validate PostScript:
# it must succeed on a known-good file AND fail on a known-bad one.
if config.have_profiling:
    if config.gs != '':
        resultGood = runCmd(genGSCmd(config.confdir + '/good.ps'));
        if resultGood == 0:
            resultBad = runCmd(genGSCmd(config.confdir + '/bad.ps') +
                               ' >/dev/null 2>&1')
            if resultBad != 0:
                print("GhostScript available for hp2ps tests")
                gs_working = 1;
            else:
                # Accepted the bad file: gs is not actually checking.
                gsNotWorking();
        else:
            gsNotWorking();
    else:
        gsNotWorking();
def add_suffix( name, suffix ):
    """Return "name.suffix", or just *name* when *suffix* is empty."""
    if suffix == '':
        return name
    return '{0}.{1}'.format(name, suffix)
def add_hs_lhs_suffix(name):
    """Append the source-file extension implied by the current test
    options: C, C--, Objective-C, Objective-C++, literate Haskell, or
    (by default) plain Haskell."""
    if getTestOpts().c_src:
        return add_suffix(name, 'c')
    elif getTestOpts().cmm_src:
        return add_suffix(name, 'cmm')
    elif getTestOpts().objc_src:
        return add_suffix(name, 'm')
    elif getTestOpts().objcpp_src:
        return add_suffix(name, 'mm')
    elif getTestOpts().literate:
        return add_suffix(name, 'lhs')
    else:
        return add_suffix(name, 'hs')
def replace_suffix( name, suffix ):
    """Return *name* with its extension replaced by *suffix*."""
    # splitext's second element (the old extension) is unused.
    base, _ = os.path.splitext(name)
    return base + '.' + suffix
def in_testdir(name, suffix=''):
    # Path of name(.suffix) inside the test's working directory.
    return os.path.join(getTestOpts().testdir, add_suffix(name, suffix))

def in_srcdir(name, suffix=''):
    # Path of name(.suffix) inside the test's source directory.
    return os.path.join(getTestOpts().srcdir, add_suffix(name, suffix))
# Finding the sample output. The filename is of the form
#
# <test>.stdout[-ws-<wordsize>][-<platform>]
#
def find_expected_file(name, suff):
    """Return the most specific sample-output file name that exists in
    the source dir: <name>.<suff> optionally qualified by wordsize
    and/or platform/os, falling back to the bare <name>.<suff>."""
    basename = add_suffix(name, suff)

    # Most specific first: platform beats os beats unqualified, and a
    # wordsize-qualified name beats the same name without one.
    files = [basename + ws + plat
             for plat in ['-' + config.platform, '-' + config.os, '']
             for ws in ['-ws-' + config.wordsize, '']]

    for f in files:
        if os.path.exists(in_srcdir(f)):
            return f

    return basename
if config.msys:
    import stat
    def cleanup():
        # msys variant: also copes with read-only files that Windows
        # refuses to delete.
        def on_error(function, path, excinfo):
            # At least one test (T11489) removes the write bit from a file it
            # produces. Windows refuses to delete read-only files with a
            # permission error. Try setting the write bit and try again.
            if excinfo[1].errno == 13:
                os.chmod(path, stat.S_IWRITE)
                os.unlink(path)

        testdir = getTestOpts().testdir
        shutil.rmtree(testdir, ignore_errors=False, onerror=on_error)
else:
    def cleanup():
        # Remove the test's working directory, if it exists.
        testdir = getTestOpts().testdir
        if os.path.exists(testdir):
            shutil.rmtree(testdir, ignore_errors=False)
# -----------------------------------------------------------------------------
# Return a list of all the files ending in '.T' below directories roots.
def findTFiles(roots):
    """Yield every file ending in '.T' beneath the given root
    directories, descending subdirectories in sorted order and skipping
    uncleaned .run test directories."""
    for root in roots:
        for path, dirs, files in os.walk(root, topdown=True):
            # Prune (in place) any leftover per-test .run directories.
            dirs[:] = sorted(d for d in dirs
                             if not d.endswith(testdir_suffix))
            for fname in files:
                if fname.endswith('.T'):
                    yield os.path.join(path, fname)
# -----------------------------------------------------------------------------
# Output a test summary to the specified file object
def summary(t, file, short=False):
    """Write a test-run summary for results object *t* to *file*.

    With short=True only the list of unexpected results is printed;
    otherwise a full breakdown of counts and the per-category listings
    follow.
    """
    file.write('\n')
    printUnexpectedTests(file,
        [t.unexpected_passes, t.unexpected_failures,
         t.unexpected_stat_failures, t.framework_failures])

    if short:
        # Only print the list of unexpected tests above.
        return

    file.write('SUMMARY for test run started at '
               + time.strftime("%c %Z", t.start_time) + '\n'
               + str(datetime.timedelta(seconds=
                    round(time.time() - time.mktime(t.start_time)))).rjust(8)
               + ' spent to go through\n'
               + repr(t.total_tests).rjust(8)
               + ' total tests, which gave rise to\n'
               + repr(t.total_test_cases).rjust(8)
               + ' test cases, of which\n'
               + repr(t.n_tests_skipped).rjust(8)
               + ' were skipped\n'
               + '\n'
               + repr(len(t.missing_libs)).rjust(8)
               + ' had missing libraries\n'
               + repr(t.n_expected_passes).rjust(8)
               + ' expected passes\n'
               + repr(t.n_expected_failures).rjust(8)
               + ' expected failures\n'
               + '\n'
               + repr(len(t.framework_failures)).rjust(8)
               + ' caused framework failures\n'
               + repr(len(t.unexpected_passes)).rjust(8)
               + ' unexpected passes\n'
               + repr(len(t.unexpected_failures)).rjust(8)
               + ' unexpected failures\n'
               + repr(len(t.unexpected_stat_failures)).rjust(8)
               + ' unexpected stat failures\n'
               + '\n')

    if t.unexpected_passes:
        file.write('Unexpected passes:\n')
        printTestInfosSummary(file, t.unexpected_passes)

    if t.unexpected_failures:
        file.write('Unexpected failures:\n')
        printTestInfosSummary(file, t.unexpected_failures)

    if t.unexpected_stat_failures:
        file.write('Unexpected stat failures:\n')
        printTestInfosSummary(file, t.unexpected_stat_failures)

    if t.framework_failures:
        file.write('Framework failures:\n')
        printTestInfosSummary(file, t.framework_failures)

    if stopping():
        file.write('WARNING: Testsuite run was terminated early\n')
def printUnexpectedTests(file, testInfoss):
    """Write a TEST="..." line listing every test (excluding .T scripts)
    that produced an unexpected result; write nothing when there were
    none."""
    unexpected = set()
    for testInfos in testInfoss:
        for (_, name, _, _) in testInfos:
            if not name.endswith('.T'):
                unexpected.add(name)

    if unexpected:
        file.write('Unexpected results from:\n')
        file.write('TEST="' + ' '.join(unexpected) + '"\n')
        file.write('\n')
def printTestInfosSummary(file, testInfos):
    """Write one '   dir name [reason] (way)' line per entry, with the
    directory column padded to a common width."""
    width = max(len(directory) for (directory, _, _, _) in testInfos)
    for (directory, name, reason, way) in testInfos:
        file.write('   {0} {1} [{2}] ({3})\n'.format(
            directory.ljust(width), name, reason, way))
    file.write('\n')
def modify_lines(s, f):
    """Apply *f* to every line of *s* and rejoin; ensure non-empty
    results end with a newline (prevents '\\ No newline at end of
    file' noise when diffing)."""
    out = '\n'.join(map(f, s.splitlines()))
    if out and not out.endswith('\n'):
        out += '\n'
    return out
| bsd-3-clause |
mancoast/CPythonPyc_test | fail/340_test_imghdr.py | 87 | 4413 | import imghdr
import io
import os
import unittest
import warnings
from test.support import findfile, TESTFN, unlink
TEST_FILES = (
('python.png', 'png'),
('python.gif', 'gif'),
('python.bmp', 'bmp'),
('python.ppm', 'ppm'),
('python.pgm', 'pgm'),
('python.pbm', 'pbm'),
('python.jpg', 'jpeg'),
('python.ras', 'rast'),
('python.sgi', 'rgb'),
('python.tiff', 'tiff'),
('python.xbm', 'xbm')
)
class UnseekableIO(io.FileIO):
    """A FileIO that pretends not to support seeking, for testing how
    imghdr.what handles streams without tell()/seek()."""
    def tell(self):
        raise io.UnsupportedOperation

    def seek(self, *args, **kwargs):
        raise io.UnsupportedOperation
class TestImghdr(unittest.TestCase):
    """Tests for imghdr.what: filename, stream, and raw-bytes inputs,
    plus error handling for bad arguments and unusable streams."""
    @classmethod
    def setUpClass(cls):
        # Load one known-good PNG once for the whole class.
        cls.testfile = findfile('python.png', subdir='imghdrdata')
        with open(cls.testfile, 'rb') as stream:
            cls.testdata = stream.read()

    def tearDown(self):
        unlink(TESTFN)

    def test_data(self):
        """Each sample is recognised via filename, stream, bytes, and bytearray."""
        for filename, expected in TEST_FILES:
            filename = findfile(filename, subdir='imghdrdata')
            self.assertEqual(imghdr.what(filename), expected)
            with open(filename, 'rb') as stream:
                self.assertEqual(imghdr.what(stream), expected)
            with open(filename, 'rb') as stream:
                data = stream.read()
            self.assertEqual(imghdr.what(None, data), expected)
            self.assertEqual(imghdr.what(None, bytearray(data)), expected)

    def test_register_test(self):
        """A user-registered test function participates in detection."""
        def test_jumbo(h, file):
            if h.startswith(b'eggs'):
                return 'ham'
        imghdr.tests.append(test_jumbo)
        self.addCleanup(imghdr.tests.pop)
        self.assertEqual(imghdr.what(None, b'eggs'), 'ham')

    def test_file_pos(self):
        """what() restores the stream position it started from."""
        with open(TESTFN, 'wb') as stream:
            stream.write(b'ababagalamaga')
            pos = stream.tell()
            stream.write(self.testdata)
        with open(TESTFN, 'rb') as stream:
            stream.seek(pos)
            self.assertEqual(imghdr.what(stream), 'png')
            self.assertEqual(stream.tell(), pos)

    def test_bad_args(self):
        """Wrong argument types/arities raise TypeError or AttributeError."""
        with self.assertRaises(TypeError):
            imghdr.what()
        with self.assertRaises(AttributeError):
            imghdr.what(None)
        with self.assertRaises(TypeError):
            imghdr.what(self.testfile, 1)
        with self.assertRaises(AttributeError):
            imghdr.what(os.fsencode(self.testfile))
        with open(self.testfile, 'rb') as f:
            with self.assertRaises(AttributeError):
                imghdr.what(f.fileno())

    def test_invalid_headers(self):
        """Near-miss magic numbers are not recognised."""
        for header in (b'\211PN\r\n',
                       b'\001\331',
                       b'\x59\xA6',
                       b'cutecat',
                       b'000000JFI',
                       b'GIF80'):
            self.assertIsNone(imghdr.what(None, header))

    def test_string_data(self):
        """str (not bytes) data raises TypeError."""
        with warnings.catch_warnings():
            warnings.simplefilter("ignore", BytesWarning)
            for filename, _ in TEST_FILES:
                filename = findfile(filename, subdir='imghdrdata')
                with open(filename, 'rb') as stream:
                    data = stream.read().decode('latin1')
                with self.assertRaises(TypeError):
                    imghdr.what(io.StringIO(data))
                with self.assertRaises(TypeError):
                    imghdr.what(None, data)

    def test_missing_file(self):
        self.assertRaises(FileNotFoundError)
        with self.assertRaises(FileNotFoundError):
            imghdr.what('missing')

    def test_closed_file(self):
        """A closed file or BytesIO raises ValueError."""
        stream = open(self.testfile, 'rb')
        stream.close()
        with self.assertRaises(ValueError) as cm:
            imghdr.what(stream)
        stream = io.BytesIO(self.testdata)
        stream.close()
        with self.assertRaises(ValueError) as cm:
            imghdr.what(stream)

    def test_unseekable(self):
        """A stream whose tell/seek raise propagates UnsupportedOperation."""
        with open(TESTFN, 'wb') as stream:
            stream.write(self.testdata)
        with UnseekableIO(TESTFN, 'rb') as stream:
            with self.assertRaises(io.UnsupportedOperation):
                imghdr.what(stream)

    def test_output_stream(self):
        """A write-only stream raises OSError on read."""
        with open(TESTFN, 'wb') as stream:
            stream.write(self.testdata)
            stream.seek(0)
            with self.assertRaises(OSError) as cm:
                imghdr.what(stream)
if __name__ == '__main__':
unittest.main()
| gpl-3.0 |
ibazzi/rk3288-kernel | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/EventClass.py | 4653 | 3596 | # EventClass.py
#
# This is a library defining some events types classes, which could
# be used by other scripts to analyzing the perf samples.
#
# Currently there are just a few classes defined for examples,
# PerfEvent is the base class for all perf event sample, PebsEvent
# is a HW base Intel x86 PEBS event, and user could add more SW/HW
# event classes based on requirements.
import struct
# Event types, user could add more here
EVTYPE_GENERIC = 0
EVTYPE_PEBS = 1 # Basic PEBS event
EVTYPE_PEBS_LL = 2 # PEBS event with load latency info
EVTYPE_IBS = 3
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
#
# Currently we don't have good way to tell the event type, but by
# the size of raw buffer, raw PEBS event with load latency data's
# size is 176 bytes, while the pure PEBS event's size is 144 bytes.
#
def create_event(name, comm, dso, symbol, raw_buf):
    """Factory: pick the event class by the raw buffer's size
    (144 = plain PEBS, 176 = PEBS with load latency, else generic)."""
    if (len(raw_buf) == 144):
        event = PebsEvent(name, comm, dso, symbol, raw_buf)
    elif (len(raw_buf) == 176):
        event = PebsNHM(name, comm, dso, symbol, raw_buf)
    else:
        event = PerfEvent(name, comm, dso, symbol, raw_buf)

    return event
# NOTE: this file is Python 2 (print statement below).
class PerfEvent(object):
    """Base class for one perf sample: identifying strings plus the
    raw sample buffer and an event-type tag."""
    event_num = 0  # class-wide count of events created

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_GENERIC):
        self.name = name
        self.comm = comm
        self.dso = dso
        self.symbol = symbol
        self.raw_buf = raw_buf
        self.ev_type = ev_type
        PerfEvent.event_num += 1

    def show(self):
        # One-line human-readable dump of the sample's identity.
        print "PMU event: name=%12s, symbol=%24s, comm=%8s, dso=%12s" % (self.name, self.symbol, self.comm, self.dso)
#
# Basic Intel PEBS (Precise Event-based Sampling) event, whose raw buffer
# contains the context info when that event happened: the EFLAGS and
# linear IP info, as well as all the registers.
#
class PebsEvent(PerfEvent):
    """Intel PEBS sample: unpacks EFLAGS, IP and the general-purpose
    registers from the first 80 bytes of the raw buffer."""
    pebs_num = 0  # class-wide count of PEBS events created

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS):
        # 10 little-endian u64s: flags, ip, then 8 GP registers.
        tmp_buf=raw_buf[0:80]
        flags, ip, ax, bx, cx, dx, si, di, bp, sp = struct.unpack('QQQQQQQQQQ', tmp_buf)
        self.flags = flags
        self.ip = ip
        self.ax = ax
        self.bx = bx
        self.cx = cx
        self.dx = dx
        self.si = si
        self.di = di
        self.bp = bp
        self.sp = sp
        PerfEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsEvent.pebs_num += 1
        del tmp_buf
#
# Intel Nehalem and Westmere support PEBS plus Load Latency info which lie
# in the four 64 bit words write after the PEBS data:
# Status: records the IA32_PERF_GLOBAL_STATUS register value
# DLA: Data Linear Address (EIP)
# DSE: Data Source Encoding, where the latency happens, hit or miss
# in L1/L2/L3 or IO operations
# LAT: the actual latency in cycles
#
class PebsNHM(PebsEvent):
    """Nehalem/Westmere PEBS sample with load-latency info: the four
    u64s after the PEBS data are status, data linear address, data
    source encoding, and latency in cycles."""
    pebs_nhm_num = 0  # class-wide count of PEBS-LL events created

    def __init__(self, name, comm, dso, symbol, raw_buf, ev_type=EVTYPE_PEBS_LL):
        tmp_buf=raw_buf[144:176]
        status, dla, dse, lat = struct.unpack('QQQQ', tmp_buf)
        self.status = status
        self.dla = dla
        self.dse = dse
        self.lat = lat
        PebsEvent.__init__(self, name, comm, dso, symbol, raw_buf, ev_type)
        PebsNHM.pebs_nhm_num += 1
        del tmp_buf
| gpl-2.0 |
shivam1111/odoo | addons/account/wizard/account_report_general_ledger.py | 267 | 3191 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class account_report_general_ledger(osv.osv_memory):
    """Wizard model collecting the options for the General Ledger
    report and launching it."""
    _inherit = "account.common.account.report"
    _name = "account.report.general.ledger"
    _description = "General Ledger Report"

    _columns = {
        'landscape': fields.boolean("Landscape Mode"),
        'initial_balance': fields.boolean('Include Initial Balances',
                                    help='If you selected to filter by date or period, this field allow you to add a row to display the amount of debit/credit/balance that precedes the filter you\'ve set.'),
        'amount_currency': fields.boolean("With Currency", help="It adds the currency column on report if the currency differs from the company currency."),
        'sortby': fields.selection([('sort_date', 'Date'), ('sort_journal_partner', 'Journal & Partner')], 'Sort by', required=True),
        'journal_ids': fields.many2many('account.journal', 'account_report_general_ledger_journal_rel', 'account_id', 'journal_id', 'Journals', required=True),
    }
    _defaults = {
        'landscape': True,
        'amount_currency': True,
        'sortby': 'sort_date',
        'initial_balance': False,
    }

    def onchange_fiscalyear(self, cr, uid, ids, fiscalyear=False, context=None):
        # Clear initial_balance when no fiscal year is selected.
        # NOTE(review): when a fiscalyear IS set this falls through and
        # implicitly returns None rather than {} -- confirm callers
        # tolerate that.
        res = {}
        if not fiscalyear:
            res['value'] = {'initial_balance': False}
            return res

    def _print_report(self, cr, uid, ids, data, context=None):
        """Assemble the report data from the wizard fields and return
        the report action."""
        if context is None:
            context = {}
        data = self.pre_print_report(cr, uid, ids, data, context=context)
        data['form'].update(self.read(cr, uid, ids, ['landscape',  'initial_balance', 'amount_currency', 'sortby'])[0])
        if not data['form']['fiscalyear_id']:# GTK client problem onchange does not consider in save record
            data['form'].update({'initial_balance': False})

        if data['form']['landscape'] is False:
            data['form'].pop('landscape')
        else:
            context['landscape'] = data['form']['landscape']

        return self.pool['report'].get_action(cr, uid, [], 'account.report_generalledger', data=data, context=context)
| agpl-3.0 |
mezz64/home-assistant | homeassistant/components/zha/device_tracker.py | 9 | 3321 | """Support for the ZHA platform."""
import functools
import time
from homeassistant.components.device_tracker import DOMAIN, SOURCE_TYPE_ROUTER
from homeassistant.components.device_tracker.config_entry import ScannerEntity
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from .core import discovery
from .core.const import (
CHANNEL_POWER_CONFIGURATION,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
from .sensor import Battery
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up the Zigbee Home Automation device tracker from config entry."""
    entities_to_create = hass.data[DATA_ZHA][DOMAIN]

    # Register for entity-creation signals; keep the unsubscribe handle
    # so ZHA can tear the listener down with the config entry.
    unsub = async_dispatcher_connect(
        hass,
        SIGNAL_ADD_ENTITIES,
        functools.partial(
            discovery.async_add_entities, async_add_entities, entities_to_create
        ),
    )
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsub)
@STRICT_MATCH(channel_names=CHANNEL_POWER_CONFIGURATION)
class ZHADeviceScannerEntity(ScannerEntity, ZhaEntity):
    """Represent a tracked device.

    Connectivity is inferred two ways: by polling the device's
    last_seen timestamp against a keepalive window, and by treating any
    battery report as proof of life.
    """

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Initialize the ZHA device tracker."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._battery_channel = self.cluster_channels.get(CHANNEL_POWER_CONFIGURATION)
        self._connected = False
        self._keepalive_interval = 60  # seconds without contact before "away"
        self._should_poll = True
        self._battery_level = None

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        if self._battery_channel:
            # Battery reports double as presence signals (see handler below).
            self.async_accept_signal(
                self._battery_channel,
                SIGNAL_ATTR_UPDATED,
                self.async_battery_percentage_remaining_updated,
            )

    async def async_update(self):
        """Handle polling."""
        if self.zha_device.last_seen is None:
            self._connected = False
        else:
            difference = time.time() - self.zha_device.last_seen
            if difference > self._keepalive_interval:
                self._connected = False
            else:
                self._connected = True

    @property
    def is_connected(self):
        """Return true if the device is connected to the network."""
        return self._connected

    @property
    def source_type(self):
        """Return the source type, eg gps or router, of the device."""
        return SOURCE_TYPE_ROUTER

    @callback
    def async_battery_percentage_remaining_updated(self, attr_id, attr_name, value):
        """Handle tracking."""
        # Only battery_percentage_remaining updates are of interest here.
        if not attr_name == "battery_percentage_remaining":
            return
        self.debug("battery_percentage_remaining updated: %s", value)
        # Any battery report implies the device is reachable.
        self._connected = True
        self._battery_level = Battery.formatter(value)
        self.async_write_ha_state()

    @property
    def battery_level(self):
        """Return the battery level of the device.

        Percentage from 0-100.
        """
        return self._battery_level
| apache-2.0 |
ErickMurillo/ciat_plataforma | guias_cacao/admin.py | 2 | 21034 | # -*- coding: utf-8 -*-
from django.contrib import admin
from .forms import *
from .models import *
from import_export.admin import ImportExportModelAdmin
# Inlines for FichaSombra: three sampling points, each with one photo,
# up to 12 observation points and one canopy-cover record, followed by
# the shade analysis/management sections.
class Foto1Inline(admin.TabularInline):
    model = Foto1
    extra = 1
    max_num = 1

class Punto1Inline(admin.TabularInline):
    model = Punto1
    extra = 1
    max_num = 12

class Cobertura1Inline(admin.TabularInline):
    model = Cobertura1
    extra = 1
    max_num = 1

#----------------------------------------------------------------
class Foto2Inline(admin.TabularInline):
    model = Foto2
    extra = 1
    max_num = 1

class Punto2Inline(admin.TabularInline):
    model = Punto2
    extra = 1
    max_num = 12

class Cobertura2Inline(admin.TabularInline):
    model = Cobertura2
    extra = 1
    max_num = 1

#----------------------------------------------------------------
class Foto3Inline(admin.TabularInline):
    model = Foto3
    extra = 1
    max_num = 1

class Punto3Inline(admin.TabularInline):
    model = Punto3
    extra = 1
    max_num = 12

class Cobertura3Inline(admin.TabularInline):
    model = Cobertura3
    extra = 1
    max_num = 1

#----------------------------------------
class AnalisisSombraInline(admin.TabularInline):
    model = AnalisisSombra
    extra = 1
    max_num = 1

class AccionesSombraInline(admin.TabularInline):
    model = AccionesSombra
    extra = 1
    max_num = 1

class ReducirSombraInline(admin.TabularInline):
    model = ReducirSombra
    extra = 1
    max_num = 1

class AumentarSombraInline(admin.TabularInline):
    model = AumentarSombra
    extra = 1
    max_num = 1

class ManejoSombraInline(admin.TabularInline):
    model = ManejoSombra
    extra = 1
    max_num = 1
class FichaSombraAdmin(admin.ModelAdmin):
    # Shade-assessment form: all per-point inlines are edited on one page.
    form = ProductorSombraAdminForm
    inlines = [Foto1Inline, Punto1Inline, Cobertura1Inline,
               Foto2Inline, Punto2Inline, Cobertura2Inline,
               Foto3Inline, Punto3Inline, Cobertura3Inline,
               AnalisisSombraInline, AccionesSombraInline,
               ReducirSombraInline, AumentarSombraInline,
               ManejoSombraInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        # Custom assets that restyle and script the large inline form.
        css = {
            'all': ('monitoreo/css/adminSombra.css',)
        }
        js = ('monitoreo/js/fichaSombra.js',)

# Register your models here.
admin.site.register(FichaSombra, FichaSombraAdmin)
#--------------------- admin ficha poda ---------------------------
# Inlines for FichaPoda: per sampling point, an A section (max 2 rows),
# a B section (max 7 rows) and a C section (1 row), plus analysis/management.
class Punto1AInline(admin.TabularInline):
    model = Punto1A
    extra = 1
    max_num = 2

class Punto1BInline(admin.TabularInline):
    model = Punto1B
    extra = 1
    max_num = 7

class Punto1CInline(admin.TabularInline):
    model = Punto1C
    extra = 1
    max_num = 1

class Punto2AInline(admin.TabularInline):
    model = Punto2A
    extra = 1
    max_num = 2

class Punto2BInline(admin.TabularInline):
    model = Punto2B
    extra = 1
    max_num = 7

class Punto2CInline(admin.TabularInline):
    model = Punto2C
    extra = 1
    max_num = 1

class Punto3AInline(admin.TabularInline):
    model = Punto3A
    extra = 1
    max_num = 2

class Punto3BInline(admin.TabularInline):
    model = Punto3B
    extra = 1
    max_num = 7

class Punto3CInline(admin.TabularInline):
    model = Punto3C
    extra = 1
    max_num = 1

class AnalisisPodaInline(admin.TabularInline):
    model = AnalisisPoda
    extra = 1
    max_num = 1

class ManejoPodaInline(admin.TabularInline):
    model = ManejoPoda
    extra = 1
    max_num = 1
class FichaPodaAdmin(admin.ModelAdmin):
    # Pruning-assessment form; same list/search layout as the other fichas.
    form = ProductorPodaAdminForm
    inlines = [Punto1AInline, Punto1BInline, Punto1CInline,
               Punto2AInline, Punto2BInline, Punto2CInline,
               Punto3AInline, Punto3BInline, Punto3CInline,
               AnalisisPodaInline, ManejoPodaInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        css = {
            'all': ('monitoreo/css/adminPoda.css',)
        }
        js = ('monitoreo/js/fichaPoda.js',)

# Register your models here.
admin.site.register(FichaPoda, FichaPodaAdmin)
#-------- admin ficha plaga ----------
# Inlines for FichaPlaga: pest/disease checklists, per-point observations
# with an associated severity ("nivel") row, and summary questions 6-9.
class PlagasEnfermedadInline(admin.TabularInline):
    model = PlagasEnfermedad
    extra = 1
    max_num = 13

class AccionesEnfermedadInline(admin.TabularInline):
    model = AccionesEnfermedad
    extra = 1
    max_num = 9

class OrientacionInline(admin.TabularInline):
    model = Orientacion
    extra = 1
    max_num = 1

class ObservacionPunto1Inline(admin.TabularInline):
    model = ObservacionPunto1
    extra = 1
    max_num = 13

class ObservacionPunto1NivelInline(admin.TabularInline):
    model = ObservacionPunto1Nivel
    extra = 1
    max_num = 1

class ObservacionPunto2Inline(admin.TabularInline):
    model = ObservacionPunto2
    extra = 1
    max_num = 13

class ObservacionPunto2NivelInline(admin.TabularInline):
    model = ObservacionPunto2Nivel
    extra = 1
    max_num = 1

class ObservacionPunto3Inline(admin.TabularInline):
    model = ObservacionPunto3
    extra = 1
    max_num = 13

class ObservacionPunto3NivelInline(admin.TabularInline):
    model = ObservacionPunto3Nivel
    extra = 1
    max_num = 1

class ProblemasPrincipalesInline(admin.TabularInline):
    model = ProblemasPrincipales
    extra = 1
    max_num = 1

class Punto6PlagasInline(admin.TabularInline):
    model = Punto6Plagas
    extra = 1
    max_num = 1

class Punto7PlagasInline(admin.TabularInline):
    model = Punto7Plagas
    extra = 1
    max_num = 9

class Punto8y9PlagasInline(admin.TabularInline):
    model = Punto8y9Plagas
    extra = 1
    max_num = 1
class FichaPlagaAdmin(admin.ModelAdmin):
    # Pest/disease survey form.
    form = ProductorPlagaAdminForm
    inlines = [PlagasEnfermedadInline, AccionesEnfermedadInline, OrientacionInline,
               ObservacionPunto1Inline, ObservacionPunto1NivelInline,
               ObservacionPunto2Inline, ObservacionPunto2NivelInline,
               ObservacionPunto3Inline, ObservacionPunto3NivelInline,
               ProblemasPrincipalesInline, Punto6PlagasInline, Punto7PlagasInline,
               Punto8y9PlagasInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        css = {
            'all': ('monitoreo/css/adminPlaga.css',)
        }
        js = ('monitoreo/js/fichaPlaga.js',)

admin.site.register(FichaPlaga, FichaPlagaAdmin)
# ----------------- ficha piso -------------------------------
# Inlines for FichaPiso (ground-cover survey), one per questionnaire section.
class PisoPunto1Inline(admin.TabularInline):
    model = PisoPunto1
    extra = 1
    max_num = 1

class PisoPunto3Inline(admin.TabularInline):
    model = PisoPunto3
    extra = 1
    max_num = 8

class PisoPunto4Inline(admin.TabularInline):
    model = PisoPunto4
    extra = 1
    max_num = 1

class PisoPunto5Inline(admin.TabularInline):
    model = PisoPunto5
    extra = 1
    max_num = 13

class PisoPunto6Inline(admin.TabularInline):
    model = PisoPunto6
    extra = 1
    max_num = 1

class PisoPunto7Inline(admin.TabularInline):
    model = PisoPunto7
    extra = 1
    max_num = 1

class PisoPunto8Inline(admin.TabularInline):
    model = PisoPunto8
    extra = 1
    max_num = 8

class PisoPunto10Inline(admin.TabularInline):
    model = PisoPunto10
    extra = 1
    max_num = 1
class FichaPisoAdmin(admin.ModelAdmin):
    # Ground-cover survey form.
    form = ProductorPisoAdminForm
    inlines = [PisoPunto1Inline, PisoPunto3Inline, PisoPunto4Inline, PisoPunto5Inline,
               PisoPunto6Inline, PisoPunto7Inline, PisoPunto8Inline, PisoPunto10Inline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        css = {
            'all': ('monitoreo/css/adminPiso.css',)
        }
        js = ('monitoreo/js/fichaPiso.js',)

admin.site.register(FichaPiso, FichaPisoAdmin)
class EspeciesAdmin(ImportExportModelAdmin):
    # Species catalogue with CSV import/export; size measurements are grouped
    # into collapsible small/medium/large fieldsets.
    fieldsets = (
        (None, {
            'fields': (('nombre', 'nombre_cientifico'), 'tipo', 'tipo_uso', 'foto')
        }),
        ('PEQUEÑO', {
            'classes': ('collapse',),
            'fields': ('p_altura', 'p_diametro', 'p_ancho'),
        }),
        ('MEDIANO', {
            'classes': ('collapse',),
            'fields': ('m_altura', 'm_diametro', 'm_ancho'),
        }),
        ('GRANDE', {
            'classes': ('collapse',),
            'fields': ('g_altura', 'g_diametro', 'g_ancho'),
        }),
    )
    list_display = ('nombre', 'nombre_cientifico', 'tipo')
    #list_filter = ('tipo', 'tipo_uso')
    search_fields = ['nombre', 'nombre_cientifico']

admin.site.register(Especies, EspeciesAdmin)
#----- ficha de suelo ----
# Inlines for FichaSuelo (soil survey): observation points 1-4, fertilizer
# records, soil analysis/type, improvement proposals and the point-9
# problem checklists (erosion, drainage, nutrients, etc.).
class Punto1SueloInline(admin.TabularInline):
    model = Punto1Suelo
    extra = 1
    max_num = 1

class PuntoASueloInline(admin.TabularInline):
    model = PuntoASuelo
    extra = 1
    max_num = 6

class PuntoBSueloInline(admin.TabularInline):
    model = PuntoBSuelo
    extra = 1
    max_num = 5

class Punto2ASueloInline(admin.TabularInline):
    model = Punto2ASuelo
    extra = 1
    max_num = 3

class Punto2BSueloInline(admin.TabularInline):
    model = Punto2BSuelo
    extra = 1
    max_num = 5

class Punto3SueloPunto1Inline(admin.TabularInline):
    model = Punto3SueloPunto1
    extra = 1
    max_num = 2

class Punto3SueloPunto2Inline(admin.TabularInline):
    model = Punto3SueloPunto2
    extra = 1
    max_num = 2

class Punto3SueloPunto3Inline(admin.TabularInline):
    model = Punto3SueloPunto3
    extra = 1
    max_num = 2

class Punto4SueloInline(admin.TabularInline):
    model = Punto4Suelo
    extra = 1
    max_num = 1

class Punto4SueloCosechaInline(admin.TabularInline):
    model = Punto4SueloCosecha
    extra = 1
    max_num = 3

class Punto4SueloSIInline(admin.TabularInline):
    model = Punto4SueloSI
    extra = 1
    max_num = 1

class Punto5SueloAbonosInline(admin.TabularInline):
    model = Punto5SueloAbonos
    extra = 1
    max_num = 16

class Punto6AnalisisSueloInline(admin.TabularInline):
    model = Punto6AnalisisSuelo
    extra = 1
    max_num = 8

class Punto7TipoSueloInline(admin.TabularInline):
    model = Punto7TipoSuelo
    extra = 1
    max_num = 1

class Punto8SueloPropuestaInline(admin.TabularInline):
    model = Punto8SueloPropuesta
    extra = 1
    max_num = 16

class Punto9ErosionInline(admin.TabularInline):
    model = Punto9Erosion
    extra = 1
    max_num = 1

class Punto9DrenajeInline(admin.TabularInline):
    model = Punto9Drenaje
    extra = 1
    max_num = 1

class Punto9NutrientesInline(admin.TabularInline):
    model = Punto9Nutrientes
    extra = 1
    max_num = 1

class Punto9ExcesoInline(admin.TabularInline):
    model = Punto9Exceso
    extra = 1
    max_num = 1

class Punto9DesbalanceInline(admin.TabularInline):
    model = Punto9Desbalance
    extra = 1
    max_num = 1

class Punto9EnfermedadesInline(admin.TabularInline):
    model = Punto9Enfermedades
    extra = 1
    max_num = 1
class FichaSueloAdmin(admin.ModelAdmin):
    # Soil survey form.
    form = ProductorSueloAdminForm
    inlines = [Punto1SueloInline, PuntoASueloInline, PuntoBSueloInline,
               Punto2ASueloInline, Punto2BSueloInline, Punto3SueloPunto1Inline,
               Punto3SueloPunto2Inline, Punto3SueloPunto3Inline,
               Punto4SueloInline, Punto4SueloCosechaInline, Punto4SueloSIInline,
               Punto5SueloAbonosInline, Punto6AnalisisSueloInline,
               Punto7TipoSueloInline, Punto8SueloPropuestaInline, Punto9ErosionInline,
               Punto9DrenajeInline, Punto9NutrientesInline, Punto9ExcesoInline,
               Punto9DesbalanceInline, Punto9EnfermedadesInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        # NOTE(review): reuses the Sombra assets (adminSombra.css /
        # fichaSombra.js) rather than Suelo-specific ones — confirm this is
        # intentional and not a copy-paste leftover.
        css = {
            'all': ('monitoreo/css/adminSombra.css',)
        }
        js = ('monitoreo/js/fichaSombra.js',)

# Register your models here.
admin.site.register(TipoFertilizantes)
admin.site.register(DatosAnalisis)
admin.site.register(FichaSuelo, FichaSueloAdmin)
#------ ficha vivero -------------------
# Inlines for FichaVivero (nursery visit). Note: several model names carry
# the original "Viviero" misspelling — kept as-is to match models.py.
class VivieroConversacionInline(admin.TabularInline):
    model = VivieroConversacion
    extra = 1
    max_num = 1

class ViveroConversacion2Inline(admin.TabularInline):
    model = ViveroConversacion2
    extra = 1
    max_num = 1

class VivieroObservacion1Inline(admin.TabularInline):
    model = VivieroObservacion1
    extra = 1
    max_num = 1

class VivieroObservacion2Inline(admin.TabularInline):
    model = VivieroObservacion2
    extra = 1
    max_num = 9

class VivieroObservacionProductosInline(admin.TabularInline):
    # No max_num: unlimited product rows may be added.
    model = VivieroObservacionProductos
    extra = 1

class VivieroAnalisisSituacionInline(admin.TabularInline):
    model = VivieroAnalisisSituacion
    extra = 1
    max_num = 1
class FichaViveroAdmin(admin.ModelAdmin):
    # Nursery visit form (no custom JS, only CSS).
    form = ProductorViveroAdminForm
    inlines = [VivieroConversacionInline, ViveroConversacion2Inline, VivieroObservacion1Inline,
               VivieroObservacion2Inline, VivieroObservacionProductosInline, VivieroAnalisisSituacionInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        css = {
            'all': ('guiacacao/css/adminVivero.css',)
        }

admin.site.register(FichaVivero, FichaViveroAdmin)
admin.site.register(ProductosVivero)
#--------- ficha cosecha -----------------
# Inlines for FichaCosecha (harvest): conversation sections, per-month
# flowering/harvest calendars (12 rows each) and per-point counts.
class CosechaConversacion1Inline(admin.TabularInline):
    model = CosechaConversacion1
    extra = 1
    max_num = 1

class CosechaConversacion2Inline(admin.TabularInline):
    model = CosechaConversacion2
    extra = 1
    max_num = 1

class CosechaMesesFloracionInline(admin.TabularInline):
    model = CosechaMesesFloracion
    extra = 1
    max_num = 12

class CosechaMesesCosechaInline(admin.TabularInline):
    model = CosechaMesesCosecha
    extra = 1
    max_num = 12

class CosechaPunto1Inline(admin.TabularInline):
    model = CosechaPunto1
    extra = 1
    max_num = 3

class CosechaPunto2Inline(admin.TabularInline):
    model = CosechaPunto2
    extra = 1
    max_num = 3

class CosechaPunto3Inline(admin.TabularInline):
    model = CosechaPunto3
    extra = 1
    max_num = 3

class CosechaAreaPlantasInline(admin.TabularInline):
    model = CosechaAreaPlantas
    extra = 1
    max_num = 1

class CosechaAnalisisInline(admin.TabularInline):
    model = CosechaAnalisis
    extra = 1
    max_num = 1
class FichaCosechaAdmin(admin.ModelAdmin):
    # Harvest form (no custom JS, only CSS).
    form = ProductorCosechaAdminForm
    inlines = [CosechaConversacion1Inline, CosechaConversacion2Inline, CosechaMesesFloracionInline,
               CosechaMesesCosechaInline, CosechaPunto1Inline, CosechaPunto2Inline, CosechaPunto3Inline,
               CosechaAreaPlantasInline, CosechaAnalisisInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

    class Media:
        css = {
            'all': ('guiacacao/css/adminCosecha.css',)
        }

admin.site.register(FichaCosecha, FichaCosechaAdmin)
#----------------------- ficha saf ------------------------------------
# Inlines for FichaSaf (agroforestry system): nine conversation sections
# followed by the observation sections and per-point observation tables.
class SafConversacion1Inline(admin.TabularInline):
    model = SafConversacion1
    extra = 1
    max_num = 1

class SafConversacion2Inline(admin.TabularInline):
    model = SafConversacion2
    extra = 1
    max_num = 12

class SafConversacion3Inline(admin.TabularInline):
    model = SafConversacion3
    extra = 1
    max_num = 12

class SafConversacion4Inline(admin.TabularInline):
    model = SafConversacion4
    extra = 1
    max_num = 12

class SafConversacion5Inline(admin.TabularInline):
    model = SafConversacion5
    extra = 1
    max_num = 1

class SafConversacion6Inline(admin.TabularInline):
    model = SafConversacion6
    extra = 1
    max_num = 1

class SafConversacion7Inline(admin.TabularInline):
    model = SafConversacion7
    extra = 1
    max_num = 4

class SafConversacion8Inline(admin.TabularInline):
    model = SafConversacion8
    extra = 1
    max_num = 6

class SafConversacion9Inline(admin.TabularInline):
    model = SafConversacion9
    extra = 1
    max_num = 1

class SafObservacionesInline(admin.TabularInline):
    model = SafObservaciones
    extra = 1
    max_num = 1

class SafObservaciones2Inline(admin.TabularInline):
    model = SafObservaciones2
    extra = 1
    max_num = 1

class SafObservaciones3Inline(admin.TabularInline):
    model = SafObservaciones3
    extra = 1
    max_num = 1

class SafObservacionPunto1Inline(admin.TabularInline):
    # No max_num: unlimited rows per observation point.
    model = SafObservacionPunto1
    extra = 1

class SafObservacionPunto2Inline(admin.TabularInline):
    model = SafObservacionPunto2
    extra = 1

class SafObservacionPunto3Inline(admin.TabularInline):
    model = SafObservacionPunto3
    extra = 1

class SafObservaciones4Inline(admin.TabularInline):
    model = SafObservaciones4
    extra = 1
    max_num = 1
class FichaSafAdmin(admin.ModelAdmin):
    # Agroforestry form; unlike the other fichas it declares no Media assets.
    form = ProductorSafAdminForm
    inlines = [SafConversacion1Inline, SafConversacion2Inline, SafConversacion3Inline,
               SafConversacion4Inline, SafConversacion5Inline, SafConversacion6Inline,
               SafConversacion7Inline, SafConversacion8Inline, SafConversacion9Inline,
               SafObservacionesInline, SafObservaciones2Inline, SafObservaciones3Inline,
               SafObservacionPunto1Inline, SafObservacionPunto2Inline, SafObservacionPunto3Inline,
               SafObservaciones4Inline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

admin.site.register(FichaSaf, FichaSafAdmin)
# ------- inicio admin Ficha cierre
# Inlines for FichaCierre (closing survey): management, cost, activity,
# knowledge, soil and pest sections.
class CierreManejo1Inline(admin.TabularInline):
    model = CierreManejo1
    extra = 1
    max_num = 1

class CierreManejo2Inline(admin.TabularInline):
    model = CierreManejo2
    extra = 1
    max_num = 1

class CierreManejo3Inline(admin.TabularInline):
    model = CierreManejo3
    extra = 1
    max_num = 1

class CierreManejo4Inline(admin.TabularInline):
    model = CierreManejo4
    extra = 1
    max_num = 1

class CierreManejo5Inline(admin.TabularInline):
    model = CierreManejo5
    extra = 1
    max_num = 1

class CierreManejo6Inline(admin.TabularInline):
    model = CierreManejo6
    extra = 1
    max_num = 1

class CierreManejo7Inline(admin.TabularInline):
    model = CierreManejo7
    extra = 1
    max_num = 1

class CierreCosto1Inline(admin.TabularInline):
    model = CierreCosto1
    extra = 1
    max_num = 1

class CierreActividadInline(admin.TabularInline):
    # No max_num: unlimited activity rows.
    model = CierreActividad
    extra = 1

class CierreManejoInline(admin.TabularInline):
    model = CierreManejo
    extra = 1

class CierreConocimiento1Inline(admin.TabularInline):
    model = CierreConocimiento1
    extra = 1
    max_num = 1

class CierreConocimiento2Inline(admin.TabularInline):
    model = CierreConocimiento2
    extra = 1
    max_num = 1

class CierreConocimiento3Inline(admin.TabularInline):
    model = CierreConocimiento3
    extra = 1
    max_num = 1

class CierreSuelo1Inline(admin.TabularInline):
    model = CierreSuelo1
    extra = 1
    max_num = 1

class CierreSuelo2Inline(admin.TabularInline):
    model = CierreSuelo2
    extra = 1
    max_num = 1

class CierreSuelo3Inline(admin.TabularInline):
    model = CierreSuelo3
    extra = 1
    max_num = 1

class CierrePlaga1Inline(admin.TabularInline):
    model = CierrePlaga1
    extra = 1
    max_num = 2

class CierrePlaga2Inline(admin.TabularInline):
    model = CierrePlaga2
    extra = 1
    max_num = 1

class CierrePlaga3Inline(admin.TabularInline):
    model = CierrePlaga3
    extra = 1
    max_num = 1

class CierreCicloTrabajoInline(admin.TabularInline):
    model = CierreCicloTrabajo
    extra = 1
    max_num = 1

class CierreBabaRojaInline(admin.TabularInline):
    model = CierreBabaRoja
    extra = 1
    max_num = 1
class FichaCierreAdmin(admin.ModelAdmin):
    # Closing-survey form; no Media assets.
    form = ProductorCierreAdminForm
    inlines = [CierreManejo1Inline, CierreManejo2Inline, CierreManejo3Inline, CierreManejo4Inline,
               CierreManejo5Inline, CierreManejo6Inline, CierreManejo7Inline, CierreCosto1Inline,
               CierreActividadInline, CierreBabaRojaInline, CierreManejoInline, CierreConocimiento1Inline, CierreConocimiento2Inline,
               CierreConocimiento3Inline, CierreSuelo1Inline, CierreSuelo2Inline, CierreSuelo3Inline,
               CierrePlaga1Inline, CierrePlaga2Inline, CierrePlaga3Inline, CierreCicloTrabajoInline]
    list_display = ('fecha_visita', 'productor', 'tecnico',)
    search_fields = ('productor__nombre',)
    date_hierarchy = 'fecha_visita'

admin.site.register(FichaCierre, FichaCierreAdmin)
admin.site.register(ActividadesCierre)
admin.site.register(ManejosCierre)
| mit |
0--key/lib | portfolio/Python/scrapy/johnlewis/johnlewis_spider.py | 2 | 4348 | from scrapy.spider import BaseSpider
from scrapy.selector import HtmlXPathSelector
from scrapy.http import Request, HtmlResponse, FormRequest
from scrapy.utils.url import urljoin_rfc
from scrapy.utils.response import get_base_url
from product_spiders.items import Product, ProductLoaderWithNameStrip as ProductLoader
class JohnLewisSpider(BaseSpider):
    """Crawl johnlewis.com electricals listings and yield Product items.

    ``parse`` fans out over the left-hand-nav sub-categories (falling back
    to the page itself when there are none); ``parse_products`` walks each
    paginated product grid.
    """

    name = 'johnlewis.com-johnlewis'
    allowed_domains = ['johnlewis.com']
    start_urls = ['http://www.johnlewis.com/Electricals/Televisions/Televisions/All+TVs/5013/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Blu-ray_2c+DVD+and+Home+Cinema/Blu-ray_2c+DVD+and+Home+Cinema/View+all+Cinema+Systems/78/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Blu-ray_2c+DVD+and+Home+Cinema/Blu-ray_2c+DVD+and+Home+Cinema/Blu-ray+Players/4741/ProductCategory.aspx',
                  # Fixed: this URL contained a stray space before "ProductCategory.aspx".
                  'http://www.johnlewis.com/Electricals/Blu-ray_2c+DVD+and+Home+Cinema/Blu-ray_2c+DVD+and+Home+Cinema/View+all+Digital+Recorders/24486/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Freeview+and+Freesat+Boxes/Freeview+and+Freesat+Boxes/Freeview+and+Freesat+Boxes/98/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/TV+Stands+and+Accessories/3D+Glasses+and+Transmitters/3D+Glasses+and+Transmitters/16022/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Cameras+and+Camcorders/Cameras+and+Camcorders/All+Cameras/4002/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Audio/All+Radios/View+all+Radios/1842/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Audio/Speaker+Docks/Speaker+Docks/22775/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Audio/Micro+Systems/Micro+Systems/52/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Audio/Wireless+Music+Players/Wireless+Music+Players/1332/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Audio/iPods+and+MP3+Players/Apple+iPods/1379/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Headphones/Headphones/View+all+headphones/660/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Kindle+and+eReaders/eBook+Readers/eBook+Readers/8957/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/iPad+and+Tablet+PCs/View+all+Tablets/View+all+Tablets/21438/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Laptops+and+Netbooks/Laptops+and+Netbooks/Laptops/398/ProductCategory.aspx',
                  'http://www.johnlewis.com/Electricals/Telephones/All+Telephones/All+Telephones/22237/ProductCategory.aspx']

    def parse(self, response):
        """Follow each left-hand-nav sub-category; if none, scrape this page."""
        hxs = HtmlXPathSelector(response)
        categories = hxs.select('//*[@id="LHNCtl1_rptGp_ctl00_LHNGpCtl1_subnavac"]/ul/li/a/@href').extract()
        if categories:
            for category in categories:
                url = urljoin_rfc(get_base_url(response), category)
                yield Request(url, callback=self.parse_products)
        else:
            # No sub-navigation: the start page itself is a product listing.
            yield Request(response.url, dont_filter=True, callback=self.parse_products)

    def parse_products(self, response):
        """Yield one Product per grid item, then follow the "next" pager link."""
        hxs = HtmlXPathSelector(response)
        products = hxs.select('//*[@id="SearchResultsGrid1_UpdatePanel1"]/div/div[@class="grid-item"]')
        if products:
            for product in products:
                loader = ProductLoader(item=Product(), selector=product)
                loader.add_xpath('name', 'div[@class="grid-copy"]/a[@class="gridtitle"]/text()')
                url = urljoin_rfc(get_base_url(response), product.select('div[@class="grid-copy"]/a[@class="gridtitle"]/@href').extract()[0])
                loader.add_value('url', url)
                # The price anchor may contain prefix text; the last
                # whitespace-separated token is the current price.
                price = ''.join(product.select('div[@class="grid-copy"]/a[@class="price"]/text()').extract()).split()[-1]
                loader.add_value('price', price)
                yield loader.load_item()
        # Renamed from `next` to avoid shadowing the builtin.
        next_page = hxs.select('//*[@id="paging"]/div[@class="pagenum"]/a[@class="next-pg"]/@href').extract()
        if next_page:
            url = urljoin_rfc(get_base_url(response), next_page[0])
            yield Request(url, callback=self.parse_products)
| apache-2.0 |
masiqi/douquan | pyxn/djangoxn/__init__.py | 1 | 8968 | import logging
import pyxn
from django.http import HttpResponse, HttpResponseRedirect
from django.core.exceptions import ImproperlyConfigured
from django.conf import settings
from member.models import User
# Fall back to Django's pure-Python thread-local implementation when the
# stdlib "threading.local" is unavailable.
try:
    from threading import local
except ImportError:
    from django.utils._threading_local import local

__all__ = ['Xiaonei', 'XiaoneiMiddleware', 'get_xiaonei_client', 'require_login', 'require_add']

# Per-thread storage for the request's Xiaonei client; populated by
# XiaoneiMiddleware.process_request and read by get_xiaonei_client().
_thread_locals = local()
class Xiaonei(pyxn.Xiaonei):
    """pyxn client subclass with a Django-friendly redirect helper."""

    def redirect(self, url):
        """Redirect to *url* from inside a canvas page.

        A normal HTTP redirect does not work inside the Xiaonei canvas, so
        the response body carries an <xn:redirect> tag instead.
        """
        markup = '<xn:redirect url="{0}" />'.format(url)
        return HttpResponse(markup)
def get_xiaonei_client():
    """Return the Xiaonei client bound to the calling thread.

    Raises ImproperlyConfigured when the middleware has not stored one,
    i.e. XiaoneiMiddleware is missing from the middleware stack.
    """
    if not hasattr(_thread_locals, 'xiaonei'):
        raise ImproperlyConfigured('Make sure you have the Xiaonei middleware installed.')
    return _thread_locals.xiaonei
def require_login(next=None, internal=None):
    """
    Decorator for Django views that requires the user to be logged in.
    The XiaoneiMiddleware must be installed.

    Standard usage:
        @require_login()
        def some_view(request):
            ...

    Redirecting after login:
        To use the 'next' parameter to redirect to a specific page after login, a callable should
        return a path relative to the Post-add URL. 'next' can also be an integer specifying how many
        parts of request.path to strip to find the relative URL of the canvas page. If 'next' is None,
        settings.callback_path and settings.app_name are checked to redirect to the same page after logging
        in. (This is the default behavior.)
        @require_login(next=some_callable)
        def some_view(request):
            ...
    """
    def decorator(view):
        def newview(request, *args, **kwargs):
            # Read from the function attributes (set below) so they can be
            # overridden after decoration.
            next = newview.next
            internal = newview.internal
            try:
                xn = request.xiaonei
            except:
                raise ImproperlyConfigured('Make sure you have the Xiaonei middleware installed.')
            if internal is None:
                internal = request.xiaonei.internal
            # Resolve the post-login target: callable -> computed path,
            # int -> strip that many leading path segments, None -> path
            # relative to the canvas callback, anything else -> ''.
            if callable(next):
                next = next(request.path)
            elif isinstance(next, int):
                next = '/'.join(request.path.split('/')[next + 1:])
            elif next is None and xn.callback_path and request.path.startswith(xn.callback_path):
                next = request.path[len(xn.callback_path):]
            elif not isinstance(next, str):
                next = ''
            if not xn.check_session(request):
                #If user has never logged in before, the get_login_url will redirect to the TOS page
                # logging.debug('user never logged in, redirect to login url')
                return xn.redirect(xn.get_login_url(next=next))
            if internal and request.method == 'GET' and xn.app_name:
                # Internal apps must be viewed through the canvas URL.
                return xn.redirect('%s%s' % (xn.get_app_url(), next))
            return view(request, *args, **kwargs)
        newview.next = next
        newview.internal = internal
        return newview
    return decorator
def require_add(next=None, internal=None, on_install=None):
    """
    Decorator for Django views that requires application installation.
    The XiaoneiMiddleware must be installed.

    Standard usage:
        @require_add()
        def some_view(request):
            ...

    Redirecting after installation:
        To use the 'next' parameter to redirect to a specific page after login, a callable should
        return a path relative to the Post-add URL. 'next' can also be an integer specifying how many
        parts of request.path to strip to find the relative URL of the canvas page. If 'next' is None,
        settings.callback_path and settings.app_name are checked to redirect to the same page after logging
        in. (This is the default behavior.)
        @require_add(next=some_callable)
        def some_view(request):
            ...

    Post-install processing:
        Set the on_install parameter to a callable in order to handle special post-install processing.
        The callable should take a request object as the parameter.
        @require_add(on_install=some_callable)
        def some_view(request):
            ...
    """
    def decorator(view):
        def newview(request, *args, **kwargs):
            next = newview.next
            internal = newview.internal
            try:
                xn = request.xiaonei
            except:
                raise ImproperlyConfigured('Make sure you have the Xiaonei middleware installed.')
            if internal is None:
                internal = request.xiaonei.internal
            # Same 'next' resolution as require_login, except unknown types
            # fall through to '' unconditionally.
            if callable(next):
                next = next(request.path)
            elif isinstance(next, int):
                next = '/'.join(request.path.split('/')[next + 1:])
            elif next is None and xn.callback_path and request.path.startswith(xn.callback_path):
                next = request.path[len(xn.callback_path):]
            else:
                next = ''
            if not xn.check_session(request):
                if xn.added:
                    # App installed but no valid session: send to login.
                    if request.method == 'GET' and xn.app_name:
                        return xn.redirect('%s%s' % (xn.get_app_url(), next))
                    return xn.redirect(xn.get_login_url(next=next))
                else:
                    return xn.redirect(xn.get_add_url(next=next))
            if not xn.added:
                return xn.redirect(xn.get_add_url(next=next))
            xn.get_uid(request)
            # Lazily create/refresh the local User mirroring the SNS account,
            # caching it on the request for subsequent decorated views.
            if not hasattr(request, '_cached_myuser'):
                sns_userinfo = xn.users.getInfo(xn.uid)[0]
                # NOTE(review): hard-coded default password '123qwe' for
                # auto-created accounts — confirm this is intended.
                user, created = User.objects.get_or_create(email=settings.XIAONEI_EMAIL % xn.uid, defaults={'email': settings.XIAONEI_EMAIL % xn.uid, 'name': sns_userinfo["name"], 'password': '123qwe'})
                if not created:
                    # Keep the local display name in sync with the SNS profile.
                    if user.name != sns_userinfo["name"]:
                        user.name = sns_userinfo["name"]
                        user.save()
                request._cached_myuser = user
                request.myuser = user
            else:
                request.myuser = request._cached_myuser
            if 'installed' in request.GET and callable(on_install):
                on_install(request)
            if internal and request.method == 'GET' and xn.app_name:
                return xn.redirect('%s%s' % (xn.get_app_url(), next))
            return view(request, *args, **kwargs)
        newview.next = next
        newview.internal = internal
        return newview
    return decorator
# try to preserve the argspecs
# When the third-party "decorator" package is available, re-wrap both
# decorators so introspection (help(), argspec) sees the original signatures.
try:
    import decorator
except ImportError:
    pass
else:
    def updater(f):
        def updated(*args, **kwargs):
            original = f(*args, **kwargs)
            def newdecorator(view):
                return decorator.new_wrapper(original(view), view)
            return decorator.new_wrapper(newdecorator, original)
        return decorator.new_wrapper(updated, f)
    require_login = updater(require_login)
    require_add = updater(require_add)
class XiaoneiMiddleware(object):
    """
    Middleware that attaches a Xiaonei object to every incoming request.

    The Xiaonei object created can also be accessed from models for the
    current thread by using get_xiaonei_client().
    """

    def __init__(self, api_key=None, secret_key=None, app_name=None, callback_path=None, internal=None):
        # Explicit constructor arguments win; otherwise fall back to settings.
        self.api_key = api_key or settings.XIAONEI_API_KEY
        self.secret_key = secret_key or settings.XIAONEI_SECRET_KEY
        self.app_name = app_name or getattr(settings, 'XIAONEI_APP_NAME', None)
        self.callback_path = callback_path or getattr(settings, 'XIAONEI_CALLBACK_PATH', None)
        # Fixed: `internal or ...` discarded an explicit internal=False and
        # fell back to the settings default; test for None instead.
        if internal is None:
            internal = getattr(settings, 'XIAONEI_INTERNAL', True)
        self.internal = internal
        self.proxy = None
        if getattr(settings, 'USE_HTTP_PROXY', False):
            self.proxy = settings.HTTP_PROXY

    def process_request(self, request):
        """Attach a fresh Xiaonei client to the request and the thread."""
        _thread_locals.xiaonei = request.xiaonei = Xiaonei(self.api_key, self.secret_key, app_name=self.app_name, callback_path=self.callback_path, internal=self.internal, proxy=self.proxy)
        if not self.internal and 'xiaonei_session_key' in request.session and 'xiaonei_user_id' in request.session:
            # Was a stray debugging print(); route through logging instead.
            logging.debug('process_request: %s', request.session)
            request.xiaonei.session_key = request.session['xiaonei_session_key']
            request.xiaonei.uid = request.session['xiaonei_user_id']

    def process_response(self, request, response):
        """Persist external-session credentials into the Django session."""
        if not self.internal and request.xiaonei.session_key and request.xiaonei.uid:
            request.session['xiaonei_session_key'] = request.xiaonei.session_key
            request.session['xiaonei_user_id'] = request.xiaonei.uid
        return response
| mit |
hn8841182/2015cd_midterm | static/Brython3.1.0-20150301-090019/Lib/xml/sax/saxutils.py | 730 | 11688 | """\
A library of useful helper classes to the SAX classes, for the
convenience of application and driver writers.
"""
import os, urllib.parse, urllib.request
import io
from . import handler
from . import xmlreader
def __dict_replace(s, d):
    """Replace substrings of a string using a dictionary.

    Replacements are applied in the dictionary's iteration order.
    """
    for old, new in d.items():
        s = s.replace(old, new)
    return s
def escape(data, entities={}):
    """Escape &, <, and > in a string of data.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # Ampersand must go first so the entities produced by the later
    # replacements are not themselves re-escaped.
    data = data.replace("&", "&amp;")
    data = data.replace(">", "&gt;")
    data = data.replace("<", "&lt;")
    for chars, entity in entities.items():
        data = data.replace(chars, entity)
    return data
def unescape(data, entities={}):
    """Unescape &amp;, &lt;, and &gt; in a string of data.

    You can unescape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    data = data.replace("&lt;", "<")
    data = data.replace("&gt;", ">")
    for entity, chars in entities.items():
        data = data.replace(entity, chars)
    # Ampersand must come last so that e.g. "&amp;lt;" decodes to "&lt;",
    # not "<".
    return data.replace("&amp;", "&")
def quoteattr(data, entities={}):
    """Escape and quote an attribute value.

    Escape &, <, and > in a string of data, then quote it for use as
    an attribute value.  The \" character will be escaped as well, if
    necessary.

    You can escape other strings of data by passing a dictionary as
    the optional entities parameter.  The keys and values must all be
    strings; each key will be replaced with its corresponding value.
    """
    # copy first so the caller's dict (and the shared default) is not mutated
    entities = entities.copy()
    # whitespace must become character references so it survives
    # attribute-value normalization on re-parse
    entities.update({'\n': '&#10;', '\r': '&#13;', '\t': '&#9;'})
    data = escape(data, entities)
    if '"' in data:
        if "'" in data:
            # both quote kinds present: double-quote and escape the '"'
            data = '"%s"' % data.replace('"', "&quot;")
        else:
            data = "'%s'" % data
    else:
        data = '"%s"' % data
    return data
def _gettextwriter(out, encoding):
    """Return a text-mode writer over *out* using *encoding*.

    Accepts None (falls back to sys.stdout), an existing text stream
    (returned as-is), a raw binary stream, or any object with a
    ``write`` method; the latter two are wrapped in a TextIOWrapper
    that encodes with ``errors='xmlcharrefreplace'``.
    """
    if out is None:
        import sys
        return sys.stdout

    if isinstance(out, io.TextIOBase):
        # use a text writer as is
        return out

    # wrap a binary writer with TextIOWrapper
    if isinstance(out, io.RawIOBase):
        # Keep the original file open when the TextIOWrapper is
        # destroyed
        class _wrapper:
            # impersonate the wrapped stream's class; delegate all
            # attribute access to it except close(), which is a no-op
            __class__ = out.__class__
            def __getattr__(self, name):
                return getattr(out, name)
        buffer = _wrapper()
        buffer.close = lambda: None
    else:
        # This is to handle passed objects that aren't in the
        # IOBase hierarchy, but just have a write method
        buffer = io.BufferedIOBase()
        buffer.writable = lambda: True
        buffer.write = out.write
        try:
            # TextIOWrapper uses this methods to determine
            # if BOM (for UTF-16, etc) should be added
            buffer.seekable = out.seekable
            buffer.tell = out.tell
        except AttributeError:
            pass
    # write_through=True so data reaches the underlying stream without
    # an extra text-layer buffer
    return io.TextIOWrapper(buffer, encoding=encoding,
                            errors='xmlcharrefreplace',
                            newline='\n',
                            write_through=True)
class XMLGenerator(handler.ContentHandler):
    """ContentHandler that writes the received SAX events back out as an
    XML document on *out*.

    With short_empty_elements=True, elements with no content are written
    as ``<tag/>`` by deferring the closing ``>`` of a start tag until the
    next event shows whether content follows.
    """

    def __init__(self, out=None, encoding="iso-8859-1", short_empty_elements=False):
        handler.ContentHandler.__init__(self)
        out = _gettextwriter(out, encoding)
        self._write = out.write
        self._flush = out.flush
        self._ns_contexts = [{}] # contains uri -> prefix dicts
        self._current_context = self._ns_contexts[-1]
        self._undeclared_ns_maps = []   # (prefix, uri) pairs not yet written
        self._encoding = encoding
        self._short_empty_elements = short_empty_elements
        # True while a start tag is open and its '>' has not been written
        self._pending_start_element = False

    def _qname(self, name):
        """Builds a qualified name from a (ns_url, localname) pair"""
        if name[0]:
            # Per http://www.w3.org/XML/1998/namespace, The 'xml' prefix is
            # bound by definition to http://www.w3.org/XML/1998/namespace. It
            # does not need to be declared and will not usually be found in
            # self._current_context.
            if 'http://www.w3.org/XML/1998/namespace' == name[0]:
                return 'xml:' + name[1]
            # The name is in a non-empty namespace
            prefix = self._current_context[name[0]]
            if prefix:
                # If it is not the default namespace, prepend the prefix
                return prefix + ":" + name[1]
        # Return the unqualified name
        return name[1]

    def _finish_pending_start_element(self,endElement=False):
        # Close a deferred start tag with '>' now that content follows.
        if self._pending_start_element:
            self._write('>')
            self._pending_start_element = False

    # ContentHandler methods

    def startDocument(self):
        self._write('<?xml version="1.0" encoding="%s"?>\n' %
                    self._encoding)

    def endDocument(self):
        self._flush()

    def startPrefixMapping(self, prefix, uri):
        # push a copy of the current mapping so endPrefixMapping can restore it
        self._ns_contexts.append(self._current_context.copy())
        self._current_context[uri] = prefix
        self._undeclared_ns_maps.append((prefix, uri))

    def endPrefixMapping(self, prefix):
        self._current_context = self._ns_contexts[-1]
        del self._ns_contexts[-1]

    def startElement(self, name, attrs):
        self._finish_pending_start_element()
        self._write('<' + name)
        # NOTE: the loop variable shadows the 'name' parameter, which is
        # not used again afterwards
        for (name, value) in attrs.items():
            self._write(' %s=%s' % (name, quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElement(self, name):
        if self._pending_start_element:
            # no content was written: emit a self-closing tag
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % name)

    def startElementNS(self, name, qname, attrs):
        self._finish_pending_start_element()
        self._write('<' + self._qname(name))

        # emit xmlns declarations accumulated since the last start tag
        for prefix, uri in self._undeclared_ns_maps:
            if prefix:
                self._write(' xmlns:%s="%s"' % (prefix, uri))
            else:
                self._write(' xmlns="%s"' % uri)
        self._undeclared_ns_maps = []

        for (name, value) in attrs.items():
            self._write(' %s=%s' % (self._qname(name), quoteattr(value)))
        if self._short_empty_elements:
            self._pending_start_element = True
        else:
            self._write(">")

    def endElementNS(self, name, qname):
        if self._pending_start_element:
            self._write('/>')
            self._pending_start_element = False
        else:
            self._write('</%s>' % self._qname(name))

    def characters(self, content):
        if content:
            self._finish_pending_start_element()
            self._write(escape(content))

    def ignorableWhitespace(self, content):
        if content:
            self._finish_pending_start_element()
            # whitespace is written verbatim, not escaped
            self._write(content)

    def processingInstruction(self, target, data):
        self._finish_pending_start_element()
        self._write('<?%s %s?>' % (target, data))
class XMLFilterBase(xmlreader.XMLReader):
    """This class is designed to sit between an XMLReader and the
    client application's event handlers. By default, it does nothing
    but pass requests up to the reader and events on to the handlers
    unmodified, but subclasses can override specific methods to modify
    the event stream or the configuration requests as they pass
    through."""

    def __init__(self, parent = None):
        xmlreader.XMLReader.__init__(self)
        self._parent = parent

    # ErrorHandler methods
    # (forward error events to the registered error handler unchanged)

    def error(self, exception):
        self._err_handler.error(exception)

    def fatalError(self, exception):
        self._err_handler.fatalError(exception)

    def warning(self, exception):
        self._err_handler.warning(exception)

    # ContentHandler methods
    # (forward document events to the registered content handler)

    def setDocumentLocator(self, locator):
        self._cont_handler.setDocumentLocator(locator)

    def startDocument(self):
        self._cont_handler.startDocument()

    def endDocument(self):
        self._cont_handler.endDocument()

    def startPrefixMapping(self, prefix, uri):
        self._cont_handler.startPrefixMapping(prefix, uri)

    def endPrefixMapping(self, prefix):
        self._cont_handler.endPrefixMapping(prefix)

    def startElement(self, name, attrs):
        self._cont_handler.startElement(name, attrs)

    def endElement(self, name):
        self._cont_handler.endElement(name)

    def startElementNS(self, name, qname, attrs):
        self._cont_handler.startElementNS(name, qname, attrs)

    def endElementNS(self, name, qname):
        self._cont_handler.endElementNS(name, qname)

    def characters(self, content):
        self._cont_handler.characters(content)

    def ignorableWhitespace(self, chars):
        self._cont_handler.ignorableWhitespace(chars)

    def processingInstruction(self, target, data):
        self._cont_handler.processingInstruction(target, data)

    def skippedEntity(self, name):
        self._cont_handler.skippedEntity(name)

    # DTDHandler methods

    def notationDecl(self, name, publicId, systemId):
        self._dtd_handler.notationDecl(name, publicId, systemId)

    def unparsedEntityDecl(self, name, publicId, systemId, ndata):
        self._dtd_handler.unparsedEntityDecl(name, publicId, systemId, ndata)

    # EntityResolver methods

    def resolveEntity(self, publicId, systemId):
        return self._ent_handler.resolveEntity(publicId, systemId)

    # XMLReader methods

    def parse(self, source):
        # install ourselves as all four handlers on the parent reader so
        # every event flows through this filter before reaching the client
        self._parent.setContentHandler(self)
        self._parent.setErrorHandler(self)
        self._parent.setEntityResolver(self)
        self._parent.setDTDHandler(self)
        self._parent.parse(source)

    def setLocale(self, locale):
        self._parent.setLocale(locale)

    def getFeature(self, name):
        return self._parent.getFeature(name)

    def setFeature(self, name, state):
        self._parent.setFeature(name, state)

    def getProperty(self, name):
        return self._parent.getProperty(name)

    def setProperty(self, name, value):
        self._parent.setProperty(name, value)

    # XMLFilter methods

    def getParent(self):
        return self._parent

    def setParent(self, parent):
        self._parent = parent
# --- Utility functions
def prepare_input_source(source, base=""):
    """This function takes an InputSource and an optional base URL and
    returns a fully resolved InputSource object ready for reading.

    *source* may also be a plain system-id string or any object with a
    ``read`` method; both are wrapped in an InputSource first.  When no
    byte stream is attached yet, the system id is resolved against
    *base* — as a local file if one exists, otherwise via urlopen.
    """
    if isinstance(source, str):
        source = xmlreader.InputSource(source)
    elif hasattr(source, "read"):
        # a stream-like object: wrap it and keep its name as system id
        f = source
        source = xmlreader.InputSource()
        source.setByteStream(f)
        if hasattr(f, "name"):
            source.setSystemId(f.name)

    if source.getByteStream() is None:
        sysid = source.getSystemId()
        basehead = os.path.dirname(os.path.normpath(base))
        sysidfilename = os.path.join(basehead, sysid)
        if os.path.isfile(sysidfilename):
            # prefer a local file relative to *base*
            source.setSystemId(sysidfilename)
            f = open(sysidfilename, "rb")
        else:
            # fall back to treating the system id as a URL
            source.setSystemId(urllib.parse.urljoin(base, sysid))
            f = urllib.request.urlopen(source.getSystemId())

        source.setByteStream(f)

    return source
| gpl-3.0 |
noironetworks/nova | nova/api/metadata/base.py | 11 | 20638 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Instance Metadata information."""
import base64
import os
import posixpath
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import importutils
from oslo_utils import timeutils
import six
from nova.api.ec2 import ec2utils
from nova.api.metadata import password
from nova import availability_zones as az
from nova import block_device
from nova.cells import opts as cells_opts
from nova.cells import rpcapi as cells_rpcapi
from nova import context
from nova import network
from nova import objects
from nova.objects import keypair as keypair_obj
from nova import utils
from nova.virt import netutils
metadata_opts = [
cfg.StrOpt('config_drive_skip_versions',
default=('1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 '
'2007-12-15 2008-02-01 2008-09-01'),
help='List of metadata versions to skip placing into the '
'config drive'),
cfg.StrOpt('vendordata_driver',
default='nova.api.metadata.vendordata_json.JsonFileVendorData',
help='Driver to use for vendor data'),
]
CONF = cfg.CONF
CONF.register_opts(metadata_opts)
CONF.import_opt('dhcp_domain', 'nova.network.manager')
VERSIONS = [
'1.0',
'2007-01-19',
'2007-03-01',
'2007-08-29',
'2007-10-10',
'2007-12-15',
'2008-02-01',
'2008-09-01',
'2009-04-04',
]
FOLSOM = '2012-08-10'
GRIZZLY = '2013-04-04'
HAVANA = '2013-10-17'
LIBERTY = '2015-10-15'
OPENSTACK_VERSIONS = [
FOLSOM,
GRIZZLY,
HAVANA,
LIBERTY,
]
VERSION = "version"
CONTENT = "content"
CONTENT_DIR = "content"
MD_JSON_NAME = "meta_data.json"
VD_JSON_NAME = "vendor_data.json"
NW_JSON_NAME = "network_data.json"
UD_NAME = "user_data"
PASS_NAME = "password"
MIME_TYPE_TEXT_PLAIN = "text/plain"
MIME_TYPE_APPLICATION_JSON = "application/json"
LOG = logging.getLogger(__name__)
class InvalidMetadataVersion(Exception):
    """Raised when a caller requests a metadata version that is unknown."""
class InvalidMetadataPath(Exception):
    """Raised when a metadata lookup path does not resolve to anything."""
class InstanceMetadata(object):
"""Instance metadata."""
def __init__(self, instance, address=None, content=None, extra_md=None,
network_info=None, vd_driver=None, network_metadata=None):
"""Creation of this object should basically cover all time consuming
collection. Methods after that should not cause time delays due to
network operations or lengthy cpu operations.
The user should then get a single instance and make multiple method
calls on it.
"""
if not content:
content = []
ctxt = context.get_admin_context()
# The default value of mimeType is set to MIME_TYPE_TEXT_PLAIN
self.set_mimetype(MIME_TYPE_TEXT_PLAIN)
self.instance = instance
self.extra_md = extra_md
self.availability_zone = az.get_instance_availability_zone(ctxt,
instance)
self.security_groups = objects.SecurityGroupList.get_by_instance(
ctxt, instance)
self.mappings = _format_instance_mapping(ctxt, instance)
if instance.user_data is not None:
self.userdata_raw = base64.b64decode(instance.user_data)
else:
self.userdata_raw = None
self.address = address
# expose instance metadata.
self.launch_metadata = utils.instance_meta(instance)
self.password = password.extract_password(instance)
self.uuid = instance.uuid
self.content = {}
self.files = []
# get network info, and the rendered network template
if network_info is None:
network_info = instance.info_cache.network_info
# expose network metadata
if network_metadata is None:
self.network_metadata = netutils.get_network_metadata(network_info)
else:
self.network_metadata = network_metadata
self.ip_info = \
ec2utils.get_ip_info_for_instance_from_nw_info(network_info)
self.network_config = None
cfg = netutils.get_injected_network_template(network_info)
if cfg:
key = "%04i" % len(self.content)
self.content[key] = cfg
self.network_config = {"name": "network_config",
'content_path': "/%s/%s" % (CONTENT_DIR, key)}
# 'content' is passed in from the configdrive code in
# nova/virt/libvirt/driver.py. That's how we get the injected files
# (personalities) in. AFAIK they're not stored in the db at all,
# so are not available later (web service metadata time).
for (path, contents) in content:
key = "%04i" % len(self.content)
self.files.append({'path': path,
'content_path': "/%s/%s" % (CONTENT_DIR, key)})
self.content[key] = contents
if vd_driver is None:
vdclass = importutils.import_class(CONF.vendordata_driver)
else:
vdclass = vd_driver
self.vddriver = vdclass(instance=instance, address=address,
extra_md=extra_md, network_info=network_info)
self.route_configuration = None
def _route_configuration(self):
if self.route_configuration:
return self.route_configuration
path_handlers = {UD_NAME: self._user_data,
PASS_NAME: self._password,
VD_JSON_NAME: self._vendor_data,
MD_JSON_NAME: self._metadata_as_json,
NW_JSON_NAME: self._network_data,
VERSION: self._handle_version,
CONTENT: self._handle_content}
self.route_configuration = RouteConfiguration(path_handlers)
return self.route_configuration
def set_mimetype(self, mime_type):
self.md_mimetype = mime_type
def get_mimetype(self):
return self.md_mimetype
def get_ec2_metadata(self, version):
if version == "latest":
version = VERSIONS[-1]
if version not in VERSIONS:
raise InvalidMetadataVersion(version)
hostname = self._get_hostname()
floating_ips = self.ip_info['floating_ips']
floating_ip = floating_ips and floating_ips[0] or ''
fixed_ips = self.ip_info['fixed_ips']
fixed_ip = fixed_ips and fixed_ips[0] or ''
fmt_sgroups = [x['name'] for x in self.security_groups]
meta_data = {
'ami-id': self.instance.ec2_ids.ami_id,
'ami-launch-index': self.instance.launch_index,
'ami-manifest-path': 'FIXME',
'instance-id': self.instance.ec2_ids.instance_id,
'hostname': hostname,
'local-ipv4': fixed_ip or self.address,
'reservation-id': self.instance.reservation_id,
'security-groups': fmt_sgroups}
# public keys are strangely rendered in ec2 metadata service
# meta-data/public-keys/ returns '0=keyname' (with no trailing /)
# and only if there is a public key given.
# '0=keyname' means there is a normally rendered dict at
# meta-data/public-keys/0
#
# meta-data/public-keys/ : '0=%s' % keyname
# meta-data/public-keys/0/ : 'openssh-key'
# meta-data/public-keys/0/openssh-key : '%s' % publickey
if self.instance.key_name:
meta_data['public-keys'] = {
'0': {'_name': "0=" + self.instance.key_name,
'openssh-key': self.instance.key_data}}
if self._check_version('2007-01-19', version):
meta_data['local-hostname'] = hostname
meta_data['public-hostname'] = hostname
meta_data['public-ipv4'] = floating_ip
if False and self._check_version('2007-03-01', version):
# TODO(vish): store product codes
meta_data['product-codes'] = []
if self._check_version('2007-08-29', version):
instance_type = self.instance.get_flavor()
meta_data['instance-type'] = instance_type['name']
if False and self._check_version('2007-10-10', version):
# TODO(vish): store ancestor ids
meta_data['ancestor-ami-ids'] = []
if self._check_version('2007-12-15', version):
meta_data['block-device-mapping'] = self.mappings
if self.instance.ec2_ids.kernel_id:
meta_data['kernel-id'] = self.instance.ec2_ids.kernel_id
if self.instance.ec2_ids.ramdisk_id:
meta_data['ramdisk-id'] = self.instance.ec2_ids.ramdisk_id
if self._check_version('2008-02-01', version):
meta_data['placement'] = {'availability-zone':
self.availability_zone}
if self._check_version('2008-09-01', version):
meta_data['instance-action'] = 'none'
data = {'meta-data': meta_data}
if self.userdata_raw is not None:
data['user-data'] = self.userdata_raw
return data
def get_ec2_item(self, path_tokens):
# get_ec2_metadata returns dict without top level version
data = self.get_ec2_metadata(path_tokens[0])
return find_path_in_tree(data, path_tokens[1:])
def get_openstack_item(self, path_tokens):
if path_tokens[0] == CONTENT_DIR:
return self._handle_content(path_tokens)
return self._route_configuration().handle_path(path_tokens)
def _metadata_as_json(self, version, path):
metadata = {'uuid': self.uuid}
if self.launch_metadata:
metadata['meta'] = self.launch_metadata
if self.files:
metadata['files'] = self.files
if self.extra_md:
metadata.update(self.extra_md)
if self.network_config:
metadata['network_config'] = self.network_config
if self.instance.key_name:
metadata['public_keys'] = {
self.instance.key_name: self.instance.key_data
}
if cells_opts.get_cell_type() == 'compute':
cells_api = cells_rpcapi.CellsAPI()
keypair = cells_api.get_keypair_at_top(
context.get_admin_context(), self.instance.user_id,
self.instance.key_name)
else:
keypair = keypair_obj.KeyPair.get_by_name(
context.get_admin_context(), self.instance.user_id,
self.instance.key_name)
metadata['keys'] = [
{'name': keypair.name,
'type': keypair.type,
'data': keypair.public_key}
]
metadata['hostname'] = self._get_hostname()
metadata['name'] = self.instance.display_name
metadata['launch_index'] = self.instance.launch_index
metadata['availability_zone'] = self.availability_zone
if self._check_os_version(GRIZZLY, version):
metadata['random_seed'] = base64.b64encode(os.urandom(512))
if self._check_os_version(LIBERTY, version):
metadata['project_id'] = self.instance.project_id
self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
return jsonutils.dumps(metadata)
def _handle_content(self, path_tokens):
if len(path_tokens) == 1:
raise KeyError("no listing for %s" % "/".join(path_tokens))
if len(path_tokens) != 2:
raise KeyError("Too many tokens for /%s" % CONTENT_DIR)
return self.content[path_tokens[1]]
def _handle_version(self, version, path):
# request for /version, give a list of what is available
ret = [MD_JSON_NAME]
if self.userdata_raw is not None:
ret.append(UD_NAME)
if self._check_os_version(GRIZZLY, version):
ret.append(PASS_NAME)
if self._check_os_version(HAVANA, version):
ret.append(VD_JSON_NAME)
if self._check_os_version(LIBERTY, version):
ret.append(NW_JSON_NAME)
return ret
def _user_data(self, version, path):
if self.userdata_raw is None:
raise KeyError(path)
return self.userdata_raw
def _network_data(self, version, path):
if self.network_metadata is None:
return jsonutils.dumps({})
return jsonutils.dumps(self.network_metadata)
def _password(self, version, path):
if self._check_os_version(GRIZZLY, version):
return password.handle_password
raise KeyError(path)
def _vendor_data(self, version, path):
if self._check_os_version(HAVANA, version):
self.set_mimetype(MIME_TYPE_APPLICATION_JSON)
return jsonutils.dumps(self.vddriver.get())
raise KeyError(path)
def _check_version(self, required, requested, versions=VERSIONS):
return versions.index(requested) >= versions.index(required)
def _check_os_version(self, required, requested):
return self._check_version(required, requested, OPENSTACK_VERSIONS)
def _get_hostname(self):
return "%s%s%s" % (self.instance.hostname,
'.' if CONF.dhcp_domain else '',
CONF.dhcp_domain)
def lookup(self, path):
    """Resolve a metadata *path* and return the data found there.

    Paths are normalized, defaulted into the ec2 namespace when they
    name neither "ec2" nor "openstack", and dispatched to the matching
    item getter.  A top-level request returns the list of available
    versions instead.  Raises InvalidMetadataPath when the path cannot
    be resolved.
    """
    if path == "" or path[0] != "/":
        path = posixpath.normpath("/" + path)
    else:
        path = posixpath.normpath(path)

    # Set default mimeType. It will be modified only if there is a change
    self.set_mimetype(MIME_TYPE_TEXT_PLAIN)

    # fix up requests, prepending /ec2 to anything that does not match
    path_tokens = path.split('/')[1:]
    if path_tokens[0] not in ("ec2", "openstack"):
        if path_tokens[0] == "":
            # request for /
            path_tokens = ["ec2"]
        else:
            path_tokens = ["ec2"] + path_tokens
        path = "/" + "/".join(path_tokens)

    # all values of 'path' input starts with '/' and have no trailing /

    # specifically handle the top level request
    if len(path_tokens) == 1:
        if path_tokens[0] == "openstack":
            # NOTE(vish): don't show versions that are in the future
            today = timeutils.utcnow().strftime("%Y-%m-%d")
            versions = [v for v in OPENSTACK_VERSIONS if v <= today]
            if OPENSTACK_VERSIONS != versions:
                LOG.debug("future versions %s hidden in version list",
                          [v for v in OPENSTACK_VERSIONS
                           if v not in versions])
            versions += ["latest"]
        else:
            versions = VERSIONS + ["latest"]
        return versions

    try:
        if path_tokens[0] == "openstack":
            data = self.get_openstack_item(path_tokens[1:])
        else:
            data = self.get_ec2_item(path_tokens[1:])
    except (InvalidMetadataVersion, KeyError):
        # both "unknown version" and "unknown key" surface as a bad path
        raise InvalidMetadataPath(path)

    return data
def metadata_for_config_drive(self):
"""Yields (path, value) tuples for metadata elements."""
# EC2 style metadata
for version in VERSIONS + ["latest"]:
if version in CONF.config_drive_skip_versions.split(' '):
continue
data = self.get_ec2_metadata(version)
if 'user-data' in data:
filepath = os.path.join('ec2', version, 'user-data')
yield (filepath, data['user-data'])
del data['user-data']
try:
del data['public-keys']['0']['_name']
except KeyError:
pass
filepath = os.path.join('ec2', version, 'meta-data.json')
yield (filepath, jsonutils.dumps(data['meta-data']))
ALL_OPENSTACK_VERSIONS = OPENSTACK_VERSIONS + ["latest"]
for version in ALL_OPENSTACK_VERSIONS:
path = 'openstack/%s/%s' % (version, MD_JSON_NAME)
yield (path, self.lookup(path))
path = 'openstack/%s/%s' % (version, UD_NAME)
if self.userdata_raw is not None:
yield (path, self.lookup(path))
if self._check_version(HAVANA, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, VD_JSON_NAME)
yield (path, self.lookup(path))
for (cid, content) in six.iteritems(self.content):
if self._check_version(LIBERTY, version, ALL_OPENSTACK_VERSIONS):
path = 'openstack/%s/%s' % (version, NW_JSON_NAME)
yield (path, self.lookup(path))
yield ('%s/%s/%s' % ("openstack", CONTENT_DIR, cid), content)
class RouteConfiguration(object):
    """Routes metadata paths to request handlers.

    *path_handler* maps relative path strings to callables taking
    (version, path).
    """

    def __init__(self, path_handler):
        self.path_handlers = path_handler

    def _version(self, version):
        """Resolve "latest" to the newest version; reject unknown ones."""
        if version == "latest":
            return OPENSTACK_VERSIONS[-1]
        if version not in OPENSTACK_VERSIONS:
            raise InvalidMetadataVersion(version)
        return version

    def handle_path(self, path_tokens):
        """Dispatch a tokenized request path to its registered handler."""
        version = self._version(path_tokens[0])
        path = VERSION if len(path_tokens) == 1 else '/'.join(path_tokens[1:])
        # a missing entry and an explicitly-None entry both raise
        # KeyError(path), matching the original lookup semantics
        path_handler = self.path_handlers.get(path)
        if path_handler is None:
            raise KeyError(path)
        return path_handler(version, path)
class VendorDataDriver(object):
    """The base VendorData Drivers should inherit from.

    Subclasses are expected to populate ``self._data`` in ``__init__``;
    the base implementation exposes an empty mapping.
    """

    def __init__(self, *args, **kwargs):
        """Init method should do all expensive operations."""
        self._data = dict()

    def get(self):
        """Return a dictionary of primitives to be rendered in metadata

        :return: A dictionary or primitives.
        """
        return self._data
def get_metadata_by_address(address):
    """Build InstanceMetadata for the instance owning fixed IP *address*.

    Looks up the fixed IP with an admin context, then delegates to
    get_metadata_by_instance_id with the resolved instance uuid.
    """
    ctxt = context.get_admin_context()
    fixed_ip = network.API().get_fixed_ip_by_address(ctxt, address)

    return get_metadata_by_instance_id(fixed_ip['instance_uuid'],
                                       address,
                                       ctxt)
def get_metadata_by_instance_id(instance_id, address, ctxt=None):
    """Build InstanceMetadata for the instance with uuid *instance_id*.

    An admin context is created when *ctxt* is not supplied.  The extra
    attrs loaded here are the ones InstanceMetadata reads in __init__.
    """
    ctxt = ctxt or context.get_admin_context()
    instance = objects.Instance.get_by_uuid(
        ctxt, instance_id, expected_attrs=['ec2_ids', 'flavor', 'info_cache'])
    return InstanceMetadata(instance, address)
def _format_instance_mapping(ctxt, instance):
    """Return the EC2-style block-device mapping dict for *instance*."""
    bdms = objects.BlockDeviceMappingList.get_by_instance_uuid(
        ctxt, instance.uuid)
    return block_device.instance_block_mapping(instance, bdms)
def ec2_md_print(data):
    """Render *data* as an EC2 metadata-service directory listing.

    Dicts become a newline-separated listing of their sorted keys:
    a key whose value is a dict is shown as "key/" (a sub-directory),
    unless that dict carries a '_name' entry, in which case the name is
    shown instead; '_name' keys themselves are omitted.  Lists are
    joined with newlines and anything else is stringified.
    """
    if isinstance(data, dict):
        lines = []
        for key in sorted(data.keys()):
            if key == '_name':
                continue
            value = data[key]
            if isinstance(value, dict):
                if '_name' in value:
                    lines.append(str(value['_name']))
                else:
                    # trailing slash marks a traversable sub-tree
                    lines.append(key + '/')
            else:
                lines.append(key)
        # join instead of repeated '+=' — no trailing-newline slice needed
        return '\n'.join(lines)
    elif isinstance(data, list):
        return '\n'.join(data)
    else:
        return str(data)
def find_path_in_tree(data, path_tokens):
# given a dict/list tree, and a path in that tree, return data found there.
for i in range(0, len(path_tokens)):
if isinstance(data, dict) or isinstance(data, list):
if path_tokens[i] in data:
data = data[path_tokens[i]]
else:
raise KeyError("/".join(path_tokens[0:i]))
else:
if i != len(path_tokens) - 1:
raise KeyError("/".join(path_tokens[0:i]))
data = data[path_tokens[i]]
return data
| apache-2.0 |
bryanl/doit | vendor/github.com/ugorji/go/codec/test.py | 1516 | 4019 | #!/usr/bin/env python
# This will create golden files in a directory passed to it.
# A Test calls this internally to create the golden files
# So it can process them (so we don't have to checkin the files).
# Ensure msgpack-python and cbor are installed first, using:
# sudo apt-get install python-dev
# sudo apt-get install python-pip
# pip install --user msgpack-python msgpack-rpc-python cbor
# Ensure all "string" keys are utf strings (else encoded as bytes)
import cbor, msgpack, msgpackrpc, sys, os, threading
def get_test_data_list():
    """Return the canonical test values: all primitive types, the list of
    primitives nested as a single element, and a few combo containers."""
    scalars = [
        -8,
        -1616,
        -32323232,
        -6464646464646464,
        192,
        1616,
        32323232,
        6464646464646464,
        192,
        -3232.0,
        -6464646464.0,
        3232.0,
        6464.0,
        6464646464.0,
        False,
        True,
        u"null",
        None,
        u"someday",
        1328176922000002000,
        u"",
        -2206187877999998000,
        u"bytestring",
        270,
        u"none",
        -2013855847999995777,
    ]
    composites = [
        {"true": True,
         "false": False},
        {"true": u"True",
         "false": False,
         "uint16(1616)": 1616},
        {"list": [1616, 32323232, True, -3232.0,
                  {"TRUE": True, "FALSE": False}, [True, False]],
         "int32": 32323232, "bool": True,
         "LONG STRING": u"123456789012345678901234567890123456789012345678901234567890",
         "SHORT STRING": u"1234567890"},
        {True: "true", 138: False, "false": 200},
    ]
    # order matters: the scalars, then the scalars list itself as one
    # nested element, then the int 1, then the composite containers
    return scalars + [scalars, 1] + composites
def build_test_data(destdir):
    """Write each test value to *destdir* as numbered golden files.

    Produces ``<i>.msgpack.golden`` and ``<i>.cbor.golden`` for the i-th
    entry of get_test_data_list().
    """
    l = get_test_data_list()
    for i in range(len(l)):
        # packer = msgpack.Packer()
        serialized = msgpack.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.msgpack.golden'), 'wb')
        f.write(serialized)
        f.close()
        serialized = cbor.dumps(l[i])
        f = open(os.path.join(destdir, str(i) + '.cbor.golden'), 'wb')
        f.write(serialized)
        f.close()
def doRpcServer(port, stopTimeSec):
    """Run a msgpack-rpc echo server on localhost:*port*.

    When *stopTimeSec* > 0 a timer thread stops the server after that
    many seconds; otherwise it runs until interrupted.
    """
    class EchoHandler(object):
        def Echo123(self, msg1, msg2, msg3):
            return ("1:%s 2:%s 3:%s" % (msg1, msg2, msg3))
        def EchoStruct(self, msg):
            return ("%s" % msg)

    addr = msgpackrpc.Address('localhost', port)
    server = msgpackrpc.Server(EchoHandler())
    server.listen(addr)
    # run thread to stop it after stopTimeSec seconds if > 0
    if stopTimeSec > 0:
        def myStopRpcServer():
            server.stop()
        t = threading.Timer(stopTimeSec, myStopRpcServer)
        t.start()
    server.start()
def doRpcClientToPythonSvc(port):
    # Call the Python echo service (doRpcServer) and print the replies.
    # NOTE(review): Python 2 print statements — this script targets py2.
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("Echo123", "A1", "B2", "C3")
    print client.call("EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doRpcClientToGoSvc(port):
    # Call the Go echo service; method names are namespaced and args are
    # passed as a single list, matching the Go rpc conventions.
    # NOTE(review): Python 2 print statements — this script targets py2.
    # print ">>>> port: ", port, " <<<<<"
    address = msgpackrpc.Address('localhost', port)
    client = msgpackrpc.Client(address, unpack_encoding='utf-8')
    print client.call("TestRpcInt.Echo123", ["A1", "B2", "C3"])
    print client.call("TestRpcInt.EchoStruct", {"A" :"Aa", "B":"Bb", "C":"Cc"})
def doMain(args):
    """Dispatch on the first CLI argument; print usage on mismatch."""
    if len(args) == 2 and args[0] == "testdata":
        build_test_data(args[1])
    elif len(args) == 3 and args[0] == "rpc-server":
        doRpcServer(int(args[1]), int(args[2]))
    elif len(args) == 2 and args[0] == "rpc-client-python-service":
        doRpcClientToPythonSvc(int(args[1]))
    elif len(args) == 2 and args[0] == "rpc-client-go-service":
        doRpcClientToGoSvc(int(args[1]))
    else:
        print("Usage: test.py " +
              "[testdata|rpc-server|rpc-client-python-service|rpc-client-go-service] ...")
doMain(sys.argv[1:])
| apache-2.0 |
percyfal/bokeh | examples/howto/layouts/words_and_plots.py | 3 | 3845 | from bokeh.sampledata.glucose import data
from bokeh.sampledata.iris import flowers
from bokeh.io import show, output_file
from bokeh.layouts import layout
from bokeh.models import ColumnDataSource, Paragraph, HoverTool, Div
from bokeh.plotting import figure
output_file("words_and_plots.html")
def text():
return Paragraph(text="""
Bacon ipsum dolor amet hamburger brisket prosciutto, pork ball tip andouille
sausage landjaeger filet mignon ribeye ground round. Jerky fatback cupim
landjaeger meatball pork loin corned beef, frankfurter short ribs short loin
bresaola capicola chuck kevin. Andouille biltong turkey, tail t-bone ribeye
short loin tongue prosciutto kielbasa short ribs boudin. Swine beef ribs
tri-tip filet mignon bresaola boudin beef meatball venison leberkas fatback
strip steak landjaeger drumstick prosciutto.
Bacon ipsum dolor amet hamburger brisket prosciutto, pork ball tip andouille
sausage landjaeger filet mignon ribeye ground round. Jerky fatback cupim
landjaeger meatball pork loin corned beef, frankfurter short ribs short loin
bresaola capicola chuck kevin. Andouille biltong turkey, tail t-bone ribeye
short loin tongue prosciutto kielbasa short ribs boudin. Swine beef ribs
tri-tip filet mignon bresaola boudin beef meatball venison leberkas fatback
strip steak landjaeger drumstick prosciutto.
""")
def scatter():
    """Build the iris scatter plot figure with an off-canvas legend."""
    colormap = {'setosa': 'red', 'versicolor': 'green', 'virginica': 'blue'}
    source = ColumnDataSource(flowers)
    # per-row colors derived from the species column
    source.data['colors'] = [colormap[x] for x in flowers['species']]
    s = figure(title = "Iris Morphology")
    s.xaxis.axis_label = 'Petal Length'
    s.yaxis.axis_label = 'Petal Width'
    s.circle("petal_length", "petal_width", color="colors", source=source,
             fill_alpha=0.2, size=10, legend="species")

    # Lets move the legend off-canvas!
    legend = s.legend[0]
    legend.border_line_color = None
    legend.orientation = 'horizontal'
    legend.location = (0, 0)
    s.above.append(legend)

    return s
def hover_plot():
    """Build a glucose time-series figure with hover-highlighted points."""
    # NOTE(review): DataFrame.ix is deprecated/removed in modern pandas —
    # this example assumes an older pandas; verify before upgrading.
    x = data.ix['2010-10-06'].index.to_series()
    y = data.ix['2010-10-06']['glucose']
    p = figure(
        plot_width=800, plot_height=400, x_axis_type="datetime",
        tools="", toolbar_location=None, title='Hover over points'
    )
    p.line(x, y, line_dash="4 4", line_width=1, color='gray')
    cr = p.circle(
        x, y, size=20, fill_color="grey", alpha=0.1, line_color=None,
        hover_fill_color="firebrick", hover_alpha=0.5, hover_line_color=None
    )
    # tooltips=None: the hover tool only restyles the glyphs (hline mode)
    p.add_tools(HoverTool(tooltips=None, renderers=[cr], mode='hline'))
    return p
def intro():
return Div(text="""
<h3>Welcome to Layout!</h3>
<p>Hopefully you'll see from the code, that the layout tries to get out of your way
and do the right thing. Of course, it might not always, so please report bugs as you
find them and attach them to the epic we're creating <a href="">here</a>.</p>
<p>This is an example of <code>scale_width</code> mode (happy to continue the conversations
about what to name the modes). In <code>scale_width</code> everything responds to the width
that's available to it. Plots alter their height to maintain their aspect ratio, and widgets
are allowed to grow as tall as they need to accomodate themselves. Often times widgets
stay the same height, but text is a good example of a widget that doesn't.</p>
<h4>I want to stress that this was all written in python. There is no templating or
use of <code>bokeh.embed</code>.</h4>
""")
l = layout(
[
[intro()],
[text(), scatter()],
[text()],
[hover_plot(), text()],
],
sizing_mode='scale_width'
)
show(l)
| bsd-3-clause |
kidburglar/youtube-dl | youtube_dl/extractor/amcnetworks.py | 23 | 4838 | # coding: utf-8
from __future__ import unicode_literals
from .theplatform import ThePlatformIE
from ..utils import (
int_or_none,
parse_age_limit,
try_get,
update_url_query,
)
class AMCNetworksIE(ThePlatformIE):
    """Extractor for AMC Networks sites (AMC, BBC America, IFC, WE tv, SundanceTV).

    Pages embed a thePlatform media URL in ``window.platformLinkURL``; some
    content additionally requires TV-provider (MVPD) authentication, which the
    page signals via ``window.authRequired``.
    """
    _VALID_URL = r'https?://(?:www\.)?(?:amc|bbcamerica|ifc|(?:we|sundance)tv)\.com/(?:movies|shows(?:/[^/]+)+)/(?P<id>[^/?#]+)'
    _TESTS = [{
        'url': 'http://www.ifc.com/shows/maron/season-04/episode-01/step-1',
        'md5': '',
        'info_dict': {
            'id': 's3MX01Nl4vPH',
            'ext': 'mp4',
            'title': 'Maron - Season 4 - Step 1',
            'description': 'In denial about his current situation, Marc is reluctantly convinced by his friends to enter rehab. Starring Marc Maron and Constance Zimmer.',
            'age_limit': 17,
            'upload_date': '20160505',
            'timestamp': 1462468831,
            'uploader': 'AMCN',
        },
        'params': {
            # m3u8 download
            'skip_download': True,
        },
        'skip': 'Requires TV provider accounts',
    }, {
        'url': 'http://www.bbcamerica.com/shows/the-hunt/full-episodes/season-1/episode-01-the-hardest-challenge',
        'only_matching': True,
    }, {
        'url': 'http://www.amc.com/shows/preacher/full-episodes/season-01/episode-00/pilot',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/million-dollar-matchmaker/season-01/episode-06-the-dumped-dj-and-shallow-hal',
        'only_matching': True,
    }, {
        'url': 'http://www.ifc.com/movies/chaos',
        'only_matching': True,
    }, {
        'url': 'http://www.bbcamerica.com/shows/doctor-who/full-episodes/the-power-of-the-daleks/episode-01-episode-1-color-version',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/mama-june-from-not-to-hot/full-episode/season-01/thin-tervention',
        'only_matching': True,
    }, {
        'url': 'http://www.wetv.com/shows/la-hair/videos/season-05/episode-09-episode-9-2/episode-9-sneak-peek-3',
        'only_matching': True,
    }, {
        'url': 'https://www.sundancetv.com/shows/riviera/full-episodes/season-1/episode-01-episode-1',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        display_id = self._match_id(url)
        webpage = self._download_webpage(url, display_id)
        # Ask thePlatform for an HLS (m3u8) manifest.
        query = {
            'mbr': 'true',
            'manifest': 'm3u',
        }
        media_url = self._search_regex(
            r'window\.platformLinkURL\s*=\s*[\'"]([^\'"]+)',
            webpage, 'media url')
        theplatform_metadata = self._download_theplatform_metadata(self._search_regex(
            r'link\.theplatform\.com/s/([^?]+)',
            media_url, 'theplatform_path'), display_id)
        info = self._parse_theplatform_metadata(theplatform_metadata)
        video_id = theplatform_metadata['pid']
        title = theplatform_metadata['title']
        rating = try_get(
            theplatform_metadata, lambda x: x['ratings'][0]['rating'])
        auth_required = self._search_regex(
            r'window\.authRequired\s*=\s*(true|false);',
            webpage, 'auth required')
        if auth_required == 'true':
            # Protected content: obtain an MVPD auth token and append it to
            # the manifest query.
            requestor_id = self._search_regex(
                r'window\.requestor_id\s*=\s*[\'"]([^\'"]+)',
                webpage, 'requestor id')
            resource = self._get_mvpd_resource(
                requestor_id, title, video_id, rating)
            query['auth'] = self._extract_mvpd_auth(
                url, video_id, requestor_id, resource)
        media_url = update_url_query(media_url, query)
        formats, subtitles = self._extract_theplatform_smil(
            media_url, video_id)
        self._sort_formats(formats)
        info.update({
            'id': video_id,
            'subtitles': subtitles,
            'formats': formats,
            # Fixed: parse_age_limit() was applied twice; it is idempotent on
            # already-parsed values, so the double call was pure redundancy.
            'age_limit': parse_age_limit(rating),
        })
        # thePlatform custom fields are namespaced; pull series/season/episode
        # info from the first declared namespace, if any.
        ns_keys = theplatform_metadata.get('$xmlns', {}).keys()
        if ns_keys:
            ns = list(ns_keys)[0]
            series = theplatform_metadata.get(ns + '$show')
            season_number = int_or_none(
                theplatform_metadata.get(ns + '$season'))
            episode = theplatform_metadata.get(ns + '$episodeTitle')
            episode_number = int_or_none(
                theplatform_metadata.get(ns + '$episode'))
            if season_number:
                title = 'Season %d - %s' % (season_number, title)
            if series:
                title = '%s - %s' % (series, title)
            info.update({
                'title': title,
                'series': series,
                'season_number': season_number,
                'episode': episode,
                'episode_number': episode_number,
            })
        return info
| unlicense |
millosh/docker-tools | bootstrap-docker.py | 1 | 5698 | #!/usr/bin/python
import sys, os, time
import simplejson as json
from os.path import *
def exec_cmd(cmd, toprint, toexec):
    """Optionally echo and/or run a shell command.

    cmd     -- shell command string
    toprint -- when true, echo the command to stdout
    toexec  -- when true, actually run it via os.system
    """
    if toprint:
        print(cmd)
    if toexec:
        os.system(cmd)
def gm_argument(switch, argtype, default, message_prefix):
    """Fetch one command-line option from sys.argv.

    switch         -- the option flag to look for (e.g. "--distro")
    argtype        -- "negative" (presence means False), "dir" (created if
                      missing), "file" (must exist), or any other label for a
                      plain value
    default        -- value used when the switch is absent; falsy defaults on
                      non-negative types are treated as "mandatory, missing"
    message_prefix -- human-readable option name used in error messages
    """
    if switch in sys.argv:
        idx = sys.argv.index(switch)
        if argtype == 'negative':
            argument = False
        elif idx + 1 < len(sys.argv):
            argument = sys.argv[idx + 1]
        else:
            sys.exit(message_prefix + " not defined properly! Exiting.")
    elif argtype == 'negative':
        argument = default
    elif default:
        argument = default
    else:
        sys.exit(message_prefix + " not defined and it's mandatory! Exiting.")
    # Post-process path-like arguments.
    if argtype == "dir":
        if not isdir(argument):
            exec_cmd('mkdir -p "' + argument + '"', tp, te)
    elif argtype == "file":
        if not isfile(argument):
            sys.exit(message_prefix + " does not exist! Exiting.")
    return argument
def system_install():
    """Create the chroot directory and debootstrap the base system into it."""
    exec_cmd("mkdir -p " + odir, tp, te)
    bootstrap_release = distros[distro][release]['bootstrap release']
    exec_cmd("debootstrap " + bootstrap_release + " " + odir, tp, te)
def config_update():
    """Copy the config overlays into the chroot and generate locales."""
    # Overlay order matters: generic files first, then distro/release ones.
    steps = (
        "cp -a " + sysdir_all + "/* " + odir,
        "cp -a " + sysdir_dist + "/* " + odir,
        "chroot " + odir + " apt-get update",
        "chroot " + odir + " apt-get install -y locales",
        "echo 'en_US.UTF-8 UTF-8' > " + odir + "/etc/locale.gen",
        "chroot " + odir + " locale-gen",
    )
    for step in steps:
        exec_cmd(step, tp, te)
def update_software():
    """Upgrade all packages inside the chroot and clean apt caches."""
    for apt_args in ("update", "upgrade -y", "autoremove -y", "clean -y"):
        exec_cmd("chroot " + odir + " apt-get " + apt_args, tp, te)
def install_software():
    """Install the requested packages inside the chroot.

    The package list may contain the "0" placeholder that --packages defaults
    to (the top-level code appends it before this runs).  Filter that sentinel
    out explicitly.  The previous ``new_software[:-1]`` hack dropped the LAST
    list element unconditionally, which discarded a real package whenever
    --packages was actually supplied.
    """
    if new_software != []:
        packages = [pkg for pkg in new_software if pkg != "0"]
        cmd = "chroot " + odir + " apt-get update"
        exec_cmd(cmd, tp, te)
        cmd = "chroot " + odir + " apt-get install -y " + " ".join(packages)
        exec_cmd(cmd, tp, te)
        cmd = "chroot " + odir + " apt-get autoremove -y"
        exec_cmd(cmd, tp, te)
        cmd = "chroot " + odir + " apt-get clean -y"
        exec_cmd(cmd, tp, te)
def create_docker_image():
    """Stream the chroot tree into `docker import` to produce an image."""
    exec_cmd("tar -C " + odir + " -c . | docker import - " + fullname, tp, te)
def push_to_cloud():
    """Tag the freshly built image and push it to the configured registry."""
    remote = distros[distro][release]['docker repository'] + ":" + full_tag
    exec_cmd("docker tag " + fullname + " " + remote, tp, te)
    exec_cmd("docker push " + remote, tp, te)
def run_docker():
    """Start a detached container from the new image, running `init`."""
    exec_cmd("docker run -d --name " + name + " " + fullname + " init", tp, te)
# --- Command-line configuration ---------------------------------------------
# Global echo/execute switches consumed by exec_cmd() everywhere below.
tp = gm_argument("--noprint","negative",True,"Someting is rotten in the state of Denmark! Variable 'tp'. Exiting.")
te = gm_argument("--noexec","negative",True,"Someting is rotten in the state of Denmark! Variable 'te'. Exiting.")
# Mandatory identity of the image to build.
distro = gm_argument("--distro","string",False,"Distribution")
release = gm_argument("--release","string",False,"Release")
tag = gm_argument("--tag","string","default","Tag")
# Timestamp suffix makes each build's tag and container name unique.
tag_suffix = time.strftime("%Y%m%d%H%M%S")
full_tag = tag + "-" + tag_suffix
simple_name = distro + '-' + release + '-' + tag
name = simple_name + '-' + tag_suffix
fullname = distro + '-' + release + ':' + full_tag
# Directory layout: chroots/<simple_name> is the debootstrap target;
# system/all and system/<distro>/<release> hold config overlays.
root_dir = gm_argument("--root","dir",".","Root directory")
chroot_dir = root_dir + "/chroots"
odir = chroot_dir + "/" + simple_name
sysdir = root_dir + "/system"
sysdir_all = sysdir + "/all"
sysdir_dist = sysdir + "/" + distro + "/" + release
# Distribution metadata: loaded from a JSON file when given, otherwise
# assembled from the --docker-repository / --bootstrap-release switches.
distros_config = gm_argument("--distros-config","json file",1,"Distributions config")
if distros_config == 1:
    distros = {
        distro: {
            release: {
                "docker repository": False,
                "bootstrap release": False,
            },
        },
    }
else:
    distros = json.loads(open(distros_config).read())
distros[distro][release]['docker repository'] = gm_argument("--docker-repository","string",distros[distro][release]['docker repository'],"Docker repository")
distros[distro][release]['bootstrap release'] = gm_argument("--bootstrap-release","string",distros[distro][release]['bootstrap release'],"Bootstrap release")
# Package list: optional JSON file plus comma-separated --packages additions.
new_software_config = gm_argument("--software-config","json file",1,"Software config")
if new_software_config == 1:
    new_software = []
else:
    new_software = json.loads(open(new_software_config).read())
additional_software = gm_argument("--packages","comma-separated","0","Additional software").split(",")
# NOTE(review): "0" is the no-packages sentinel from the --packages default.
# This remove() runs BEFORE additional_software is appended, so the sentinel
# usually survives into new_software and install_software() has to cope with
# it — confirm the sentinel handling before relying on --packages here.
try:
    new_software.remove("0")
except ValueError:
    pass
new_software += additional_software
# --- Mode dispatch ----------------------------------------------------------
# --full-init:   bootstrap + configure + image + registry push + run
# --init:        same, without the registry push
# --full-update: refresh an existing chroot into a new image and push it
# --update:      refresh an existing chroot into a new image only
if "--full-init" in sys.argv:
    system_install()
    config_update()
    update_software()
    install_software()
    create_docker_image()
    push_to_cloud()
    run_docker()
elif "--init" in sys.argv:
    system_install()
    config_update()
    update_software()
    install_software()
    create_docker_image()
    run_docker()
elif "--full-update" in sys.argv:
    update_software()
    install_software()
    create_docker_image()
    push_to_cloud()
elif "--update" in sys.argv:
    update_software()
    install_software()
    create_docker_image()
| agpl-3.0 |
openstack/murano | murano/tests/unit/db/services/environment_templates.py | 1 | 6144 | # Copyright (c) 2015 Telefonica I+D.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fixtures
class EmptyEnvironmentFixture(fixtures.Fixture):
    """Fixture exposing a minimal environment description with no services."""

    def setUp(self):
        super(EmptyEnvironmentFixture, self).setUp()
        # Bare-bones description: identity fields only.
        self.env_desc = dict(
            tenant_id="tenant_id",
            name="my_environment",
            id="template_id",
        )
        self.addCleanup(delattr, self, 'env_desc')
class EmptyEnvironmentTemplateFixture(fixtures.Fixture):
    """Fixture exposing a minimal environment-template description."""

    def setUp(self):
        super(EmptyEnvironmentTemplateFixture, self).setUp()
        # Bare-bones description: identity fields only, no services section.
        self.environment_template_desc = dict(
            tenant_id="tenant_id",
            name="my_template",
            id="template_id",
        )
        self.addCleanup(delattr, self, 'environment_template_desc')
class AppEnvTemplateFixture(fixtures.Fixture):
    """Environment-template description containing two services.

    The Tomcat service embeds its instance definition inline; the MySQL
    service references the same instance by its id string.
    """
    def setUp(self):
        super(AppEnvTemplateFixture, self).setUp()
        self.env_template_desc = \
            {
                "services": [
                    {
                        "instance": {
                            "assignFloatingIp": "true",
                            "keyname": "mykeyname",
                            "image": "cloud-fedora-v3",
                            "flavor": "m1.medium",
                            "?": {
                                "type": "io.murano.resources.LinuxInstance",
                                "id": "ef984a74-29a4-45c0-b1dc-2ab9f075732e"
                            }
                        },
                        "name": "orion",
                        "?":
                        {
                            "_26411a1861294160833743e45d0eaad9": {
                                "name": "tomcat"
                            },
                            "type": "io.murano.apps.apache.Tomcat",
                            "id": "tomcat_id"
                        },
                        "port": "8080"
                    }, {
                        # References the Tomcat service's instance by id.
                        "instance": "ef984a74-29a4-45c0-b1dc-2ab9f075732e",
                        "password": "XXX", "name":
                        "mysql",
                        "?": {
                            "_26411a1861294160833743e45d0eaad9": {
                                "name": "mysql"
                            },
                            "type": "io.murano.apps.database.MySQL",
                            "id": "54aaa43d-5970"
                        }
                    }
                ],
                "tenant_id": "tenant_id",
                "name": "template_name",
                'id': 'template_id'
            }
        self.addCleanup(delattr, self, 'env_template_desc')
class ApplicationsFixture(fixtures.Fixture):
    """The services list of AppEnvTemplateFixture on its own (Tomcat + MySQL)."""
    def setUp(self):
        super(ApplicationsFixture, self).setUp()
        self.applications_desc = [
            {
                "instance": {
                    "assignFloatingIp": "true",
                    "keyname": "mykeyname",
                    "image": "cloud-fedora-v3",
                    "flavor": "m1.medium",
                    "?": {
                        "type": "io.murano.resources.LinuxInstance",
                        "id": "ef984a74-29a4-45c0-b1dc-2ab9f075732e"
                    }
                },
                "name": "orion",
                "?":
                {
                    "_26411a1861294160833743e45d0eaad9": {
                        "name": "tomcat"
                    },
                    "type": "io.murano.apps.apache.Tomcat",
                    "id": "tomcat_id"
                },
                "port": "8080"
            },
            {
                # References the Tomcat application's instance by id.
                "instance": "ef984a74-29a4-45c0-b1dc-2ab9f075732e",
                "password": "XXX", "name":
                "mysql",
                "?": {
                    "_26411a1861294160833743e45d0eaad9": {
                        "name": "mysql"
                    },
                    "type": "io.murano.apps.database.MySQL",
                    "id": "54aaa43d-5970"
                }
            }
        ]
        self.addCleanup(delattr, self, 'applications_desc')
class ApplicationTomcatFixture(fixtures.Fixture):
    """Single Tomcat application description with an inline instance."""
    def setUp(self):
        super(ApplicationTomcatFixture, self).setUp()
        self.application_tomcat_desc = {
            "instance": {
                "assignFloatingIp": "true",
                "keyname": "mykeyname",
                "image": "cloud-fedora-v3",
                "flavor": "m1.medium",
                "?": {
                    "type": "io.murano.resources.LinuxInstance",
                    "id": "ef984a74-29a4-45c0-b1dc-2ab9f075732e"
                }
            },
            "name": "orion",
            "?":
            {
                "_26411a1861294160833743e45d0eaad9": {
                    "name": "tomcat"
                },
                "type": "io.murano.apps.apache.Tomcat",
                "id": "tomcat_id"
            },
            "port": "8080"
        }
        self.addCleanup(delattr, self, 'application_tomcat_desc')
class ApplicationMysqlFixture(fixtures.Fixture):
    """Single MySQL application description referencing an instance by id."""
    def setUp(self):
        super(ApplicationMysqlFixture, self).setUp()
        self.application_mysql_desc = {
            # Instance is a reference (id string), not an inline definition.
            "instance": "ef984a74-29a4-45c0-b1dc-2ab9f075732e",
            "password": "XXX", "name":
            "mysql",
            "?": {
                "_26411a1861294160833743e45d0eaad9": {
                    "name": "mysql"
                },
                "type": "io.murano.apps.database.MySQL",
                "id": "54aaa43d-5970"
            }
        }
        self.addCleanup(delattr, self, 'application_mysql_desc')
| apache-2.0 |
Alexander-M-Waldman/local_currency_site | lib/python2.7/site-packages/django/db/models/base.py | 24 | 71465 | from __future__ import unicode_literals
import copy
import inspect
import warnings
from itertools import chain
from django.apps import apps
from django.conf import settings
from django.core import checks
from django.core.exceptions import (
NON_FIELD_ERRORS, FieldDoesNotExist, FieldError, MultipleObjectsReturned,
ObjectDoesNotExist, ValidationError,
)
from django.db import (
DEFAULT_DB_ALIAS, DJANGO_VERSION_PICKLE_KEY, DatabaseError, connections,
router, transaction,
)
from django.db.models import signals
from django.db.models.constants import LOOKUP_SEP
from django.db.models.deletion import CASCADE, Collector
from django.db.models.fields import AutoField
from django.db.models.fields.related import (
ForeignObjectRel, ManyToOneRel, OneToOneField, lazy_related_operation,
resolve_relation,
)
from django.db.models.manager import ensure_default_manager
from django.db.models.options import Options
from django.db.models.query import Q
from django.db.models.query_utils import (
DeferredAttribute, deferred_class_factory,
)
from django.db.models.utils import make_model_tuple
from django.utils import six
from django.utils.encoding import force_str, force_text
from django.utils.functional import curry
from django.utils.six.moves import zip
from django.utils.text import capfirst, get_text_list
from django.utils.translation import ugettext_lazy as _
from django.utils.version import get_version
def subclass_exception(name, parents, module, attached_to=None):
    """
    Create exception subclass. Used by ModelBase below.
    If 'attached_to' is supplied, the exception will be created in a way that
    allows it to be pickled, assuming the returned exception class will be added
    as an attribute to the 'attached_to' class.
    """
    class_dict = {'__module__': module}
    if attached_to is None:
        return type(name, parents, class_dict)

    def __reduce__(self):
        # Exceptions are special - they've got state that isn't
        # in self.__dict__. We assume it is all in self.args.
        return (unpickle_inner_exception, (attached_to, name), self.args)

    def __setstate__(self, args):
        self.args = args

    class_dict['__reduce__'] = __reduce__
    class_dict['__setstate__'] = __setstate__
    return type(name, parents, class_dict)
class ModelBase(type):
    """
    Metaclass for all models.
    """
    def __new__(cls, name, bases, attrs):
        """Build a model class: set up _meta, exceptions, fields and parents."""
        super_new = super(ModelBase, cls).__new__
        # Also ensure initialization is only performed for subclasses of Model
        # (excluding Model class itself).
        parents = [b for b in bases if isinstance(b, ModelBase)]
        if not parents:
            return super_new(cls, name, bases, attrs)
        # Create the class.
        module = attrs.pop('__module__')
        new_class = super_new(cls, name, bases, {'__module__': module})
        attr_meta = attrs.pop('Meta', None)
        abstract = getattr(attr_meta, 'abstract', False)
        # Fall back to an inherited Meta when the class body declares none.
        if not attr_meta:
            meta = getattr(new_class, 'Meta', None)
        else:
            meta = attr_meta
        base_meta = getattr(new_class, '_meta', None)
        app_label = None
        # Look for an application configuration to attach the model to.
        app_config = apps.get_containing_app_config(module)
        if getattr(meta, 'app_label', None) is None:
            if app_config is None:
                if not abstract:
                    raise RuntimeError(
                        "Model class %s.%s doesn't declare an explicit "
                        "app_label and isn't in an application in "
                        "INSTALLED_APPS." % (module, name)
                    )
            else:
                app_label = app_config.label
        new_class.add_to_class('_meta', Options(meta, app_label))
        if not abstract:
            # Per-model DoesNotExist / MultipleObjectsReturned exceptions,
            # inheriting from the corresponding parent-model exceptions.
            new_class.add_to_class(
                'DoesNotExist',
                subclass_exception(
                    str('DoesNotExist'),
                    tuple(
                        x.DoesNotExist for x in parents if hasattr(x, '_meta') and not x._meta.abstract
                    ) or (ObjectDoesNotExist,),
                    module,
                    attached_to=new_class))
            new_class.add_to_class(
                'MultipleObjectsReturned',
                subclass_exception(
                    str('MultipleObjectsReturned'),
                    tuple(
                        x.MultipleObjectsReturned for x in parents if hasattr(x, '_meta') and not x._meta.abstract
                    ) or (MultipleObjectsReturned,),
                    module,
                    attached_to=new_class))
            if base_meta and not base_meta.abstract:
                # Non-abstract child classes inherit some attributes from their
                # non-abstract parent (unless an ABC comes before it in the
                # method resolution order).
                if not hasattr(meta, 'ordering'):
                    new_class._meta.ordering = base_meta.ordering
                if not hasattr(meta, 'get_latest_by'):
                    new_class._meta.get_latest_by = base_meta.get_latest_by
        is_proxy = new_class._meta.proxy
        # If the model is a proxy, ensure that the base class
        # hasn't been swapped out.
        if is_proxy and base_meta and base_meta.swapped:
            raise TypeError("%s cannot proxy the swapped model '%s'." % (name, base_meta.swapped))
        if getattr(new_class, '_default_manager', None):
            if not is_proxy:
                # Multi-table inheritance doesn't inherit default manager from
                # parents.
                new_class._default_manager = None
                new_class._base_manager = None
            else:
                # Proxy classes do inherit parent's default manager, if none is
                # set explicitly.
                new_class._default_manager = new_class._default_manager._copy_to_model(new_class)
                new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
        # Add all attributes to the class.
        for obj_name, obj in attrs.items():
            new_class.add_to_class(obj_name, obj)
        # All the fields of any type declared on this model
        new_fields = chain(
            new_class._meta.local_fields,
            new_class._meta.local_many_to_many,
            new_class._meta.virtual_fields
        )
        field_names = {f.name for f in new_fields}
        # Basic setup for proxy models.
        if is_proxy:
            base = None
            for parent in [kls for kls in parents if hasattr(kls, '_meta')]:
                if parent._meta.abstract:
                    if parent._meta.fields:
                        raise TypeError(
                            "Abstract base class containing model fields not "
                            "permitted for proxy model '%s'." % name
                        )
                    else:
                        continue
                if base is not None:
                    raise TypeError("Proxy model '%s' has more than one non-abstract model base class." % name)
                else:
                    base = parent
            if base is None:
                raise TypeError("Proxy model '%s' has no non-abstract model base class." % name)
            new_class._meta.setup_proxy(base)
            new_class._meta.concrete_model = base._meta.concrete_model
            base._meta.concrete_model._meta.proxied_children.append(new_class._meta)
        else:
            new_class._meta.concrete_model = new_class
        # Collect the parent links for multi-table inheritance.
        parent_links = {}
        for base in reversed([new_class] + parents):
            # Conceptually equivalent to `if base is Model`.
            if not hasattr(base, '_meta'):
                continue
            # Skip concrete parent classes.
            if base != new_class and not base._meta.abstract:
                continue
            # Locate OneToOneField instances.
            for field in base._meta.local_fields:
                if isinstance(field, OneToOneField):
                    related = resolve_relation(new_class, field.remote_field.model)
                    parent_links[make_model_tuple(related)] = field
        # Do the appropriate setup for any model parents.
        for base in parents:
            original_base = base
            if not hasattr(base, '_meta'):
                # Things without _meta aren't functional models, so they're
                # uninteresting parents.
                continue
            parent_fields = base._meta.local_fields + base._meta.local_many_to_many
            # Check for clashes between locally declared fields and those
            # on the base classes (we cannot handle shadowed fields at the
            # moment).
            for field in parent_fields:
                if field.name in field_names:
                    raise FieldError(
                        'Local field %r in class %r clashes '
                        'with field of similar name from '
                        'base class %r' % (field.name, name, base.__name__)
                    )
            if not base._meta.abstract:
                # Concrete classes...
                base = base._meta.concrete_model
                base_key = make_model_tuple(base)
                if base_key in parent_links:
                    field = parent_links[base_key]
                elif not is_proxy:
                    # No explicit parent link declared: synthesize the
                    # implicit "<parent>_ptr" OneToOneField.
                    attr_name = '%s_ptr' % base._meta.model_name
                    field = OneToOneField(
                        base,
                        on_delete=CASCADE,
                        name=attr_name,
                        auto_created=True,
                        parent_link=True,
                    )
                    # Only add the ptr field if it's not already present;
                    # e.g. migrations will already have it specified
                    if not hasattr(new_class, attr_name):
                        new_class.add_to_class(attr_name, field)
                else:
                    field = None
                new_class._meta.parents[base] = field
            else:
                base_parents = base._meta.parents.copy()
                # .. and abstract ones.
                for field in parent_fields:
                    new_field = copy.deepcopy(field)
                    new_class.add_to_class(field.name, new_field)
                    # Replace parent links defined on this base by the new
                    # field as it will be appropriately resolved if required.
                    if field.one_to_one:
                        for parent, parent_link in base_parents.items():
                            if field == parent_link:
                                base_parents[parent] = new_field
                # Pass any non-abstract parent classes onto child.
                new_class._meta.parents.update(base_parents)
                # Inherit managers from the abstract base classes.
                new_class.copy_managers(base._meta.abstract_managers)
                # Proxy models inherit the non-abstract managers from their base,
                # unless they have redefined any of them.
                if is_proxy:
                    new_class.copy_managers(original_base._meta.concrete_managers)
                # Inherit virtual fields (like GenericForeignKey) from the parent
                # class
                for field in base._meta.virtual_fields:
                    if base._meta.abstract and field.name in field_names:
                        raise FieldError(
                            'Local field %r in class %r clashes '
                            'with field of similar name from '
                            'abstract base class %r' % (field.name, name, base.__name__)
                        )
                    new_class.add_to_class(field.name, copy.deepcopy(field))
        if abstract:
            # Abstract base models can't be instantiated and don't appear in
            # the list of models for an app. We do the final setup for them a
            # little differently from normal models.
            attr_meta.abstract = False
            new_class.Meta = attr_meta
            return new_class
        new_class._prepare()
        new_class._meta.apps.register_model(new_class._meta.app_label, new_class)
        return new_class
    def copy_managers(cls, base_managers):
        """Copy inherited managers onto this class unless locally overridden."""
        # This is in-place sorting of an Options attribute, but that's fine.
        base_managers.sort()
        for _, mgr_name, manager in base_managers:  # NOQA (redefinition of _)
            val = getattr(cls, mgr_name, None)
            if not val or val is manager:
                new_manager = manager._copy_to_model(cls)
                cls.add_to_class(mgr_name, new_manager)
    def add_to_class(cls, name, value):
        """Attach `value` to the class, honouring contribute_to_class hooks."""
        # We should call the contribute_to_class method only if it's bound
        if not inspect.isclass(value) and hasattr(value, 'contribute_to_class'):
            value.contribute_to_class(cls, name)
        else:
            setattr(cls, name, value)
    def _prepare(cls):
        """
        Creates some methods once self._meta has been populated.
        """
        opts = cls._meta
        opts._prepare(cls)
        if opts.order_with_respect_to:
            cls.get_next_in_order = curry(cls._get_next_or_previous_in_order, is_next=True)
            cls.get_previous_in_order = curry(cls._get_next_or_previous_in_order, is_next=False)
            # Defer creating accessors on the foreign class until it has been
            # created and registered. If remote_field is None, we're ordering
            # with respect to a GenericForeignKey and don't know what the
            # foreign class is - we'll add those accessors later in
            # contribute_to_class().
            if opts.order_with_respect_to.remote_field:
                wrt = opts.order_with_respect_to
                remote = wrt.remote_field.model
                lazy_related_operation(make_foreign_order_accessors, cls, remote)
        # Give the class a docstring -- its definition.
        if cls.__doc__ is None:
            cls.__doc__ = "%s(%s)" % (cls.__name__, ", ".join(f.name for f in opts.fields))
        get_absolute_url_override = settings.ABSOLUTE_URL_OVERRIDES.get(opts.label_lower)
        if get_absolute_url_override:
            setattr(cls, 'get_absolute_url', get_absolute_url_override)
        ensure_default_manager(cls)
        signals.class_prepared.send(sender=cls)
class ModelState(object):
    """
    A class for storing instance state
    """
    def __init__(self, db=None):
        # Alias of the database this instance was loaded from (None if it
        # wasn't loaded from any database).
        self.db = db
        # If true, uniqueness validation checks will consider this a new,
        # as-yet-unsaved object.  Necessary for correct validation of new
        # instances of objects with explicit (non-auto) PKs.  This impacts
        # validation only; it has no effect on the actual save.
        self.adding = True
class Model(six.with_metaclass(ModelBase)):
_deferred = False
    def __init__(self, *args, **kwargs):
        """
        Initialize from positional field values and/or keyword arguments.
        Positional args map onto concrete fields in declaration order; the
        remaining fields come from kwargs or field defaults.  Leftover kwargs
        matching a property are assigned; anything else raises TypeError.
        """
        signals.pre_init.send(sender=self.__class__, args=args, kwargs=kwargs)
        # Set up the storage for instance state
        self._state = ModelState()
        # There is a rather weird disparity here; if kwargs, it's set, then args
        # overrides it. It should be one or the other; don't duplicate the work
        # The reason for the kwargs check is that standard iterator passes in by
        # args, and instantiation for iteration is 33% faster.
        args_len = len(args)
        if args_len > len(self._meta.concrete_fields):
            # Daft, but matches old exception sans the err msg.
            raise IndexError("Number of args exceeds number of fields")
        if not kwargs:
            fields_iter = iter(self._meta.concrete_fields)
            # The ordering of the zip calls matter - zip throws StopIteration
            # when an iter throws it. So if the first iter throws it, the second
            # is *not* consumed. We rely on this, so don't change the order
            # without changing the logic.
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
        else:
            # Slower, kwargs-ready version.
            fields_iter = iter(self._meta.fields)
            for val, field in zip(args, fields_iter):
                setattr(self, field.attname, val)
                kwargs.pop(field.name, None)
                # Maintain compatibility with existing calls.
                if isinstance(field.remote_field, ManyToOneRel):
                    kwargs.pop(field.attname, None)
        # Now we're left with the unprocessed fields that *must* come from
        # keywords, or default.
        for field in fields_iter:
            is_related_object = False
            # This slightly odd construct is so that we can access any
            # data-descriptor object (DeferredAttribute) without triggering its
            # __get__ method.
            if (field.attname not in kwargs and
                    (isinstance(self.__class__.__dict__.get(field.attname), DeferredAttribute)
                     or field.column is None)):
                # This field will be populated on request.
                continue
            if kwargs:
                if isinstance(field.remote_field, ForeignObjectRel):
                    try:
                        # Assume object instance was passed in.
                        rel_obj = kwargs.pop(field.name)
                        is_related_object = True
                    except KeyError:
                        try:
                            # Object instance wasn't passed in -- must be an ID.
                            val = kwargs.pop(field.attname)
                        except KeyError:
                            val = field.get_default()
                    else:
                        # Object instance was passed in. Special case: You can
                        # pass in "None" for related objects if it's allowed.
                        if rel_obj is None and field.null:
                            val = None
                else:
                    try:
                        val = kwargs.pop(field.attname)
                    except KeyError:
                        # This is done with an exception rather than the
                        # default argument on pop because we don't want
                        # get_default() to be evaluated, and then not used.
                        # Refs #12057.
                        val = field.get_default()
            else:
                val = field.get_default()
            if is_related_object:
                # If we are passed a related instance, set it using the
                # field.name instead of field.attname (e.g. "user" instead of
                # "user_id") so that the object gets properly cached (and type
                # checked) by the RelatedObjectDescriptor.
                setattr(self, field.name, rel_obj)
            else:
                setattr(self, field.attname, val)
        if kwargs:
            # Remaining kwargs may target plain properties on the class.
            for prop in list(kwargs):
                try:
                    if isinstance(getattr(self.__class__, prop), property):
                        setattr(self, prop, kwargs.pop(prop))
                except AttributeError:
                    pass
            if kwargs:
                raise TypeError("'%s' is an invalid keyword argument for this function" % list(kwargs)[0])
        super(Model, self).__init__()
        signals.post_init.send(sender=self.__class__, instance=self)
@classmethod
def from_db(cls, db, field_names, values):
if cls._deferred:
new = cls(**dict(zip(field_names, values)))
else:
new = cls(*values)
new._state.adding = False
new._state.db = db
return new
def __repr__(self):
try:
u = six.text_type(self)
except (UnicodeEncodeError, UnicodeDecodeError):
u = '[Bad Unicode data]'
return force_str('<%s: %s>' % (self.__class__.__name__, u))
    def __str__(self):
        # On Python 2, honour a user-defined __unicode__() and return UTF-8
        # bytes; otherwise fall back to a generic "<ClassName> object" label.
        if six.PY2 and hasattr(self, '__unicode__'):
            return force_text(self).encode('utf-8')
        return str('%s object' % self.__class__.__name__)
def __eq__(self, other):
if not isinstance(other, Model):
return False
if self._meta.concrete_model != other._meta.concrete_model:
return False
my_pk = self._get_pk_val()
if my_pk is None:
return self is other
return my_pk == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
if self._get_pk_val() is None:
raise TypeError("Model instances without primary key value are unhashable")
return hash(self._get_pk_val())
    def __reduce__(self):
        """
        Provides pickling support. Normally, this just dispatches to Python's
        standard handling. However, for models with deferred field loading, we
        need to do things manually, as they're dynamically created classes and
        only module-level classes can be pickled by the default path.
        """
        data = self.__dict__
        # Stamp the Django version so __setstate__ can warn on a mismatch.
        data[DJANGO_VERSION_PICKLE_KEY] = get_version()
        if not self._deferred:
            class_id = self._meta.app_label, self._meta.object_name
            return model_unpickle, (class_id, [], simple_class_factory), data
        # Deferred model: record which attnames are deferred so the dynamic
        # class can be recreated by deferred_class_factory at unpickle time.
        defers = []
        for field in self._meta.fields:
            if isinstance(self.__class__.__dict__.get(field.attname),
                          DeferredAttribute):
                defers.append(field.attname)
        model = self._meta.proxy_for_model
        class_id = model._meta.app_label, model._meta.object_name
        return (model_unpickle, (class_id, defers, deferred_class_factory), data)
def __setstate__(self, state):
msg = None
pickled_version = state.get(DJANGO_VERSION_PICKLE_KEY)
if pickled_version:
current_version = get_version()
if current_version != pickled_version:
msg = ("Pickled model instance's Django version %s does"
" not match the current version %s."
% (pickled_version, current_version))
else:
msg = "Pickled model instance's Django version is not specified."
if msg:
warnings.warn(msg, RuntimeWarning, stacklevel=2)
self.__dict__.update(state)
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
if isinstance(self.__class__.__dict__.get(f.attname), DeferredAttribute)
}
    def refresh_from_db(self, using=None, fields=None, **kwargs):
        """
        Reloads field values from the database.
        By default, the reloading happens from the database this instance was
        loaded from, or by the read router if this instance wasn't loaded from
        any database. The using parameter will override the default.
        Fields can be used to specify which fields to reload. The fields
        should be an iterable of field attnames. If fields is None, then
        all non-deferred fields are reloaded.
        When accessing deferred fields of an instance, the deferred loading
        of the field will call this method.
        """
        if fields is not None:
            if len(fields) == 0:
                # Nothing requested; avoid a pointless query.
                return
            if any(LOOKUP_SEP in f for f in fields):
                raise ValueError(
                    'Found "%s" in fields argument. Relations and transforms '
                    'are not allowed in fields.' % LOOKUP_SEP)
        db = using if using is not None else self._state.db
        if self._deferred:
            non_deferred_model = self._meta.proxy_for_model
        else:
            non_deferred_model = self.__class__
        db_instance_qs = non_deferred_model._default_manager.using(db).filter(pk=self.pk)
        # Use provided fields, if not set then reload all non-deferred fields.
        if fields is not None:
            fields = list(fields)
            db_instance_qs = db_instance_qs.only(*fields)
        elif self._deferred:
            deferred_fields = self.get_deferred_fields()
            fields = [f.attname for f in self._meta.concrete_fields
                      if f.attname not in deferred_fields]
            db_instance_qs = db_instance_qs.only(*fields)
        db_instance = db_instance_qs.get()
        non_loaded_fields = db_instance.get_deferred_fields()
        for field in self._meta.concrete_fields:
            if field.attname in non_loaded_fields:
                # This field wasn't refreshed - skip ahead.
                continue
            setattr(self, field.attname, getattr(db_instance, field.attname))
            # Throw away stale foreign key references.
            if field.is_relation and field.get_cache_name() in self.__dict__:
                rel_instance = getattr(self, field.get_cache_name())
                local_val = getattr(db_instance, field.attname)
                related_val = None if rel_instance is None else getattr(rel_instance, field.target_field.attname)
                # NOTE(review): the second clause also clears the cached
                # relation when both sides are None — confirm intentional.
                if local_val != related_val or (local_val is None and related_val is None):
                    del self.__dict__[field.get_cache_name()]
        self._state.db = db_instance._state.db
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
and not use this method.
"""
try:
field = self._meta.get_field(field_name)
except FieldDoesNotExist:
return getattr(self, field_name)
return getattr(self, field.attname)
    def save(self, force_insert=False, force_update=False, using=None,
             update_fields=None):
        """
        Saves the current instance. Override this in a subclass if you want to
        control the saving process.
        The 'force_insert' and 'force_update' parameters can be used to insist
        that the "save" must be an SQL insert or update (or equivalent for
        non-SQL backends), respectively. Normally, they should not be set.
        'using' selects the database alias to write to (defaults to the write
        router's choice). 'update_fields', when given, restricts the UPDATE to
        those fields; an empty iterable makes save() a signal-free no-op.
        """
        # Ensure that a model instance without a PK hasn't been assigned to
        # a ForeignKey or OneToOneField on this model. If the field is
        # nullable, allowing the save() would result in silent data loss.
        for field in self._meta.concrete_fields:
            if field.is_relation:
                # If the related field isn't cached, then an instance hasn't
                # been assigned and there's no need to worry about this check.
                try:
                    getattr(self, field.get_cache_name())
                except AttributeError:
                    continue
                obj = getattr(self, field.name, None)
                # A pk may have been assigned manually to a model instance not
                # saved to the database (or auto-generated in a case like
                # UUIDField), but we allow the save to proceed and rely on the
                # database to raise an IntegrityError if applicable. If
                # constraints aren't supported by the database, there's the
                # unavoidable risk of data corruption.
                if obj and obj.pk is None:
                    # Remove the object from a related instance cache.
                    if not field.remote_field.multiple:
                        delattr(obj, field.remote_field.get_cache_name())
                    raise ValueError(
                        "save() prohibited to prevent data loss due to "
                        "unsaved related object '%s'." % field.name
                    )
        using = using or router.db_for_write(self.__class__, instance=self)
        if force_insert and (force_update or update_fields):
            raise ValueError("Cannot force both insert and updating in model saving.")
        if update_fields is not None:
            # If update_fields is empty, skip the save. We do also check for
            # no-op saves later on for inheritance cases. This bailout is
            # still needed for skipping signal sending.
            if len(update_fields) == 0:
                return
            update_fields = frozenset(update_fields)
            # Collect every updatable name (both name and attname for FKs) so
            # caller-supplied update_fields can be validated below.
            field_names = set()
            for field in self._meta.fields:
                if not field.primary_key:
                    field_names.add(field.name)
                    if field.name != field.attname:
                        field_names.add(field.attname)
            non_model_fields = update_fields.difference(field_names)
            if non_model_fields:
                raise ValueError("The following fields do not exist in this "
                                 "model or are m2m fields: %s"
                                 % ', '.join(non_model_fields))
        # If saving to the same database, and this model is deferred, then
        # automatically do a "update_fields" save on the loaded fields.
        elif not force_insert and self._deferred and using == self._state.db:
            field_names = set()
            for field in self._meta.concrete_fields:
                if not field.primary_key and not hasattr(field, 'through'):
                    field_names.add(field.attname)
            # Names whose value never reached __dict__ and whose class
            # attribute is still a DeferredAttribute were never loaded.
            deferred_fields = [
                f.attname for f in self._meta.fields
                if (f.attname not in self.__dict__ and
                    isinstance(self.__class__.__dict__[f.attname], DeferredAttribute))
            ]
            loaded_fields = field_names.difference(deferred_fields)
            if loaded_fields:
                update_fields = frozenset(loaded_fields)
        self.save_base(using=using, force_insert=force_insert,
                       force_update=force_update, update_fields=update_fields)
    save.alters_data = True
    def save_base(self, raw=False, force_insert=False,
                  force_update=False, using=None, update_fields=None):
        """
        Handles the parts of saving which should be done only once per save,
        yet need to be done in raw saves, too. This includes some sanity
        checks and signal sending.
        The 'raw' argument is telling save_base not to save any parent
        models and not to do any changes to the values before save. This
        is used by fixture loading.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        assert not (force_insert and (force_update or update_fields))
        assert update_fields is None or len(update_fields) > 0
        cls = origin = self.__class__
        # Skip proxies, but keep the origin as the proxy model.
        if cls._meta.proxy:
            cls = cls._meta.concrete_model
        meta = cls._meta
        # Auto-created models (e.g. implicit m2m through tables) don't emit
        # save signals.
        if not meta.auto_created:
            signals.pre_save.send(sender=origin, instance=self, raw=raw, using=using,
                                  update_fields=update_fields)
        # Parent-table rows and this table's row must commit or roll back
        # together.
        with transaction.atomic(using=using, savepoint=False):
            if not raw:
                self._save_parents(cls, using, update_fields)
            updated = self._save_table(raw, cls, force_insert, force_update, using, update_fields)
        # Store the database on which the object was saved
        self._state.db = using
        # Once saved, this is no longer a to-be-added instance.
        self._state.adding = False
        # Signal that the save is complete
        if not meta.auto_created:
            signals.post_save.send(sender=origin, instance=self, created=(not updated),
                                   update_fields=update_fields, raw=raw, using=using)
    save_base.alters_data = True
    def _save_parents(self, cls, using, update_fields):
        """
        Saves all the parents of cls using values from self.
        Recurses depth-first so the topmost ancestor's row is written before
        its children, then copies each parent's PK onto this instance's link
        field.
        """
        meta = cls._meta
        for parent, field in meta.parents.items():
            # Make sure the link fields are synced between parent and self.
            if (field and getattr(self, parent._meta.pk.attname) is None
                    and getattr(self, field.attname) is not None):
                setattr(self, parent._meta.pk.attname, getattr(self, field.attname))
            self._save_parents(cls=parent, using=using, update_fields=update_fields)
            self._save_table(cls=parent, using=using, update_fields=update_fields)
            # Set the parent's PK value to self.
            if field:
                setattr(self, field.attname, self._get_pk_val(parent._meta))
                # Since we didn't have an instance of the parent handy set
                # attname directly, bypassing the descriptor. Invalidate
                # the related object cache, in case it's been accidentally
                # populated. A fresh instance will be re-built from the
                # database if necessary.
                cache_name = field.get_cache_name()
                if hasattr(self, cache_name):
                    delattr(self, cache_name)
    def _save_table(self, raw=False, cls=None, force_insert=False,
                    force_update=False, using=None, update_fields=None):
        """
        Does the heavy-lifting involved in saving. Updates or inserts the data
        for a single table.
        Returns True when an existing row was updated, False when a new row
        was inserted.
        """
        meta = cls._meta
        non_pks = [f for f in meta.local_concrete_fields if not f.primary_key]
        if update_fields:
            # Restrict the UPDATE to the explicitly requested columns.
            non_pks = [f for f in non_pks
                       if f.name in update_fields or f.attname in update_fields]
        pk_val = self._get_pk_val(meta)
        if pk_val is None:
            # Give the PK field a chance to supply a value (e.g. UUIDField
            # defaults) before going to the database.
            pk_val = meta.pk.get_pk_value_on_save(self)
            setattr(self, meta.pk.attname, pk_val)
        pk_set = pk_val is not None
        if not pk_set and (force_update or update_fields):
            raise ValueError("Cannot force an update in save() with no primary key.")
        updated = False
        # If possible, try an UPDATE. If that doesn't update anything, do an INSERT.
        if pk_set and not force_insert:
            base_qs = cls._base_manager.using(using)
            # In raw mode use attribute values verbatim; otherwise let each
            # field pre-process its value (auto_now, etc.).
            values = [(f, None, (getattr(self, f.attname) if raw else f.pre_save(self, False)))
                      for f in non_pks]
            forced_update = update_fields or force_update
            updated = self._do_update(base_qs, using, pk_val, values, update_fields,
                                      forced_update)
            if force_update and not updated:
                raise DatabaseError("Forced update did not affect any rows.")
            if update_fields and not updated:
                raise DatabaseError("Save with update_fields did not affect any rows.")
        if not updated:
            if meta.order_with_respect_to:
                # If this is a model with an order_with_respect_to
                # autopopulate the _order field
                field = meta.order_with_respect_to
                filter_args = field.get_filter_kwargs_for_object(self)
                order_value = cls._base_manager.using(using).filter(**filter_args).count()
                self._order = order_value
            fields = meta.local_concrete_fields
            if not pk_set:
                fields = [f for f in fields if not isinstance(f, AutoField)]
            update_pk = bool(meta.has_auto_field and not pk_set)
            result = self._do_insert(cls._base_manager, using, fields, update_pk, raw)
            if update_pk:
                # The database generated the PK; copy it onto the instance.
                setattr(self, meta.pk.attname, result)
        return updated
    def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
        """
        This method will try to update the model. If the model was updated (in
        the sense that an update query was done and a matching row was found
        from the DB) the method will return True.
        'values' is a list of (field, model, raw_value) triples as consumed by
        QuerySet._update().
        """
        filtered = base_qs.filter(pk=pk_val)
        if not values:
            # We can end up here when saving a model in inheritance chain where
            # update_fields doesn't target any field in current model. In that
            # case we just say the update succeeded. Another case ending up here
            # is a model with just PK - in that case check that the PK still
            # exists.
            return update_fields is not None or filtered.exists()
        if self._meta.select_on_save and not forced_update:
            if filtered.exists():
                # It may happen that the object is deleted from the DB right after
                # this check, causing the subsequent UPDATE to return zero matching
                # rows. The same result can occur in some rare cases when the
                # database returns zero despite the UPDATE being executed
                # successfully (a row is matched and updated). In order to
                # distinguish these two cases, the object's existence in the
                # database is again checked for if the UPDATE query returns 0.
                return filtered._update(values) > 0 or filtered.exists()
            else:
                return False
        return filtered._update(values) > 0
    def _do_insert(self, manager, using, fields, update_pk, raw):
        """
        Do an INSERT. If update_pk is defined then this method should return
        the new pk for the model.
        Thin hook around Manager._insert() so subclasses/backends can override
        insert behavior in one place.
        """
        return manager._insert([self], fields=fields, return_id=update_pk,
                               using=using, raw=raw)
    def delete(self, using=None, keep_parents=False):
        """
        Deletes this instance (and cascading related rows) from the database.
        'using' selects the database alias; 'keep_parents' leaves parent-table
        rows in place for multi-table inheritance. Returns the Collector's
        deletion result.
        """
        using = using or router.db_for_write(self.__class__, instance=self)
        # An unsaved instance has nothing to delete; fail loudly instead of
        # issuing a DELETE with pk=NULL.
        assert self._get_pk_val() is not None, (
            "%s object can't be deleted because its %s attribute is set to None." %
            (self._meta.object_name, self._meta.pk.attname)
        )
        collector = Collector(using=using)
        collector.collect([self], keep_parents=keep_parents)
        return collector.delete()
    delete.alters_data = True
    def _get_FIELD_display(self, field):
        # Backing implementation for get_<field>_display(): maps the stored
        # value to its human-readable choice label, falling back to the raw
        # value when it isn't among the declared choices.
        value = getattr(self, field.attname)
        return force_text(dict(field.flatchoices).get(value, value), strings_only=True)
    def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
        # Backing implementation for get_next_by_<field>() /
        # get_previous_by_<field>(). Raises DoesNotExist when there is no
        # neighbouring row.
        if not self.pk:
            raise ValueError("get_next/get_previous cannot be used on unsaved objects.")
        op = 'gt' if is_next else 'lt'
        order = '' if is_next else '-'
        param = force_text(getattr(self, field.attname))
        # Rows with an equal field value are disambiguated by pk so the
        # ordering is total and this row itself is never matched.
        q = Q(**{'%s__%s' % (field.name, op): param})
        q = q | Q(**{field.name: param, 'pk__%s' % op: self.pk})
        qs = self.__class__._default_manager.using(self._state.db).filter(**kwargs).filter(q).order_by(
            '%s%s' % (order, field.name), '%spk' % order
        )
        try:
            return qs[0]
        except IndexError:
            raise self.DoesNotExist("%s matching query does not exist." % self.__class__._meta.object_name)
    def _get_next_or_previous_in_order(self, is_next):
        # Backing implementation for get_next_in_order() /
        # get_previous_in_order() on models with order_with_respect_to.
        cachename = "__%s_order_cache" % is_next
        if not hasattr(self, cachename):
            op = 'gt' if is_next else 'lt'
            order = '_order' if is_next else '-_order'
            order_field = self._meta.order_with_respect_to
            filter_args = order_field.get_filter_kwargs_for_object(self)
            # Compare _order against this row's own _order via a subquery
            # keyed on the pk.
            obj = self._default_manager.filter(**filter_args).filter(**{
                '_order__%s' % op: self._default_manager.values('_order').filter(**{
                    self._meta.pk.name: self.pk
                })
            }).order_by(order)[:1].get()
            # Cache the neighbour on the instance for repeated calls.
            setattr(self, cachename, obj)
        return getattr(self, cachename)
def prepare_database_save(self, field):
if self.pk is None:
raise ValueError("Unsaved model instance %r cannot be used in an ORM query." % self)
return getattr(self, field.remote_field.get_related_field().attname)
def clean(self):
"""
Hook for doing any extra model-wide validation after clean() has been
called on every field by self.clean_fields. Any ValidationError raised
by this method will not be associated with a particular field; it will
have a special-case association with the field defined by NON_FIELD_ERRORS.
"""
pass
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
errors = self._perform_unique_checks(unique_checks)
date_errors = self._perform_date_checks(date_checks)
for k, v in date_errors.items():
errors.setdefault(k, []).extend(v)
if errors:
raise ValidationError(errors)
    def _get_unique_checks(self, exclude=None):
        """
        Gather a list of checks to perform. Since validate_unique could be
        called from a ModelForm, some fields may have been excluded; we can't
        perform a unique check on a model that is missing fields involved
        in that check.
        Fields that did not validate should also be excluded, but they need
        to be passed in via the exclude argument.
        Returns a (unique_checks, date_checks) pair: unique_checks holds
        (model_class, field_name_tuple) entries, date_checks holds
        (model_class, lookup_type, field_name, unique_for_field) entries.
        """
        if exclude is None:
            exclude = []
        unique_checks = []
        # unique_together constraints come from this model and every parent.
        unique_togethers = [(self.__class__, self._meta.unique_together)]
        for parent_class in self._meta.get_parent_list():
            if parent_class._meta.unique_together:
                unique_togethers.append((parent_class, parent_class._meta.unique_together))
        for model_class, unique_together in unique_togethers:
            for check in unique_together:
                for name in check:
                    # If this is an excluded field, don't add this check.
                    if name in exclude:
                        break
                else:
                    # for/else: runs only when no field of the tuple was
                    # excluded, so the whole check can be kept.
                    unique_checks.append((model_class, tuple(check)))
        # These are checks for the unique_for_<date/year/month>.
        date_checks = []
        # Gather a list of checks for fields declared as unique and add them to
        # the list of checks.
        fields_with_class = [(self.__class__, self._meta.local_fields)]
        for parent_class in self._meta.get_parent_list():
            fields_with_class.append((parent_class, parent_class._meta.local_fields))
        for model_class, fields in fields_with_class:
            for f in fields:
                name = f.name
                if name in exclude:
                    continue
                if f.unique:
                    unique_checks.append((model_class, (name,)))
                if f.unique_for_date and f.unique_for_date not in exclude:
                    date_checks.append((model_class, 'date', name, f.unique_for_date))
                if f.unique_for_year and f.unique_for_year not in exclude:
                    date_checks.append((model_class, 'year', name, f.unique_for_year))
                if f.unique_for_month and f.unique_for_month not in exclude:
                    date_checks.append((model_class, 'month', name, f.unique_for_month))
        return unique_checks, date_checks
    def _perform_unique_checks(self, unique_checks):
        """
        Runs the (model_class, field_names) checks from _get_unique_checks
        against the database and returns a dict mapping field name (or
        NON_FIELD_ERRORS for multi-field checks) to ValidationError lists.
        """
        errors = {}
        for model_class, unique_check in unique_checks:
            # Try to look up an existing object with the same values as this
            # object's values for all the unique field.
            lookup_kwargs = {}
            for field_name in unique_check:
                f = self._meta.get_field(field_name)
                lookup_value = getattr(self, f.attname)
                if lookup_value is None:
                    # no value, skip the lookup
                    continue
                if f.primary_key and not self._state.adding:
                    # no need to check for unique primary key when editing
                    continue
                lookup_kwargs[str(field_name)] = lookup_value
            # some fields were skipped, no reason to do the check
            if len(unique_check) != len(lookup_kwargs):
                continue
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            # Note that we need to use the pk as defined by model_class, not
            # self.pk. These can be different fields because model inheritance
            # allows single model to have effectively multiple primary keys.
            # Refs #17615.
            model_class_pk = self._get_pk_val(model_class._meta)
            if not self._state.adding and model_class_pk is not None:
                qs = qs.exclude(pk=model_class_pk)
            if qs.exists():
                if len(unique_check) == 1:
                    key = unique_check[0]
                else:
                    key = NON_FIELD_ERRORS
                errors.setdefault(key, []).append(self.unique_error_message(model_class, unique_check))
        return errors
    def _perform_date_checks(self, date_checks):
        """
        Runs the unique_for_date/year/month checks from _get_unique_checks
        against the database and returns a dict mapping field name to
        ValidationError lists.
        """
        errors = {}
        for model_class, lookup_type, field, unique_for in date_checks:
            lookup_kwargs = {}
            # there's a ticket to add a date lookup, we can remove this special
            # case if that makes it's way in
            date = getattr(self, unique_for)
            if date is None:
                # No date to compare against; the check is vacuous.
                continue
            if lookup_type == 'date':
                lookup_kwargs['%s__day' % unique_for] = date.day
                lookup_kwargs['%s__month' % unique_for] = date.month
                lookup_kwargs['%s__year' % unique_for] = date.year
            else:
                lookup_kwargs['%s__%s' % (unique_for, lookup_type)] = getattr(date, lookup_type)
            lookup_kwargs[field] = getattr(self, field)
            qs = model_class._default_manager.filter(**lookup_kwargs)
            # Exclude the current object from the query if we are editing an
            # instance (as opposed to creating a new one)
            if not self._state.adding and self.pk is not None:
                qs = qs.exclude(pk=self.pk)
            if qs.exists():
                errors.setdefault(field, []).append(
                    self.date_error_message(lookup_type, field, unique_for)
                )
        return errors
    def date_error_message(self, lookup_type, field_name, unique_for):
        """
        Builds the ValidationError for a failed unique_for_<date/year/month>
        check, with template params for field and date-field labels.
        """
        opts = self._meta
        field = opts.get_field(field_name)
        return ValidationError(
            message=field.error_messages['unique_for_date'],
            code='unique_for_date',
            params={
                'model': self,
                'model_name': six.text_type(capfirst(opts.verbose_name)),
                'lookup_type': lookup_type,
                'field': field_name,
                'field_label': six.text_type(capfirst(field.verbose_name)),
                'date_field': unique_for,
                'date_field_label': six.text_type(capfirst(opts.get_field(unique_for).verbose_name)),
            }
        )
    def unique_error_message(self, model_class, unique_check):
        """
        Builds the ValidationError for a failed uniqueness check:
        code 'unique' for a single field, 'unique_together' for a
        multi-field constraint.
        """
        opts = model_class._meta
        params = {
            'model': self,
            'model_class': model_class,
            'model_name': six.text_type(capfirst(opts.verbose_name)),
            'unique_check': unique_check,
        }
        # A unique field
        if len(unique_check) == 1:
            field = opts.get_field(unique_check[0])
            params['field_label'] = six.text_type(capfirst(field.verbose_name))
            return ValidationError(
                message=field.error_messages['unique'],
                code='unique',
                params=params,
            )
        # unique_together
        else:
            field_labels = [capfirst(opts.get_field(f).verbose_name) for f in unique_check]
            params['field_labels'] = six.text_type(get_text_list(field_labels, _('and')))
            return ValidationError(
                message=_("%(model_name)s with this %(field_labels)s already exists."),
                code='unique_together',
                params=params,
            )
    def full_clean(self, exclude=None, validate_unique=True):
        """
        Calls clean_fields, clean, and validate_unique, on the model,
        and raises a ``ValidationError`` for any errors that occurred.
        All three stages run even when an earlier one fails, so the raised
        error aggregates every problem found.
        """
        errors = {}
        if exclude is None:
            exclude = []
        else:
            exclude = list(exclude)
        try:
            self.clean_fields(exclude=exclude)
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Form.clean() is run even if other validation fails, so do the
        # same with Model.clean() for consistency.
        try:
            self.clean()
        except ValidationError as e:
            errors = e.update_error_dict(errors)
        # Run unique checks, but only for fields that passed validation.
        if validate_unique:
            for name in errors.keys():
                if name != NON_FIELD_ERRORS and name not in exclude:
                    exclude.append(name)
            try:
                self.validate_unique(exclude=exclude)
            except ValidationError as e:
                errors = e.update_error_dict(errors)
        if errors:
            raise ValidationError(errors)
    def clean_fields(self, exclude=None):
        """
        Cleans all fields and raises a ValidationError containing a dict
        of all validation errors if any occur.
        Each field's clean() result is written back onto the instance, so
        values are normalized in place.
        """
        if exclude is None:
            exclude = []
        errors = {}
        for f in self._meta.fields:
            if f.name in exclude:
                continue
            # Skip validation for empty fields with blank=True. The developer
            # is responsible for making sure they have a valid value.
            raw_value = getattr(self, f.attname)
            if f.blank and raw_value in f.empty_values:
                continue
            try:
                setattr(self, f.attname, f.clean(raw_value, self))
            except ValidationError as e:
                errors[f.name] = e.error_list
        if errors:
            raise ValidationError(errors)
    @classmethod
    def check(cls, **kwargs):
        """
        Entry point for the system check framework: aggregates all model-level
        checks and returns a list of check messages.
        """
        errors = []
        errors.extend(cls._check_swappable())
        errors.extend(cls._check_model())
        errors.extend(cls._check_managers(**kwargs))
        # Skip field/constraint checks for models swapped out via settings.
        if not cls._meta.swapped:
            errors.extend(cls._check_fields(**kwargs))
            errors.extend(cls._check_m2m_through_same_relationship())
            errors.extend(cls._check_long_column_names())
            clash_errors = cls._check_id_field() + cls._check_field_name_clashes()
            errors.extend(clash_errors)
            # If there are field name clashes, hide consequent column name
            # clashes.
            if not clash_errors:
                errors.extend(cls._check_column_name_clashes())
            errors.extend(cls._check_index_together())
            errors.extend(cls._check_unique_together())
            errors.extend(cls._check_ordering())
        return errors
    @classmethod
    def _check_swappable(cls):
        """ Check if the swapped model exists. Emits models.E001 for a
        malformed swappable setting and models.E002 for an uninstalled or
        abstract target. """
        errors = []
        if cls._meta.swapped:
            try:
                apps.get_model(cls._meta.swapped)
            except ValueError:
                # The setting isn't in "app_label.ModelName" form.
                errors.append(
                    checks.Error(
                        "'%s' is not of the form 'app_label.app_name'." % cls._meta.swappable,
                        hint=None,
                        obj=None,
                        id='models.E001',
                    )
                )
            except LookupError:
                # Well-formed reference, but the target model can't be loaded.
                app_label, model_name = cls._meta.swapped.split('.')
                errors.append(
                    checks.Error(
                        "'%s' references '%s.%s', which has not been "
                        "installed, or is abstract." % (
                            cls._meta.swappable, app_label, model_name
                        ),
                        hint=None,
                        obj=None,
                        id='models.E002',
                    )
                )
        return errors
@classmethod
def _check_model(cls):
errors = []
if cls._meta.proxy:
if cls._meta.local_fields or cls._meta.local_many_to_many:
errors.append(
checks.Error(
"Proxy model '%s' contains model fields." % cls.__name__,
hint=None,
obj=None,
id='models.E017',
)
)
return errors
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
errors = []
for __, manager, __ in cls._meta.managers:
errors.extend(manager.check(**kwargs))
return errors
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
for field in cls._meta.local_many_to_many:
errors.extend(field.check(from_model=cls, **kwargs))
return errors
    @classmethod
    def _check_m2m_through_same_relationship(cls):
        """ Check if no relationship model is used by more than one m2m field.
        Emits models.E003 for each duplicate (target, source, through)
        combination.
        """
        errors = []
        seen_intermediary_signatures = []
        fields = cls._meta.local_many_to_many
        # Skip when the target model wasn't found.
        fields = (f for f in fields if isinstance(f.remote_field.model, ModelBase))
        # Skip when the relationship model wasn't found.
        fields = (f for f in fields if isinstance(f.remote_field.through, ModelBase))
        for f in fields:
            signature = (f.remote_field.model, cls, f.remote_field.through)
            if signature in seen_intermediary_signatures:
                errors.append(
                    checks.Error(
                        "The model has two many-to-many relations through "
                        "the intermediate model '%s'." % f.remote_field.through._meta.label,
                        hint=None,
                        obj=cls,
                        id='models.E003',
                    )
                )
            else:
                seen_intermediary_signatures.append(signature)
        return errors
    @classmethod
    def _check_id_field(cls):
        """ Check if `id` field is a primary key. A field named "id" that is
        not the pk clashes with the auto-generated pk; emits models.E004. """
        fields = list(f for f in cls._meta.local_fields
                      if f.name == 'id' and f != cls._meta.pk)
        # fields is empty or consists of the invalid "id" field
        if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
            return [
                checks.Error(
                    "'id' can only be used as a field name if the field also "
                    "sets 'primary_key=True'.",
                    hint=None,
                    obj=cls,
                    id='models.E004',
                )
            ]
        else:
            return []
    @classmethod
    def _check_field_name_clashes(cls):
        """ Ref #17673. Detects field names shadowed across the inheritance
        chain: models.E005 for parent-vs-parent clashes, models.E006 for
        local-vs-inherited clashes. """
        errors = []
        used_fields = {}  # name or attname -> field
        # Check that multi-inheritance doesn't cause field name shadowing.
        for parent in cls._meta.get_parent_list():
            for f in parent._meta.local_fields:
                clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
                if clash:
                    errors.append(
                        checks.Error(
                            "The field '%s' from parent model "
                            "'%s' clashes with the field '%s' "
                            "from parent model '%s'." % (
                                clash.name, clash.model._meta,
                                f.name, f.model._meta
                            ),
                            hint=None,
                            obj=cls,
                            id='models.E005',
                        )
                    )
                used_fields[f.name] = f
                used_fields[f.attname] = f
        # Check that fields defined in the model don't clash with fields from
        # parents, including auto-generated fields like multi-table inheritance
        # child accessors.
        for parent in cls._meta.get_parent_list():
            for f in parent._meta.get_fields():
                if f not in used_fields:
                    used_fields[f.name] = f
        for f in cls._meta.local_fields:
            clash = used_fields.get(f.name) or used_fields.get(f.attname) or None
            # Note that we may detect clash between user-defined non-unique
            # field "id" and automatically added unique field "id", both
            # defined at the same model. This special case is considered in
            # _check_id_field and here we ignore it.
            id_conflict = (f.name == "id" and
                           clash and clash.name == "id" and clash.model == cls)
            if clash and not id_conflict:
                errors.append(
                    checks.Error(
                        "The field '%s' clashes with the field '%s' "
                        "from model '%s'." % (
                            f.name, clash.name, clash.model._meta
                        ),
                        hint=None,
                        obj=f,
                        id='models.E006',
                    )
                )
            used_fields[f.name] = f
            used_fields[f.attname] = f
        return errors
@classmethod
def _check_column_name_clashes(cls):
# Store a list of column names which have already been used by other fields.
used_column_names = []
errors = []
for f in cls._meta.local_fields:
_, column_name = f.get_attname_column()
# Ensure the column name is not already in use.
if column_name and column_name in used_column_names:
errors.append(
checks.Error(
"Field '%s' has column name '%s' that is used by "
"another field." % (f.name, column_name),
hint="Specify a 'db_column' for the field.",
obj=cls,
id='models.E007'
)
)
else:
used_column_names.append(column_name)
return errors
    @classmethod
    def _check_index_together(cls):
        """ Check the value of "index_together" option. Validates the shape
        (models.E008/E009) before validating the referenced fields. """
        if not isinstance(cls._meta.index_together, (tuple, list)):
            return [
                checks.Error(
                    "'index_together' must be a list or tuple.",
                    hint=None,
                    obj=cls,
                    id='models.E008',
                )
            ]
        elif any(not isinstance(fields, (tuple, list))
                 for fields in cls._meta.index_together):
            return [
                checks.Error(
                    "All 'index_together' elements must be lists or tuples.",
                    hint=None,
                    obj=cls,
                    id='models.E009',
                )
            ]
        else:
            errors = []
            for fields in cls._meta.index_together:
                errors.extend(cls._check_local_fields(fields, "index_together"))
            return errors
    @classmethod
    def _check_unique_together(cls):
        """ Check the value of "unique_together" option. Validates the shape
        (models.E010/E011) before validating the referenced fields. """
        if not isinstance(cls._meta.unique_together, (tuple, list)):
            return [
                checks.Error(
                    "'unique_together' must be a list or tuple.",
                    hint=None,
                    obj=cls,
                    id='models.E010',
                )
            ]
        elif any(not isinstance(fields, (tuple, list))
                 for fields in cls._meta.unique_together):
            return [
                checks.Error(
                    "All 'unique_together' elements must be lists or tuples.",
                    hint=None,
                    obj=cls,
                    id='models.E011',
                )
            ]
        else:
            errors = []
            for fields in cls._meta.unique_together:
                errors.extend(cls._check_local_fields(fields, "unique_together"))
            return errors
    @classmethod
    def _check_local_fields(cls, fields, option):
        """
        Validate that each name in ``fields`` is an existing, local,
        non-m2m field; ``option`` names the Meta option being checked
        (for error messages). Emits models.E012/E013/E016.
        """
        from django.db import models
        # In order to avoid hitting the relation tree prematurely, we use our
        # own fields_map instead of using get_field()
        forward_fields_map = {
            field.name: field for field in cls._meta._get_fields(reverse=False)
        }
        errors = []
        for field_name in fields:
            try:
                field = forward_fields_map[field_name]
            except KeyError:
                errors.append(
                    checks.Error(
                        "'%s' refers to the non-existent field '%s'." % (
                            option, field_name,
                        ),
                        hint=None,
                        obj=cls,
                        id='models.E012',
                    )
                )
            else:
                if isinstance(field.remote_field, models.ManyToManyRel):
                    errors.append(
                        checks.Error(
                            "'%s' refers to a ManyToManyField '%s', but "
                            "ManyToManyFields are not permitted in '%s'." % (
                                option, field_name, option,
                            ),
                            hint=None,
                            obj=cls,
                            id='models.E013',
                        )
                    )
                elif field not in cls._meta.local_fields:
                    errors.append(
                        checks.Error(
                            ("'%s' refers to field '%s' which is not local "
                             "to model '%s'.") % (
                                option, field_name, cls._meta.object_name,
                            ),
                            hint=("This issue may be caused by multi-table "
                                  "inheritance."),
                            obj=cls,
                            id='models.E016',
                        )
                    )
        return errors
    @classmethod
    def _check_ordering(cls):
        """ Check "ordering" option -- is it a list of strings and do all fields
        exist? Emits models.E021 (clash with order_with_respect_to),
        models.E014 (wrong type) and models.E015 (unknown field). """
        if cls._meta._ordering_clash:
            return [
                checks.Error(
                    "'ordering' and 'order_with_respect_to' cannot be used together.",
                    hint=None,
                    obj=cls,
                    id='models.E021',
                ),
            ]
        if cls._meta.order_with_respect_to or not cls._meta.ordering:
            return []
        if not isinstance(cls._meta.ordering, (list, tuple)):
            return [
                checks.Error(
                    ("'ordering' must be a tuple or list "
                     "(even if you want to order by only one field)."),
                    hint=None,
                    obj=cls,
                    id='models.E014',
                )
            ]
        errors = []
        fields = cls._meta.ordering
        # Skip '?' fields.
        fields = (f for f in fields if f != '?')
        # Convert "-field" to "field".
        fields = ((f[1:] if f.startswith('-') else f) for f in fields)
        # Skip ordering in the format field1__field2 (FIXME: checking
        # this format would be nice, but it's a little fiddly).
        fields = (f for f in fields if '__' not in f)
        # Skip ordering on pk. This is always a valid order_by field
        # but is an alias and therefore won't be found by opts.get_field.
        fields = {f for f in fields if f != 'pk'}
        # Check for invalid or non-existent fields in ordering.
        invalid_fields = []
        # Any field name that is not present in field_names does not exist.
        # Also, ordering by m2m fields is not allowed.
        opts = cls._meta
        valid_fields = set(chain.from_iterable(
            (f.name, f.attname) if not (f.auto_created and not f.concrete) else (f.field.related_query_name(),)
            for f in chain(opts.fields, opts.related_objects)
        ))
        invalid_fields.extend(fields - valid_fields)
        for invalid_field in invalid_fields:
            errors.append(
                checks.Error(
                    "'ordering' refers to the non-existent field '%s'." % invalid_field,
                    hint=None,
                    obj=cls,
                    id='models.E015',
                )
            )
        return errors
    @classmethod
    def _check_long_column_names(cls):
        """
        Check that any auto-generated column names are shorter than the limits
        for each database in which the model will be created.
        Emits models.E018 for regular fields and models.E019 for m2m
        through-table columns.
        """
        errors = []
        allowed_len = None
        db_alias = None
        # Find the minimum max allowed length among all specified db_aliases.
        for db in settings.DATABASES.keys():
            # skip databases where the model won't be created
            if not router.allow_migrate_model(db, cls):
                continue
            connection = connections[db]
            max_name_length = connection.ops.max_name_length()
            # Backends that truncate names themselves impose no hard limit.
            if max_name_length is None or connection.features.truncates_names:
                continue
            else:
                if allowed_len is None:
                    allowed_len = max_name_length
                    db_alias = db
                elif max_name_length < allowed_len:
                    allowed_len = max_name_length
                    db_alias = db
        if allowed_len is None:
            # No applicable database imposes a limit; nothing to check.
            return errors
        for f in cls._meta.local_fields:
            _, column_name = f.get_attname_column()
            # Check if auto-generated name for the field is too long
            # for the database.
            if (f.db_column is None and column_name is not None
                    and len(column_name) > allowed_len):
                errors.append(
                    checks.Error(
                        'Autogenerated column name too long for field "%s". '
                        'Maximum length is "%s" for database "%s".'
                        % (column_name, allowed_len, db_alias),
                        hint="Set the column name manually using 'db_column'.",
                        obj=cls,
                        id='models.E018',
                    )
                )
        for f in cls._meta.local_many_to_many:
            # Check if auto-generated name for the M2M field is too long
            # for the database.
            for m2m in f.remote_field.through._meta.local_fields:
                _, rel_name = m2m.get_attname_column()
                if (m2m.db_column is None and rel_name is not None
                        and len(rel_name) > allowed_len):
                    errors.append(
                        checks.Error(
                            'Autogenerated column name too long for M2M field '
                            '"%s". Maximum length is "%s" for database "%s".'
                            % (rel_name, allowed_len, db_alias),
                            hint=("Use 'through' to create a separate model "
                                  "for M2M and then set column_name using "
                                  "'db_column'."),
                            obj=cls,
                            id='models.E019',
                        )
                    )
        return errors
############################################
# HELPER FUNCTIONS (CURRIED MODEL METHODS) #
############################################
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list, using=None):
    """
    Curried into set_<model>_order() on the related model: rewrites the
    _order column of *self*'s related objects so it matches the pk sequence
    given in id_list.
    """
    if using is None:
        using = DEFAULT_DB_ALIAS
    order_wrt = ordered_obj._meta.order_with_respect_to
    filter_args = order_wrt.get_forward_related_filter(self)
    # FIXME: It would be nice if there was an "update many" version of update
    # for situations like this.
    with transaction.atomic(using=using, savepoint=False):
        for i, j in enumerate(id_list):
            ordered_obj.objects.filter(pk=j, **filter_args).update(_order=i)
def method_get_order(ordered_obj, self):
    """
    Curried into get_<model>_order() on the related model: returns the pks of
    *self*'s related objects in their current _order.
    """
    meta = ordered_obj._meta
    related_filter = meta.order_with_respect_to.get_forward_related_filter(self)
    return ordered_obj.objects.filter(**related_filter).values_list(
        meta.pk.name, flat=True)
def make_foreign_order_accessors(model, related_model):
    """Attach ``get_<model>_order``/``set_<model>_order`` accessors (curried
    versions of the two methods above) to ``related_model``."""
    lowered = model.__name__.lower()
    accessors = (
        ('get_%s_order' % lowered, method_get_order),
        ('set_%s_order' % lowered, method_set_order),
    )
    for accessor_name, func in accessors:
        setattr(related_model, accessor_name, curry(func, model))
########
# MISC #
########
def simple_class_factory(model, attrs):
    """
    Needed for dynamic classes.

    Identity factory used during unpickling: ignores ``attrs`` and hands
    back ``model`` unchanged.
    """
    return model
def model_unpickle(model_id, attrs, factory):
    """
    Used to unpickle Model subclasses with deferred fields.

    ``model_id`` is either an ``(app_label, model_name)`` tuple, resolved
    through the app registry, or (backwards compat) the model class itself.
    """
    if not isinstance(model_id, tuple):
        # Backwards compat - the model was cached directly in earlier versions.
        model = model_id
    else:
        if not apps.ready:
            apps.populate(settings.INSTALLED_APPS)
        model = apps.get_model(*model_id)
    cls = factory(model, attrs)
    # Allocate without running __init__; pickle restores the state afterwards.
    return cls.__new__(cls)
model_unpickle.__safe_for_unpickle__ = True
def unpickle_inner_exception(klass, exception_name):
    """Recreate (without initialising) an exception class that lives as an
    attribute of ``klass``, e.g. ``SomeModel.DoesNotExist``."""
    # Get the exception class from the class it is attached to, then
    # allocate an instance via __new__ so no constructor args are needed.
    exception_cls = getattr(klass, exception_name)
    return exception_cls.__new__(exception_cls)
| gpl-3.0 |
bwrsandman/OpenUpgrade | openerp/sql_db.py | 168 | 23783 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
# Copyright (C) 2010-2014 OpenERP s.a. (<http://openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
"""
The PostgreSQL connector is a connectivity layer between the OpenERP code and
the database, *not* a database abstraction toolkit. Database abstraction is what
the ORM does, in fact.
"""
from contextlib import contextmanager
from functools import wraps
import logging
import urlparse
import uuid
import psycopg2.extras
import psycopg2.extensions
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT, ISOLATION_LEVEL_READ_COMMITTED, ISOLATION_LEVEL_REPEATABLE_READ
from psycopg2.pool import PoolError
# Have psycopg2 return unicode objects for text columns.
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
_logger = logging.getLogger(__name__)
# PostgreSQL type OIDs for the date/time types that must reach the
# application as plain strings (see the register_type() calls below).
types_mapping = {
    'date': (1082,),
    'time': (1083,),
    'datetime': (1114,),
}
def unbuffer(symb, cr):
    """psycopg2 typecast helper: render a DB value as ``str`` (``None``
    passes through unchanged)."""
    return None if symb is None else str(symb)
def undecimalize(symb, cr):
    """psycopg2 typecast helper: convert numeric/decimal DB values to
    ``float`` (``None`` passes through unchanged)."""
    return None if symb is None else float(symb)
# Register the typecasters: date/time values are returned as the raw strings
# sent by the server (lambda x, cr: x), and float4/float8/numeric (OIDs
# 700/701/1700) come back as Python floats instead of Decimal objects.
for name, typeoid in types_mapping.items():
    psycopg2.extensions.register_type(psycopg2.extensions.new_type(typeoid, name, lambda x, cr: x))
psycopg2.extensions.register_type(psycopg2.extensions.new_type((700, 701, 1700,), 'float', undecimalize))
import tools
from tools.func import frame_codeinfo
from datetime import datetime as mdt
from datetime import timedelta
import threading
from inspect import currentframe
import re
# Crude patterns extracting the main table name from "... FROM <table> ..."
# and "... INTO <table> ..." statements; used only for the per-table SQL
# statistics gathered in Cursor.execute()/print_log().
re_from = re.compile('.* from "?([a-zA-Z_0-9]+)"? .*$')
re_into = re.compile('.* into "?([a-zA-Z_0-9]+)"? .*$')
# Process-wide count of executed queries, across all cursors.
sql_counter = 0
class Cursor(object):
    """Represents an open transaction to the PostgreSQL DB backend,
    acting as a lightweight wrapper around psycopg2's
    ``cursor`` objects.
    ``Cursor`` is the object behind the ``cr`` variable used all
    over the OpenERP code.
    .. rubric:: Transaction Isolation
    One very important property of database transactions is the
    level of isolation between concurrent transactions.
    The SQL standard defines four levels of transaction isolation,
    ranging from the most strict *Serializable* level, to the least
    strict *Read Uncommitted* level. These levels are defined in
    terms of the phenomena that must not occur between concurrent
    transactions, such as *dirty read*, etc.
    In the context of a generic business data management software
    such as OpenERP, we need the best guarantees that no data
    corruption can ever be cause by simply running multiple
    transactions in parallel. Therefore, the preferred level would
    be the *serializable* level, which ensures that a set of
    transactions is guaranteed to produce the same effect as
    running them one at a time in some order.
    However, most database management systems implement a limited
    serializable isolation in the form of
    `snapshot isolation <http://en.wikipedia.org/wiki/Snapshot_isolation>`_,
    providing most of the same advantages as True Serializability,
    with a fraction of the performance cost.
    With PostgreSQL up to version 9.0, this snapshot isolation was
    the implementation of both the ``REPEATABLE READ`` and
    ``SERIALIZABLE`` levels of the SQL standard.
    As of PostgreSQL 9.1, the previous snapshot isolation implementation
    was kept for ``REPEATABLE READ``, while a new ``SERIALIZABLE``
    level was introduced, providing some additional heuristics to
    detect a concurrent update by parallel transactions, and forcing
    one of them to rollback.
    OpenERP implements its own level of locking protection
    for transactions that are highly likely to provoke concurrent
    updates, such as stock reservations or document sequences updates.
    Therefore we mostly care about the properties of snapshot isolation,
    but we don't really need additional heuristics to trigger transaction
    rollbacks, as we are taking care of triggering instant rollbacks
    ourselves when it matters (and we can save the additional performance
    hit of these heuristics).
    As a result of the above, we have selected ``REPEATABLE READ`` as
    the default transaction isolation level for OpenERP cursors, as
    it will be mapped to the desired ``snapshot isolation`` level for
    all supported PostgreSQL version (8.3 - 9.x).
    Note: up to psycopg2 v.2.4.2, psycopg2 itself remapped the repeatable
    read level to serializable before sending it to the database, so it would
    actually select the new serializable mode on PostgreSQL 9.1. Make
    sure you use psycopg2 v2.4.2 or newer if you use PostgreSQL 9.1 and
    the performance hit is a concern for you.
    .. attribute:: cache
        Cache dictionary with a "request" (-ish) lifecycle, only lives as
        long as the cursor itself does and proactively cleared when the
        cursor is closed.
        This cache should *only* be used to store repeatable reads as it
        ignores rollbacks and savepoints, it should not be used to store
        *any* data which may be modified during the life of the cursor.
    """
    IN_MAX = 1000 # decent limit on size of IN queries - guideline = Oracle limit
    # Decorator (applied to the public methods below): refuse to operate on a
    # cursor once it has been closed, raising OperationalError and reporting
    # where the cursor was closed when that location is known.
    def check(f):
        @wraps(f)
        def wrapper(self, *args, **kwargs):
            if self._closed:
                msg = 'Unable to use a closed cursor.'
                if self.__closer:
                    msg += ' It was closed at %s, line %s' % self.__closer
                raise psycopg2.OperationalError(msg)
            return f(self, *args, **kwargs)
        return wrapper
    def __init__(self, pool, dbname, dsn, serialized=True):
        # Per-table statistics ({table: [query_count, total_delay_us]}),
        # filled by execute() and reported by print_log().
        self.sql_from_log = {}
        self.sql_into_log = {}
        # default log level determined at cursor creation, could be
        # overridden later for debugging purposes
        self.sql_log = _logger.isEnabledFor(logging.DEBUG)
        self.sql_log_count = 0
        # avoid the call of close() (by __del__) if an exception
        # is raised by any of the following initialisations
        self._closed = True
        self.__pool = pool
        self.dbname = dbname
        # Whether to enable snapshot isolation level for this cursor.
        # see also the docstring of Cursor.
        self._serialized = serialized
        self._cnx = pool.borrow(dsn)
        self._obj = self._cnx.cursor()
        if self.sql_log:
            self.__caller = frame_codeinfo(currentframe(), 2)
        else:
            self.__caller = False
        self._closed = False # real initialisation value
        self.autocommit(False)
        self.__closer = False
        self._default_log_exceptions = True
        self.cache = {}
    # Map one result row to a {column_name: value} dict, using the cursor
    # description for the column names.
    def __build_dict(self, row):
        return {d.name: row[i] for i, d in enumerate(self._obj.description)}
    def dictfetchone(self):
        row = self._obj.fetchone()
        return row and self.__build_dict(row)
    # NOTE(review): this module targets Python 2, where map() returns a
    # list; on Python 3 these two methods would return lazy iterators.
    def dictfetchmany(self, size):
        return map(self.__build_dict, self._obj.fetchmany(size))
    def dictfetchall(self):
        return map(self.__build_dict, self._obj.fetchall())
    def __del__(self):
        if not self._closed and not self._cnx.closed:
            # Oops. 'self' has not been closed explicitly.
            # The cursor will be deleted by the garbage collector,
            # but the database connection is not put back into the connection
            # pool, preventing some operation on the database like dropping it.
            # This can also lead to a server overload.
            msg = "Cursor not closed explicitly\n"
            if self.__caller:
                msg += "Cursor was created at %s:%s" % self.__caller
            else:
                msg += "Please enable sql debugging to trace the caller."
            _logger.warning(msg)
            self._close(True)
    @check
    def execute(self, query, params=None, log_exceptions=None):
        if '%d' in query or '%f' in query:
            _logger.warning(query)
            _logger.warning("SQL queries cannot contain %d or %f anymore. Use only %s")
        if params and not isinstance(params, (tuple, list, dict)):
            _logger.error("SQL query parameters should be a tuple, list or dict; got %r", params)
            raise ValueError("SQL query parameters should be a tuple, list or dict; got %r" % (params,))
        if self.sql_log:
            now = mdt.now()
        try:
            params = params or None
            res = self._obj.execute(query, params)
        # Python 2 'except ExcType, var' syntax: this file predates Python 3.
        except psycopg2.ProgrammingError, pe:
            if self._default_log_exceptions if log_exceptions is None else log_exceptions:
                _logger.error("Programming error: %s, in query %s", pe, query)
            raise
        except Exception:
            if self._default_log_exceptions if log_exceptions is None else log_exceptions:
                _logger.exception("bad query: %s", self._obj.query or query)
            raise
        # simple query count is always computed
        self.sql_log_count += 1
        # advanced stats only if sql_log is enabled
        if self.sql_log:
            delay = mdt.now() - now
            delay = delay.seconds * 1E6 + delay.microseconds
            _logger.debug("query: %s", self._obj.query)
            res_from = re_from.match(query.lower())
            if res_from:
                self.sql_from_log.setdefault(res_from.group(1), [0, 0])
                self.sql_from_log[res_from.group(1)][0] += 1
                self.sql_from_log[res_from.group(1)][1] += delay
            res_into = re_into.match(query.lower())
            if res_into:
                self.sql_into_log.setdefault(res_into.group(1), [0, 0])
                self.sql_into_log[res_into.group(1)][0] += 1
                self.sql_into_log[res_into.group(1)][1] += delay
        return res
    def split_for_in_conditions(self, ids):
        """Split a list of identifiers into one or more smaller tuples
        safe for IN conditions, after uniquifying them."""
        return tools.misc.split_every(self.IN_MAX, ids)
    def print_log(self):
        # Emit the per-table SQL statistics gathered by execute(); only does
        # anything when sql_log (debug logging) was enabled.
        global sql_counter
        if not self.sql_log:
            return
        def process(type):
            sqllogs = {'from': self.sql_from_log, 'into': self.sql_into_log}
            sum = 0
            if sqllogs[type]:
                # NOTE(review): Python 2 only - dict.items() returns a list
                # here, and the second sort() uses the py2-only cmp-function
                # form. The second sort overrides the first ordering, so
                # entries end up sorted by query count.
                sqllogitems = sqllogs[type].items()
                sqllogitems.sort(key=lambda k: k[1][1])
                _logger.debug("SQL LOG %s:", type)
                sqllogitems.sort(lambda x, y: cmp(x[1][0], y[1][0]))
                for r in sqllogitems:
                    delay = timedelta(microseconds=r[1][1])
                    _logger.debug("table: %s: %s/%s", r[0], delay, r[1][0])
                    sum += r[1][1]
                sqllogs[type].clear()
            sum = timedelta(microseconds=sum)
            _logger.debug("SUM %s:%s/%d [%d]", type, sum, self.sql_log_count, sql_counter)
            # NOTE(review): redundant second clear() (the dict is already
            # empty when the branch above ran) - harmless.
            sqllogs[type].clear()
        process('from')
        process('into')
        self.sql_log_count = 0
        self.sql_log = False
    @check
    def close(self):
        return self._close(False)
    def _close(self, leak=False):
        # Really close the cursor and return its connection to the pool.
        # With leak=True the connection is flagged as 'leaked' instead, so the
        # pool can reclaim it later (see ConnectionPool.borrow()).
        global sql_counter
        if not self._obj:
            return
        del self.cache
        if self.sql_log:
            self.__closer = frame_codeinfo(currentframe(), 3)
        # simple query count is always computed
        sql_counter += self.sql_log_count
        # advanced stats only if sql_log is enabled
        self.print_log()
        self._obj.close()
        # This force the cursor to be freed, and thus, available again. It is
        # important because otherwise we can overload the server very easily
        # because of a cursor shortage (because cursors are not garbage
        # collected as fast as they should). The problem is probably due in
        # part because browse records keep a reference to the cursor.
        del self._obj
        self._closed = True
        # Clean the underlying connection.
        self._cnx.rollback()
        if leak:
            self._cnx.leaked = True
        else:
            # Never keep connections to template databases in the pool -
            # presumably because an open connection would block
            # CREATE DATABASE ... TEMPLATE; confirm.
            chosen_template = tools.config['db_template']
            templates_list = tuple(set(['template0', 'template1', 'postgres', chosen_template]))
            keep_in_pool = self.dbname not in templates_list
            self.__pool.give_back(self._cnx, keep_in_pool=keep_in_pool)
    @check
    def autocommit(self, on):
        # Switch the underlying connection between autocommit and the
        # transaction isolation level selected for this cursor.
        if on:
            isolation_level = ISOLATION_LEVEL_AUTOCOMMIT
        else:
            # If a serializable cursor was requested, we
            # use the appropriate PotsgreSQL isolation level
            # that maps to snaphsot isolation.
            # For all supported PostgreSQL versions (8.3-9.x),
            # this is currently the ISOLATION_REPEATABLE_READ.
            # See also the docstring of this class.
            # NOTE: up to psycopg 2.4.2, repeatable read
            # is remapped to serializable before being
            # sent to the database, so it is in fact
            # unavailable for use with pg 9.1.
            isolation_level = \
                ISOLATION_LEVEL_REPEATABLE_READ \
                if self._serialized \
                else ISOLATION_LEVEL_READ_COMMITTED
        self._cnx.set_isolation_level(isolation_level)
    @check
    def commit(self):
        """ Perform an SQL `COMMIT`
        """
        return self._cnx.commit()
    @check
    def rollback(self):
        """ Perform an SQL `ROLLBACK`
        """
        return self._cnx.rollback()
    def __enter__(self):
        """ Using the cursor as a contextmanager automatically commits and
            closes it::
            with cr:
                cr.execute(...)
                # cr is committed if no failure occurred
                # cr is closed in any case
        """
        return self
    def __exit__(self, exc_type, exc_value, traceback):
        if exc_type is None:
            self.commit()
        self.close()
    @contextmanager
    @check
    def savepoint(self):
        """context manager entering in a new savepoint"""
        name = uuid.uuid1().hex
        self.execute('SAVEPOINT "%s"' % name)
        try:
            yield
            self.execute('RELEASE SAVEPOINT "%s"' % name)
        except:
            self.execute('ROLLBACK TO SAVEPOINT "%s"' % name)
            raise
    @check
    def __getattr__(self, name):
        # Delegate everything else to the underlying psycopg2 cursor object.
        return getattr(self._obj, name)
    @property
    def closed(self):
        return self._closed
class TestCursor(Cursor):
    """ A cursor to be used for tests. It keeps the transaction open across
    several requests, and simulates committing, rolling back, and closing.
    """
    def __init__(self, *args, **kwargs):
        super(TestCursor, self).__init__(*args, **kwargs)
        # in order to simulate commit and rollback, the cursor maintains a
        # savepoint at its last commit
        self.execute("SAVEPOINT test_cursor")
        # we use a lock to serialize concurrent requests
        self._lock = threading.RLock()
    # acquire()/release() let callers serialize the requests that share this
    # long-lived cursor.
    def acquire(self):
        self._lock.acquire()
    def release(self):
        self._lock.release()
    def force_close(self):
        # Really close the cursor; close() below only simulates closing.
        super(TestCursor, self).close()
    def close(self):
        if not self._closed:
            self.rollback() # for stuff that has not been committed
        self.release()
    def autocommit(self, on):
        # Autocommit would end the long-lived test transaction, so ignore it.
        _logger.debug("TestCursor.autocommit(%r) does nothing", on)
    def commit(self):
        # Simulated commit: move the savepoint past the current work.
        self.execute("RELEASE SAVEPOINT test_cursor")
        self.execute("SAVEPOINT test_cursor")
    def rollback(self):
        # Simulated rollback: discard work since the last simulated commit.
        self.execute("ROLLBACK TO SAVEPOINT test_cursor")
        self.execute("SAVEPOINT test_cursor")
# Connection subclass used as psycopg2's connection_factory: it exists so the
# pool can attach extra attributes (e.g. _original_dsn, leaked) to its
# connections.
class PsycoConnection(psycopg2.extensions.connection):
    pass
class ConnectionPool(object):
    """ The pool of connections to database(s)
    Keep a set of connections to pg databases open, and reuse them
    to open cursors for all transactions.
    The connections are *not* automatically closed. Only a close_db()
    can trigger that.
    """
    def locked(fun):
        """Decorator serializing every pool operation on ``self._lock``."""
        @wraps(fun)
        def _locked(self, *args, **kwargs):
            self._lock.acquire()
            try:
                return fun(self, *args, **kwargs)
            finally:
                self._lock.release()
        return _locked
    def __init__(self, maxconn=64):
        # List of (connection, in_use) pairs; a connection appears at most once.
        self._connections = []
        self._maxconn = max(maxconn, 1)
        self._lock = threading.Lock()
    def __repr__(self):
        # Iterate over a copy: __repr__ is not @locked, so the list may be
        # mutated concurrently.
        used = sum(1 for c, u in self._connections[:] if u)
        count = len(self._connections)
        return "ConnectionPool(used=%d/count=%d/max=%d)" % (used, count, self._maxconn)
    def _debug(self, msg, *args):
        _logger.debug(('%r ' + msg), self, *args)
    @locked
    def borrow(self, dsn):
        """Return a free pooled connection for ``dsn``, creating one if needed.

        Dead and leaked connections are reclaimed first; when the pool is full,
        the oldest unused connection is evicted. Raises ``PoolError`` when
        every slot is in use."""
        # free dead and leaked connections
        for i, (cnx, _) in tools.reverse_enumerate(self._connections):
            if cnx.closed:
                self._connections.pop(i)
                self._debug('Removing closed connection at index %d: %r', i, cnx.dsn)
                continue
            if getattr(cnx, 'leaked', False):
                delattr(cnx, 'leaked')
                self._connections.pop(i)
                self._connections.append((cnx, False))
                _logger.warning('%r: Free leaked connection to %r', self, cnx.dsn)
        for i, (cnx, used) in enumerate(self._connections):
            if not used and cnx._original_dsn == dsn:
                try:
                    cnx.reset()
                except psycopg2.OperationalError:
                    self._debug('Cannot reset connection at index %d: %r', i, cnx.dsn)
                    # psycopg2 2.4.4 and earlier do not allow closing a closed connection
                    if not cnx.closed:
                        cnx.close()
                    continue
                # Mark the connection as in use (moved to the end of the list).
                self._connections.pop(i)
                self._connections.append((cnx, True))
                self._debug('Borrow existing connection to %r at index %d', cnx.dsn, i)
                return cnx
        if len(self._connections) >= self._maxconn:
            # try to remove the oldest connection not used
            for i, (cnx, used) in enumerate(self._connections):
                if not used:
                    self._connections.pop(i)
                    if not cnx.closed:
                        cnx.close()
                    self._debug('Removing old connection at index %d: %r', i, cnx.dsn)
                    break
            else:
                # note: this code is called only if the for loop has completed (no break)
                raise PoolError('The Connection Pool Is Full')
        try:
            result = psycopg2.connect(dsn=dsn, connection_factory=PsycoConnection)
        except psycopg2.Error:
            _logger.exception('Connection to the database failed')
            raise
        result._original_dsn = dsn
        self._connections.append((result, True))
        self._debug('Create new connection')
        return result
    @locked
    def give_back(self, connection, keep_in_pool=True):
        """Mark ``connection`` as free again, or close and forget it when
        ``keep_in_pool`` is false. Raises ``PoolError`` for foreign connections."""
        self._debug('Give back connection to %r', connection.dsn)
        for i, (cnx, used) in enumerate(self._connections):
            if cnx is connection:
                self._connections.pop(i)
                if keep_in_pool:
                    self._connections.append((cnx, False))
                    self._debug('Put connection to %r in pool', cnx.dsn)
                else:
                    self._debug('Forgot connection to %r', cnx.dsn)
                    cnx.close()
                break
        else:
            # BUGFIX: message used to read "does not below to the pool".
            raise PoolError('This connection does not belong to the pool')
    @locked
    def close_all(self, dsn=None):
        """Close every pooled connection, or only those matching ``dsn``."""
        count = 0
        last = None
        for i, (cnx, used) in tools.reverse_enumerate(self._connections):
            if dsn is None or cnx._original_dsn == dsn:
                cnx.close()
                last = self._connections.pop(i)[0]
                count += 1
        _logger.info('%r: Closed %d connections %s', self, count,
                     (dsn and last and 'to %r' % last.dsn) or '')
class Connection(object):
    """ A lightweight instance of a connection to postgres
    """
    def __init__(self, pool, dbname, dsn):
        self.dbname = dbname
        self.dsn = dsn
        self.__pool = pool
    def cursor(self, serialized=True):
        # Open a new Cursor on this database (snapshot isolation by default;
        # see the Cursor docstring).
        cursor_type = serialized and 'serialized ' or ''
        _logger.debug('create %scursor to %r', cursor_type, self.dsn)
        return Cursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
    def test_cursor(self, serialized=True):
        # Open a TestCursor (simulated commit/rollback) on this database.
        cursor_type = serialized and 'serialized ' or ''
        _logger.debug('create test %scursor to %r', cursor_type, self.dsn)
        return TestCursor(self.__pool, self.dbname, self.dsn, serialized=serialized)
    # serialized_cursor is deprecated - cursors are serialized by default
    serialized_cursor = cursor
    def __nonzero__(self):
        """Check if connection is possible"""
        # Python 2 truth protocol (__bool__ on Python 3). Deprecated because
        # it opens and closes a real cursor just to test connectivity.
        try:
            _logger.warning("__nonzero__() is deprecated. (It is too expensive to test a connection.)")
            cr = self.cursor()
            cr.close()
            return True
        except Exception:
            return False
def dsn(db_or_uri):
    """parse the given `db_or_uri` and return a 2-tuple (dbname, uri)"""
    if db_or_uri.startswith(('postgresql://', 'postgres://')):
        # A full URI: take the database name from the path, falling back to
        # the user name, then the host name.
        parts = urlparse.urlsplit(db_or_uri)
        if len(parts.path) > 1:
            name = parts.path[1:]
        elif parts.username:
            name = parts.username
        else:
            name = parts.hostname
        return name, db_or_uri
    # A bare database name: build a libpq connection string from the
    # configured host/port/user/password plus that name.
    params = []
    for key in ('host', 'port', 'user', 'password'):
        value = tools.config['db_' + key]
        if value:
            params.append('%s=%s ' % (key, value))
    return db_or_uri, '%sdbname=%s' % (''.join(params), db_or_uri)
# Shared pool of PostgreSQL connections, created lazily by db_connect().
_Pool = None
def db_connect(to, allow_uri=False):
    """Return a Connection to ``to`` (a database name or, when allowed, a
    full URI), creating the shared connection pool on first use."""
    global _Pool
    if _Pool is None:
        max_conn = int(tools.config['db_maxconn'])
        _Pool = ConnectionPool(max_conn)
    db, uri = dsn(to)
    if db != to and not allow_uri:
        raise ValueError('URI connections not allowed')
    return Connection(_Pool, db, uri)
def close_db(db_name):
    """Close all pooled connections to ``db_name``.
    You might want to call openerp.modules.registry.RegistryManager.delete(db_name)
    along with this function."""
    global _Pool
    if not _Pool:
        return
    _, uri = dsn(db_name)
    _Pool.close_all(uri)
def close_all():
    """Close every connection held by the shared pool, if one exists."""
    global _Pool
    if _Pool is not None:
        _Pool.close_all()
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dermoth/gramps | gramps/gen/plug/menu/_number.py | 11 | 2809 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2007-2008 Brian G. Matherly
# Copyright (C) 2008 Gary Burton
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
"""
Option class representing a number.
"""
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from . import Option
#-------------------------------------------------------------------------
#
# NumberOption class
#
#-------------------------------------------------------------------------
class NumberOption(Option):
    """
    This class describes an option that is a simple number with defined maximum
    and minimum values.
    """
    def __init__(self, label, value, min_val, max_val, step=1):
        """
        :param label: A friendly label to be applied to this option.
            Example: "Number of generations to include"
        :type label: string
        :param value: An initial value for this option.
            Example: 5
        :type value: int
        :param min_val: The minimum value for this option.
            Example: 1
        :type min_val: int
        :param max_val: The maximum value for this option.
            Example: 10
        :type max_val: int
        :param step: The step size for this option.
            Example: 0.01
        :type step: int or float
        :return: nothing
        """
        Option.__init__(self, label, value)
        self.__min = min_val
        self.__max = max_val
        self.__step = step
    def get_min(self):
        """
        Get the minimum value for this option.

        :return: an int that represents the minimum value for this option.
        """
        return self.__min
    def get_max(self):
        """
        Get the maximum value for this option.

        :return: an int that represents the maximum value for this option.
        """
        return self.__max
    def get_step(self):
        """
        Get the step size for this option.

        :return: an int or float that represents the step size for this option.
        """
        return self.__step
fevxie/odoo | openerp/tools/win32.py | 457 | 1993 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import locale
import time
import datetime
# Shim for platforms (Windows) whose `locale` module lacks nl_langinfo and
# its D_FMT/T_FMT constants: emulate them by formatting a known date/time
# with the current locale and mapping each rendered field back to its
# strftime directive.
if not hasattr(locale, 'D_FMT'):
    locale.D_FMT = 1
if not hasattr(locale, 'T_FMT'):
    locale.T_FMT = 2
if not hasattr(locale, 'nl_langinfo'):
    def nl_langinfo(param):
        # Return the locale's date (D_FMT) or time (T_FMT) format string.
        if param == locale.D_FMT:
            val = time.strptime('30/12/2004', '%d/%m/%Y')
            dt = datetime.datetime(*val[:-2])
            format_date = dt.strftime('%x')
            # NOTE(review): the ('04', '%Y') pair maps a two-digit year back
            # to the four-digit %Y directive - presumably %y was intended
            # for locales rendering two-digit years; confirm.
            for x, y in [('30', '%d'),('12', '%m'),('2004','%Y'),('04', '%Y')]:
                format_date = format_date.replace(x, y)
            return format_date
        if param == locale.T_FMT:
            val = time.strptime('13:24:56', '%H:%M:%S')
            dt = datetime.datetime(*val[:-2])
            format_time = dt.strftime('%X')
            for x, y in [('13', '%H'),('24', '%M'),('56','%S')]:
                format_time = format_time.replace(x, y)
            return format_time
    locale.nl_langinfo = nl_langinfo
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
irisfeng/CodeScanner | SZQRCodeViewController/Pods/AVOSCloudCrashReporting/Breakpad/src/tools/gyp/test/subdirectory/gyptest-top-default.py | 261 | 1363 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies building a target and a subsidiary dependent target from a
.gyp file in a subdirectory, without specifying an explicit output build
directory, and using the generated solution or project file at the top
of the tree as the entry point.
There is a difference here in the default behavior of the underlying
build tools. Specifically, when building the entire "solution", Xcode
puts the output of each project relative to the .xcodeproj directory,
while Visual Studio (and our implementation of Make) put it
in a build directory relative to the "solution"--that is, the entry-point
from which you built the entire tree.
"""
# Project-local test harness module (lives next to this script).
import TestGyp
test = TestGyp.TestGyp()
# Generate build files from the .gyp file located in a subdirectory.
test.run_gyp('prog1.gyp', chdir='src')
# Move the tree to verify the generated files contain no absolute paths.
test.relocate('src', 'relocate/src')
test.build('prog1.gyp', chdir='relocate/src')
test.run_built_executable('prog1',
                          stdout="Hello from prog1.c\n",
                          chdir='relocate/src')
# Xcode places each project's output next to its .xcodeproj; the other
# generators build relative to the entry-point directory (see the module
# docstring above), so the location of prog2 differs per generator.
if test.format == 'xcode':
  chdir = 'relocate/src/subdir'
else:
  chdir = 'relocate/src'
test.run_built_executable('prog2',
                          chdir=chdir,
                          stdout="Hello from prog2.c\n")
test.pass_test()
| mit |
wetneb/django | django/contrib/gis/gdal/geomtype.py | 100 | 2979 | from django.contrib.gis.gdal.error import GDALException
from django.utils import six
class OGRGeomType(object):
    "Encapsulates OGR Geometry Types."
    # Flag OR'ed onto the 2D type codes to mark 2.5D (Z-coordinate) types.
    wkb25bit = -2147483648
    # Dictionary of acceptable OGRwkbGeometryType s and their string names.
    _types = {0: 'Unknown',
              1: 'Point',
              2: 'LineString',
              3: 'Polygon',
              4: 'MultiPoint',
              5: 'MultiLineString',
              6: 'MultiPolygon',
              7: 'GeometryCollection',
              100: 'None',
              101: 'LinearRing',
              1 + wkb25bit: 'Point25D',
              2 + wkb25bit: 'LineString25D',
              3 + wkb25bit: 'Polygon25D',
              4 + wkb25bit: 'MultiPoint25D',
              5 + wkb25bit: 'MultiLineString25D',
              6 + wkb25bit: 'MultiPolygon25D',
              7 + wkb25bit: 'GeometryCollection25D',
              }
    # Reverse type dictionary, keyed by lower-case of the name.
    _str_types = {v.lower(): k for k, v in _types.items()}
    def __init__(self, type_input):
        "Figures out the correct OGR Type based upon the input."
        if isinstance(type_input, OGRGeomType):
            num = type_input.num
        elif isinstance(type_input, six.string_types):
            type_input = type_input.lower()
            if type_input == 'geometry':
                type_input = 'unknown'
            num = self._str_types.get(type_input)
            if num is None:
                raise GDALException('Invalid OGR String Type "%s"' % type_input)
        elif isinstance(type_input, int):
            if type_input not in self._types:
                raise GDALException('Invalid OGR Integer Type: %d' % type_input)
            num = type_input
        else:
            raise TypeError('Invalid OGR input type given.')
        # Setting the OGR geometry type number.
        self.num = num
    def __str__(self):
        "Returns the value of the name property."
        return self.name
    # NOTE(review): defining __eq__ without __hash__ makes instances
    # unhashable on Python 3 (__hash__ is implicitly set to None).
    def __eq__(self, other):
        """
        Does an equivalence test on the OGR type with the given
        other OGRGeomType, the short-hand string, or the integer.
        """
        if isinstance(other, OGRGeomType):
            return self.num == other.num
        elif isinstance(other, six.string_types):
            return self.name.lower() == other.lower()
        elif isinstance(other, int):
            return self.num == other
        else:
            return False
    def __ne__(self, other):
        return not (self == other)
    @property
    def name(self):
        "Returns a short-hand string form of the OGR Geometry type."
        return self._types[self.num]
    @property
    def django(self):
        "Returns the Django GeometryField for this OGR Type."
        s = self.name.replace('25D', '')
        if s in ('LinearRing', 'None'):
            return None
        elif s == 'Unknown':
            s = 'Geometry'
        return s + 'Field'
kapiljituri/ConnectX-Travis-CI | googletest/googletest/test/gtest_output_test.py | 363 | 12259 | #!/usr/bin/env python
#
# Copyright 2008, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests the text output of Google C++ Testing Framework.
SYNOPSIS
gtest_output_test.py --build_dir=BUILD/DIR --gengolden
# where BUILD/DIR contains the built gtest_output_test_ file.
gtest_output_test.py --gengolden
gtest_output_test.py
"""
__author__ = 'wan@google.com (Zhanyong Wan)'
import difflib
import os
import re
import sys
import gtest_test_utils
# The flag for generating the golden file
GENGOLDEN_FLAG = '--gengolden'
CATCH_EXCEPTIONS_ENV_VAR_NAME = 'GTEST_CATCH_EXCEPTIONS'
IS_WINDOWS = os.name == 'nt'
# TODO(vladl@google.com): remove the _lin suffix.
GOLDEN_NAME = 'gtest_output_test_golden_lin.txt'
PROGRAM_PATH = gtest_test_utils.GetTestExecutablePath('gtest_output_test_')
# Each COMMAND_* below is an (extra_env, argv) pair describing one invocation
# of the gtest_output_test_ binary whose output is compared to the golden file.
# At least one command we exercise must not have the
# 'internal_skip_environment_and_ad_hoc_tests' argument.
COMMAND_LIST_TESTS = ({}, [PROGRAM_PATH, '--gtest_list_tests'])
COMMAND_WITH_COLOR = ({}, [PROGRAM_PATH, '--gtest_color=yes'])
COMMAND_WITH_TIME = ({}, [PROGRAM_PATH,
                          '--gtest_print_time',
                          'internal_skip_environment_and_ad_hoc_tests',
                          '--gtest_filter=FatalFailureTest.*:LoggingTest.*'])
COMMAND_WITH_DISABLED = (
    {}, [PROGRAM_PATH,
         '--gtest_also_run_disabled_tests',
         'internal_skip_environment_and_ad_hoc_tests',
         '--gtest_filter=*DISABLED_*'])
COMMAND_WITH_SHARDING = (
    {'GTEST_SHARD_INDEX': '1', 'GTEST_TOTAL_SHARDS': '2'},
    [PROGRAM_PATH,
     'internal_skip_environment_and_ad_hoc_tests',
     '--gtest_filter=PassingTest.*'])
GOLDEN_PATH = os.path.join(gtest_test_utils.GetSourceDir(), GOLDEN_NAME)
def ToUnixLineEnding(s):
  """Normalizes Windows (CRLF) and old-Mac (CR) line endings in s to LF."""
  without_crlf = s.replace('\r\n', '\n')
  return without_crlf.replace('\r', '\n')
def RemoveLocations(test_output):
  """Removes all file location info from a Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with all file location info (in the form of
    'DIRECTORY/FILE_NAME:LINE_NUMBER: ' or
    'DIRECTORY\\FILE_NAME(LINE_NUMBER): ') replaced by
    'FILE_NAME:#: '.
  """
  # Matches both the POSIX ':LINE:' and the Windows '(LINE):' styles.
  location_re = r'.*[/\\](.+)(\:\d+|\(\d+\))\: '
  return re.sub(location_re, r'\1:#: ', test_output)
def RemoveStackTraceDetails(output):
  """Replaces the body of every stack trace with an '(omitted)' placeholder."""
  # re.DOTALL lets '.' span newlines; *? keeps each match as short as possible.
  return re.sub(r'Stack trace:.*?\n\n',
                'Stack trace: (omitted)\n\n', output, flags=re.DOTALL)
def RemoveStackTraces(output):
  """Deletes every stack trace (header and body) from the output."""
  # re.DOTALL lets '.' span newlines; *? keeps each match as short as possible.
  return re.sub(r'Stack trace:.*?\n\n', '', output, flags=re.DOTALL)
def RemoveTime(output):
  """Replaces every '(<digits> ms' timing annotation with '(? ms'."""
  timing_re = re.compile(r'\(\d+ ms')
  return timing_re.sub('(? ms', output)
def RemoveTypeInfoDetails(test_output):
  """Removes compiler-specific type info from Google Test program's output.

  Args:
    test_output: the output of a Google Test program.

  Returns:
    output with type information normalized to canonical form.
  """
  # Some compilers spell the type 'unsigned int' as just 'unsigned'; the
  # pattern is a plain literal, so str.replace is equivalent to re.sub here.
  return test_output.replace('unsigned int', 'unsigned')
def NormalizeToCurrentPlatform(test_output):
  """Normalizes platform specific output details for easier comparison."""
  if not IS_WINDOWS:
    # Non-Windows output is already in the canonical form.
    return test_output
  # Strips ANSI color escapes, which are not produced on Windows.
  test_output = re.sub('\x1b\\[(0;3\d)?m', '', test_output)
  # Rewrites failure message headers into the Windows format.
  test_output = re.sub(r': Failure\n', r': error: ', test_output)
  # Rewrites file(line_number) into file:line_number.
  test_output = re.sub(r'((\w|\.)+)\((\d+)\):', r'\1:\3:', test_output)
  return test_output
def RemoveTestCounts(output):
  """Replaces concrete test and test-case counts with '?' placeholders."""
  # Order matters: the '<n> tests from <m> test cases' form must be rewritten
  # before the more general '<n> tests from <name>' form.
  substitutions = (
      (r'\d+ tests?, listed below', '? tests, listed below'),
      (r'\d+ FAILED TESTS', '? FAILED TESTS'),
      (r'\d+ tests? from \d+ test cases?', '? tests from ? test cases'),
      (r'\d+ tests? from ([a-zA-Z_])', r'? tests from \1'),
      (r'\d+ tests?\.', '? tests.'),
  )
  for pattern, replacement in substitutions:
    output = re.sub(pattern, replacement, output)
  return output
def RemoveMatchingTests(test_output, pattern):
  """Removes output of specified tests from a Google Test program's output.

  This function strips not only the beginning and the end of a test but also
  all output in between.

  Args:
    test_output: A string containing the test output.
    pattern: A regex string that matches names of test cases or
             tests to remove.

  Returns:
    Contents of test_output with tests whose names match pattern removed.
  """
  # First drop whole [ RUN ] ... [ FAILED / OK ] sections for matching tests.
  run_block_re = (r'.*\[ RUN \] .*%s(.|\n)*?\[( FAILED | OK )\] .*%s.*\n'
                  % (pattern, pattern))
  test_output = re.sub(run_block_re, '', test_output)
  # Then drop any remaining stray lines that mention the pattern.
  return re.sub(r'.*%s.*\n' % pattern, '', test_output)
def NormalizeOutput(output):
  """Normalizes output (the output of gtest_output_test_.exe)."""
  # Applies each normalization pass in order; order mirrors the original
  # pipeline (line endings first so later regexes only see \n).
  for normalize in (ToUnixLineEnding, RemoveLocations,
                    RemoveStackTraceDetails, RemoveTime):
    output = normalize(output)
  return output
def GetShellCommandOutput(env_cmd):
  """Runs a command in a sub-process, and returns its output in a string.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.

  Returns:
    A string with the command's combined standard and diagnostic output.
  """
  extra_env, cmdline = env_cmd
  # Builds the child environment from a copy of ours plus the extras.
  environ = dict(os.environ)
  environ.update(extra_env)
  return gtest_test_utils.Subprocess(cmdline, env=environ).output
def GetCommandOutput(env_cmd):
  """Runs a command and returns its output with all file location
  info stripped off.

  Args:
    env_cmd: The shell command. A 2-tuple where element 0 is a dict of extra
             environment variables to set, and element 1 is a string with
             the command and any flags.
  """
  extra_env, cmdline = env_cmd
  # Work on a copy and force exception catching, which disables the
  # exception pop-ups on Windows.
  env_with_catch = dict(extra_env)
  env_with_catch[CATCH_EXCEPTIONS_ENV_VAR_NAME] = '1'
  return NormalizeOutput(GetShellCommandOutput((env_with_catch, cmdline)))
def GetOutputOfAllCommands():
  """Returns concatenated output from several representative commands."""
  commands = (COMMAND_WITH_COLOR,
              COMMAND_WITH_TIME,
              COMMAND_WITH_DISABLED,
              COMMAND_WITH_SHARDING)
  return ''.join(GetCommandOutput(command) for command in commands)
# Probe the test binary once to learn which optional features it was built
# with; the golden-file comparison is relaxed for unsupported features.
test_list = GetShellCommandOutput(COMMAND_LIST_TESTS)
SUPPORTS_DEATH_TESTS = 'DeathTest' in test_list
SUPPORTS_TYPED_TESTS = 'TypedTest' in test_list
SUPPORTS_THREADS = 'ExpectFailureWithThreadsTest' in test_list
# Stack traces are never compared in detail by this test.
SUPPORTS_STACK_TRACES = False

# The golden file can only be regenerated by a binary that supports every
# optional feature, built on a non-Windows platform.
CAN_GENERATE_GOLDEN_FILE = (SUPPORTS_DEATH_TESTS and
                            SUPPORTS_TYPED_TESTS and
                            SUPPORTS_THREADS and
                            not IS_WINDOWS)
class GTestOutputTest(gtest_test_utils.TestCase):
  """Compares gtest_output_test_'s normalized output against the golden file."""

  def RemoveUnsupportedTests(self, test_output):
    """Strips golden-file sections for features this build does not support.

    Args:
      test_output: contents of the golden file.

    Returns:
      The golden output with death tests, typed tests, threaded tests and/or
      stack traces removed, depending on the test binary's capabilities.
    """
    if not SUPPORTS_DEATH_TESTS:
      test_output = RemoveMatchingTests(test_output, 'DeathTest')
    if not SUPPORTS_TYPED_TESTS:
      test_output = RemoveMatchingTests(test_output, 'TypedTest')
      test_output = RemoveMatchingTests(test_output, 'TypedDeathTest')
      test_output = RemoveMatchingTests(test_output, 'TypeParamDeathTest')
    if not SUPPORTS_THREADS:
      test_output = RemoveMatchingTests(test_output,
                                        'ExpectFailureWithThreadsTest')
      test_output = RemoveMatchingTests(test_output,
                                        'ScopedFakeTestPartResultReporterTest')
      test_output = RemoveMatchingTests(test_output,
                                        'WorksConcurrently')
    if not SUPPORTS_STACK_TRACES:
      test_output = RemoveStackTraces(test_output)
    return test_output

  def testOutput(self):
    """Runs the test binary and diffs its output against the golden file."""
    output = GetOutputOfAllCommands()

    # A mis-configured source control system can cause \r appear in EOL
    # sequences when we read the golden file irrespective of an operating
    # system used. Therefore, we need to strip those \r's from newlines
    # unconditionally.
    # Use a context manager so the handle is closed even if reading fails.
    with open(GOLDEN_PATH, 'r') as golden_file:
      golden = ToUnixLineEnding(golden_file.read())

    # We want the test to pass regardless of certain features being
    # supported or not.

    # We still have to remove type name specifics in all cases.
    normalized_actual = RemoveTypeInfoDetails(output)
    normalized_golden = RemoveTypeInfoDetails(golden)

    if CAN_GENERATE_GOLDEN_FILE:
      self.assertEqual(normalized_golden, normalized_actual,
                       '\n'.join(difflib.unified_diff(
                           normalized_golden.split('\n'),
                           normalized_actual.split('\n'),
                           'golden', 'actual')))
    else:
      normalized_actual = NormalizeToCurrentPlatform(
          RemoveTestCounts(normalized_actual))
      normalized_golden = NormalizeToCurrentPlatform(
          RemoveTestCounts(self.RemoveUnsupportedTests(normalized_golden)))

      # This code is very handy when debugging golden file differences:
      if os.getenv('DEBUG_GTEST_OUTPUT_TEST'):
        # 'with' closes the debug dump files instead of leaking the handles.
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_actual.txt'), 'wb') as dump:
          dump.write(normalized_actual)
        with open(os.path.join(
            gtest_test_utils.GetSourceDir(),
            '_gtest_output_test_normalized_golden.txt'), 'wb') as dump:
          dump.write(normalized_golden)

      self.assertEqual(normalized_golden, normalized_actual)
if __name__ == '__main__':
  if sys.argv[1:] == [GENGOLDEN_FLAG]:
    if CAN_GENERATE_GOLDEN_FILE:
      output = GetOutputOfAllCommands()
      # 'with' guarantees the golden file is flushed and closed even if the
      # write fails; 'wb' preserves the exact bytes produced by the binary.
      with open(GOLDEN_PATH, 'wb') as golden_file:
        golden_file.write(output)
    else:
      message = (
          """Unable to write a golden file when compiled in an environment
that does not support all the required features (death tests, typed tests,
and multiple threads). Please generate the golden file using a binary built
with those features enabled.""")

      sys.stderr.write(message)
      sys.exit(1)
  else:
    gtest_test_utils.Main()
| unlicense |
eneldoserrata/marcos_openerp | addons/hr_timesheet_invoice/wizard/__init__.py | 433 | 1159 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import hr_timesheet_invoice_create
import hr_timesheet_analytic_profit
import hr_timesheet_final_invoice_create
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
RichardLitt/wyrd-django-dev | tests/regressiontests/null_queries/tests.py | 91 | 2939 | from __future__ import absolute_import
from django.test import TestCase
from django.core.exceptions import FieldError
from .models import Poll, Choice, OuterA, Inner, OuterB
class NullQueriesTests(TestCase):
    """Regression tests for filtering and excluding with None (SQL NULL)."""

    def test_none_as_null(self):
        """
        Regression test for the use of None as a query value.

        None is interpreted as an SQL NULL, but only in __exact queries.
        Set up some initial polls and choices
        """
        p1 = Poll(question='Why?')
        p1.save()
        c1 = Choice(poll=p1, choice='Because.')
        c1.save()
        c2 = Choice(poll=p1, choice='Why Not?')
        c2.save()

        # Exact query with value None returns nothing ("is NULL" in sql,
        # but every 'id' field has a value).
        self.assertQuerysetEqual(Choice.objects.filter(choice__exact=None), [])

        # Excluding the previous result returns everything.
        self.assertQuerysetEqual(
            Choice.objects.exclude(choice=None).order_by('id'),
            [
                '<Choice: Choice: Because. in poll Q: Why? >',
                '<Choice: Choice: Why Not? in poll Q: Why? >'
            ]
        )

        # Valid query, but fails because foo isn't a keyword
        self.assertRaises(FieldError, Choice.objects.filter, foo__exact=None)

        # Can't use None on anything other than __exact
        self.assertRaises(ValueError, Choice.objects.filter, id__gt=None)

        # Can't use None on anything other than __exact
        self.assertRaises(ValueError, Choice.objects.filter, foo__gt=None)

        # Related managers use __exact=None implicitly if the object hasn't been saved.
        p2 = Poll(question="How?")
        self.assertEqual(repr(p2.choice_set.all()), '[]')

    def test_reverse_relations(self):
        """
        Querying across reverse relations and then another relation should
        insert outer joins correctly so as not to exclude results.
        """
        obj = OuterA.objects.create()
        # No Inner rows exist yet, so the outer join must keep the OuterA row.
        self.assertQuerysetEqual(
            OuterA.objects.filter(inner__second=None),
            ['<OuterA: OuterA object>']
        )
        self.assertQuerysetEqual(
            OuterA.objects.filter(inner__second__data=None),
            ['<OuterA: OuterA object>']
        )
        inner_obj = Inner.objects.create(first=obj)
        self.assertQuerysetEqual(
            Inner.objects.filter(first__inner__second=None),
            ['<Inner: Inner object>']
        )

        # Ticket #13815: check if <reverse>_isnull=False does not produce
        # faulty empty lists
        objB = OuterB.objects.create(data="reverse")
        self.assertQuerysetEqual(
            OuterB.objects.filter(inner__isnull=False),
            []
        )
        Inner.objects.create(first=obj)
        self.assertQuerysetEqual(
            OuterB.objects.exclude(inner__isnull=False),
            ['<OuterB: OuterB object>']
        )
| bsd-3-clause |
sunlightlabs/regulations-scraper | one_offs/lightsquared/download_files.py | 1 | 1209 | from optparse import OptionParser
arg_parser = OptionParser()
def run(options, args):
import json, os
from regscrape_lib.transfer import bulk_download
if len(args) > 1:
metadata_path = args[0]
out_path = args[1]
else:
print "Specify files"
sys.exit(0)
input = json.load(open(metadata_path, 'r'))
download_path = os.path.join(os.path.dirname(metadata_path), 'downloads')
def download_generator():
for record in input:
for document in record['documents']:
num = document['url'].split('=').pop() + '.pdf'
yield (document['url'], os.path.join(download_path, num), document)
def status_func(status, url, filename, record):
if status[0]:
record['filename'] = 'downloads/' + filename.split('downloads/').pop()
else:
record['filename'] = False
record['download_error'] = status[1]
bulk_download(download_generator(), status_func, retries=2, verbose=True)
date_handler = lambda obj: obj.isoformat() if hasattr(obj, 'isoformat') else None
open(out_path, 'w').write(json.dumps(input, default=date_handler, indent=4)) | bsd-3-clause |
dkarakats/edx-platform | common/djangoapps/course_modes/migrations/0004_auto__add_field_coursemode_expiration_date.py | 114 | 1674 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South migration: adds the nullable 'expiration_date' field to CourseMode."""

    def forwards(self, orm):
        # Adding field 'CourseMode.expiration_date'
        db.add_column('course_modes_coursemode', 'expiration_date',
                      self.gf('django.db.models.fields.DateField')(default=None, null=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'CourseMode.expiration_date'
        db.delete_column('course_modes_coursemode', 'expiration_date')

    # Frozen ORM state used by South to reconstruct the model at migration time.
    models = {
        'course_modes.coursemode': {
            'Meta': {'unique_together': "(('course_id', 'mode_slug', 'currency'),)", 'object_name': 'CourseMode'},
            'course_id': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
            'currency': ('django.db.models.fields.CharField', [], {'default': "'usd'", 'max_length': '8'}),
            'expiration_date': ('django.db.models.fields.DateField', [], {'default': 'None', 'null': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'min_price': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
            'mode_display_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'mode_slug': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'suggested_prices': ('django.db.models.fields.CommaSeparatedIntegerField', [], {'default': "''", 'max_length': '255', 'blank': 'True'})
        }
    }

    complete_apps = ['course_modes']
| agpl-3.0 |
jolyonb/edx-platform | common/djangoapps/third_party_auth/migrations/0023_auto_20190418_2033.py | 1 | 1545 | # -*- coding: utf-8 -*-
# Generated by Django 1.11.20 on 2019-04-18 20:33
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    """Adds an optional one-to-one 'organization' link to each third-party-auth
    provider config model (LTI, OAuth2 and SAML)."""

    dependencies = [
        ('organizations', '0006_auto_20171207_0259'),
        ('third_party_auth', '0022_auto_20181012_0307'),
    ]

    operations = [
        migrations.AddField(
            model_name='ltiproviderconfig',
            name='organization',
            field=models.OneToOneField(blank=True, help_text="optional. If this provider is an Organization, this attribute can be used reference users in that Organization", null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
        ),
        migrations.AddField(
            model_name='oauth2providerconfig',
            name='organization',
            field=models.OneToOneField(blank=True, help_text="optional. If this provider is an Organization, this attribute can be used reference users in that Organization", null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
        ),
        migrations.AddField(
            model_name='samlproviderconfig',
            name='organization',
            field=models.OneToOneField(blank=True, help_text="optional. If this provider is an Organization, this attribute can be used reference users in that Organization", null=True, on_delete=django.db.models.deletion.CASCADE, to='organizations.Organization'),
        ),
    ]
| agpl-3.0 |
leapcode/bitmask_client | src/leap/bitmask/config/tests/test_providerconfig.py | 5 | 9022 | # -*- coding: utf-8 -*-
# test_providerconfig.py
# Copyright (C) 2013 LEAP
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
Tests for providerconfig
"""
try:
import unittest2 as unittest
except ImportError:
import unittest
import os
import json
import copy
from leap.bitmask.config.providerconfig import ProviderConfig, MissingCACert
from leap.bitmask.services import get_supported
from leap.common.testing.basetest import BaseLeapTest
from mock import Mock
# Fixture mirroring the provider.json document a LEAP provider publishes;
# the tests below load it into a ProviderConfig and assert on its fields.
sample_config = {
    "api_uri": "https://api.test.bitmask.net:4430",
    "api_version": "1",
    "ca_cert_fingerprint":
    "SHA256: 0f17c033115f6b76ff67871872303ff65034efe7dd1b910062ca323eb4da5c7e",
    "ca_cert_uri": "https://test.bitmask.net/ca.crt",
    "default_language": "en",
    "description": {
        "en": "Test description for provider",
        "es": "Descripcion de prueba para el proveedor"
    },
    "domain": "test.bitmask.net",
    "enrollment_policy": "open",
    "languages": [
        "en",
        "es"
    ],
    "name": {
        "en": "Bitmask testing environment",
        "es": "Entorno de pruebas de Bitmask"
    },
    "service": {
        "allow_anonymous": True,
        "allow_free": True,
        "allow_limited_bandwidth": True,
        "allow_paid": False,
        "allow_registration": True,
        "allow_unlimited_bandwidth": False,
        "bandwidth_limit": 400000,
        "default_service_level": 1,
        "levels": [
            {
                "bandwidth": "limited",
                "id": 1,
                "name": "anonymous"
            },
            {
                "bandwidth": "limited",
                "id": 2,
                "name": "free",
                "storage": 50
            }
        ]
    },
    "services": [
        "openvpn"
    ]
}
class ProviderConfigTest(BaseLeapTest):
    """Tests for ProviderConfig"""

    def setUp(self):
        # Load the shared sample_config fixture into a fresh ProviderConfig.
        self._provider_config = ProviderConfig()
        json_string = json.dumps(sample_config)
        self._provider_config.load(data=json_string)

        # At certain points we are going to be replacing these method
        # to avoid creating a file.
        # We need to save the old implementation and restore it in
        # tearDown so we are sure everything is as expected for each
        # test. If we do it inside each specific test, a failure in
        # the test will leave the implementation with the mock.
        self._old_ospath_exists = os.path.exists

    def tearDown(self):
        # Restore os.path.exists in case a test replaced it with a Mock.
        os.path.exists = self._old_ospath_exists

    def test_configs_ok(self):
        """
        Test if the configs loads ok
        """
        # TODO: this test should go to the BaseConfig tests
        pc = self._provider_config
        self.assertEqual(pc.get_api_uri(), sample_config['api_uri'])
        self.assertEqual(pc.get_api_version(), sample_config['api_version'])
        self.assertEqual(pc.get_ca_cert_fingerprint(),
                         sample_config['ca_cert_fingerprint'])
        self.assertEqual(pc.get_ca_cert_uri(), sample_config['ca_cert_uri'])
        self.assertEqual(pc.get_default_language(),
                         sample_config['default_language'])

        self.assertEqual(pc.get_domain(), sample_config['domain'])
        self.assertEqual(pc.get_enrollment_policy(),
                         sample_config['enrollment_policy'])
        self.assertEqual(pc.get_languages(), sample_config['languages'])

    def test_localizations(self):
        # Explicitly requested languages should return that localization.
        pc = self._provider_config

        self.assertEqual(pc.get_description(lang='en'),
                         sample_config['description']['en'])
        self.assertEqual(pc.get_description(lang='es'),
                         sample_config['description']['es'])

        self.assertEqual(pc.get_name(lang='en'), sample_config['name']['en'])
        self.assertEqual(pc.get_name(lang='es'), sample_config['name']['es'])

    def _localize(self, lang):
        """
        Helper to change default language of the provider config.
        """
        pc = self._provider_config
        config = copy.deepcopy(sample_config)
        config['default_language'] = lang
        json_string = json.dumps(config)
        pc.load(data=json_string)

        return config

    def test_default_localization1(self):
        # An unknown language ('xx') must fall back to the default language.
        pc = self._provider_config
        config = self._localize(sample_config['languages'][0])

        default_language = config['default_language']
        default_description = config['description'][default_language]
        default_name = config['name'][default_language]

        self.assertEqual(pc.get_description(lang='xx'), default_description)
        self.assertEqual(pc.get_description(), default_description)

        self.assertEqual(pc.get_name(lang='xx'), default_name)
        self.assertEqual(pc.get_name(), default_name)

    def test_default_localization2(self):
        # Same fallback behavior with the other configured language.
        pc = self._provider_config
        config = self._localize(sample_config['languages'][1])

        default_language = config['default_language']
        default_description = config['description'][default_language]
        default_name = config['name'][default_language]

        self.assertEqual(pc.get_description(lang='xx'), default_description)
        self.assertEqual(pc.get_description(), default_description)

        self.assertEqual(pc.get_name(lang='xx'), default_name)
        self.assertEqual(pc.get_name(), default_name)

    def test_get_ca_cert_path_as_expected(self):
        pc = self._provider_config
        provider_domain = sample_config['domain']
        expected_path = os.path.join('leap', 'providers',
                                     provider_domain, 'keys', 'ca',
                                     'cacert.pem')

        # mock 'os.path.exists' so we don't get an error for unexisting file
        os.path.exists = Mock(return_value=True)
        cert_path = pc.get_ca_cert_path()

        self.assertTrue(cert_path.endswith(expected_path))

    def test_get_ca_cert_path_about_to_download(self):
        # about_to_download=True skips the existence check entirely.
        pc = self._provider_config
        provider_domain = sample_config['domain']
        expected_path = os.path.join('leap', 'providers',
                                     provider_domain, 'keys', 'ca',
                                     'cacert.pem')

        cert_path = pc.get_ca_cert_path(about_to_download=True)

        self.assertTrue(cert_path.endswith(expected_path))

    def test_get_ca_cert_path_fails(self):
        pc = self._provider_config

        # mock 'get_domain' so we don't need to load a config
        provider_domain = 'test.provider.com'
        pc.get_domain = Mock(return_value=provider_domain)

        # Without an existing cert on disk this must raise MissingCACert.
        with self.assertRaises(MissingCACert):
            pc.get_ca_cert_path()

    def test_provides_eip(self):
        pc = self._provider_config
        config = copy.deepcopy(sample_config)

        # It provides
        config['services'] = ['openvpn', 'test_service']
        json_string = json.dumps(config)
        pc.load(data=json_string)
        self.assertTrue(pc.provides_eip())

        # It does not provides
        config['services'] = ['test_service', 'other_service']
        json_string = json.dumps(config)
        pc.load(data=json_string)
        self.assertFalse(pc.provides_eip())

    def test_provides_mx(self):
        pc = self._provider_config
        config = copy.deepcopy(sample_config)

        # It provides
        config['services'] = ['mx', 'other_service']
        json_string = json.dumps(config)
        pc.load(data=json_string)
        self.assertTrue(pc.provides_mx())

        # It does not provides
        config['services'] = ['test_service', 'other_service']
        json_string = json.dumps(config)
        pc.load(data=json_string)
        self.assertFalse(pc.provides_mx())

    def test_supports_unknown_service(self):
        # get_supported() filters out services the client cannot handle.
        pc = self._provider_config
        config = copy.deepcopy(sample_config)

        config['services'] = ['unknown']
        json_string = json.dumps(config)
        pc.load(data=json_string)
        self.assertFalse('unknown' in get_supported(pc.get_services()))

    def test_provides_unknown_service(self):
        # get_services() itself returns whatever the provider declares.
        pc = self._provider_config
        config = copy.deepcopy(sample_config)

        config['services'] = ['unknown']
        json_string = json.dumps(config)
        pc.load(data=json_string)
        self.assertTrue('unknown' in pc.get_services())
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| gpl-3.0 |
jantman/RPyMostat | docs/discovery_register.py | 1 | 2550 | #!/usr/bin/env python
""" Example of announcing a service (in this case, a fake HTTP server) """
import socket
import time
from six.moves import input
from zeroconf import ServiceInfo, Zeroconf, InterfaceChoice, DNSOutgoing, _FLAGS_QR_RESPONSE, _FLAGS_AA, _TYPE_PTR, _TYPE_SRV, _CLASS_IN, _TYPE_TXT, _TYPE_A, DNSAddress, DNSText, DNSService, DNSPointer, _REGISTER_TIME, current_time_millis
import logging
FORMAT = "[%(levelname)s %(filename)s:%(lineno)s - %(name)s%(funcName)20s() ] %(message)s"
logging.basicConfig(level=logging.DEBUG, format=FORMAT)
logger = logging.getLogger()
def register_service(zc, info, ttl=60, send_num=3):
    """
    Like zeroconf.Zeroconf.register_service() but just broadcasts
    ``send_num`` announcement packets and then returns.

    :param zc: Zeroconf instance used to send the packets
    :param info: ServiceInfo describing the service being announced
    :param ttl: time-to-live (seconds) for the advertised DNS records
    :param send_num: number of announcement packets to broadcast
    """
    logger.info("Registering service: {s}".format(s=info))
    now = current_time_millis()
    next_time = now
    i = 0
    # Bug fix: the loop previously hard-coded 'i < 3', silently ignoring the
    # send_num parameter documented above.
    while i < send_num:
        if now < next_time:
            # Not yet time for the next packet; wait out the remainder.
            sleep_time = next_time - now
            logger.debug("sleeping {s}".format(s=sleep_time))
            zc.wait(sleep_time)
            now = current_time_millis()
            continue
        # Build one authoritative mDNS response advertising the PTR, SRV,
        # TXT and (optionally) A records for the service.
        out = DNSOutgoing(_FLAGS_QR_RESPONSE | _FLAGS_AA)
        out.add_answer_at_time(DNSPointer(info.type, _TYPE_PTR,
                                          _CLASS_IN, ttl, info.name), 0)
        out.add_answer_at_time(DNSService(info.name, _TYPE_SRV,
                                          _CLASS_IN, ttl, info.priority, info.weight, info.port,
                                          info.server), 0)
        out.add_answer_at_time(DNSText(info.name, _TYPE_TXT, _CLASS_IN,
                                       ttl, info.text), 0)
        if info.address:
            out.add_answer_at_time(DNSAddress(info.server, _TYPE_A,
                                              _CLASS_IN, ttl, info.address), 0)
        zc.send(out)
        i += 1
        next_time += _REGISTER_TIME
    logger.debug("done registering service")
# TXT-record properties advertised with the (fake) HTTP service.
desc = {'path': '/~paulsm/'}

info = ServiceInfo("_http._tcp.local.",
                   "Paul's Test Web Site._http._tcp.local.",
                   socket.inet_aton("10.0.1.2"), 80, 0, 0,
                   desc, "ash-2.local.")

zeroconf = Zeroconf(interfaces=InterfaceChoice.All)

logger.info("Registration of a service...")
try:
    wait_seconds = 60
    # Re-announce the service every wait_seconds until interrupted.
    while True:
        register_service(zeroconf, info)
        zeroconf.wait(wait_seconds * 1000)
finally:
    # Always shut the zeroconf engine down, even on Ctrl-C.
    logger.info("Unregistering...")
    logger.error("Unregister not implemented")
    zeroconf.close()
    logger.info("Done.")
janusnic/wagtail | wagtail/wagtailadmin/tests/tests.py | 6 | 5408 | from django.test import TestCase, override_settings
from django.core.urlresolvers import reverse
from django.core import mail
from wagtail.tests.utils import WagtailTestUtils
from wagtail.wagtailcore.models import Page
from wagtail.wagtailadmin.utils import send_mail
class TestHome(TestCase, WagtailTestUtils):
    """Smoke tests for the Wagtail admin home page and its hook-driven menu."""

    def setUp(self):
        # Login
        self.login()

    def test_simple(self):
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertEqual(response.status_code, 200)

    def test_admin_menu(self):
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertEqual(response.status_code, 200)
        # check that media attached to menu items is correctly pulled in
        self.assertContains(response, '<script type="text/javascript" src="/static/wagtailadmin/js/explorer-menu.js"></script>')
        # check that custom menu items (including classname / attrs parameters) are pulled in
        self.assertContains(response, '<a href="http://www.tomroyal.com/teaandkittens/" class="icon icon-kitten" data-fluffy="yes">Kittens!</a>')

        # check that is_shown is respected on menu items
        response = self.client.get(reverse('wagtailadmin_home') + '?hide-kittens=true')
        self.assertNotContains(response, '<a href="http://www.tomroyal.com/teaandkittens/" class="icon icon-kitten" data-fluffy="yes">Kittens!</a>')

    def test_never_cache_header(self):
        # This tests that wagtailadmins global cache settings have been applied correctly
        response = self.client.get(reverse('wagtailadmin_home'))
        self.assertIn('private', response['Cache-Control'])
        self.assertIn('no-cache', response['Cache-Control'])
        self.assertIn('no-store', response['Cache-Control'])
        self.assertIn('max-age=0', response['Cache-Control'])
class TestEditorHooks(TestCase, WagtailTestUtils):
    """Checks that the insert_editor_css/js hooks inject assets on page forms."""

    def setUp(self):
        self.homepage = Page.objects.get(id=2)
        self.login()

    def test_editor_css_and_js_hooks_on_add(self):
        # Hook output must appear on the page-creation form.
        response = self.client.get(reverse('wagtailadmin_pages_create', args=('tests', 'simplepage', self.homepage.id)))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<link rel="stylesheet" href="/path/to/my/custom.css">')
        self.assertContains(response, '<script src="/path/to/my/custom.js"></script>')

    def test_editor_css_and_js_hooks_on_edit(self):
        # ...and on the page-edit form.
        response = self.client.get(reverse('wagtailadmin_pages_edit', args=(self.homepage.id, )))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, '<link rel="stylesheet" href="/path/to/my/custom.css">')
        self.assertContains(response, '<script src="/path/to/my/custom.js"></script>')
class TestSendMail(TestCase):
    """Tests the from-address fallback chain of wagtailadmin's send_mail()."""

    def test_send_email(self):
        # Explicit from_email wins over every setting.
        send_mail("Test subject", "Test content", ["nobody@email.com"], "test@email.com")

        # Check that the email was sent
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Test subject")
        self.assertEqual(mail.outbox[0].body, "Test content")
        self.assertEqual(mail.outbox[0].to, ["nobody@email.com"])
        self.assertEqual(mail.outbox[0].from_email, "test@email.com")

    @override_settings(WAGTAILADMIN_NOTIFICATION_FROM_EMAIL='anothertest@email.com')
    def test_send_fallback_to_wagtailadmin_notification_from_email_setting(self):
        # WAGTAILADMIN_NOTIFICATION_FROM_EMAIL is the first fallback.
        send_mail("Test subject", "Test content", ["nobody@email.com"])

        # Check that the email was sent
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Test subject")
        self.assertEqual(mail.outbox[0].body, "Test content")
        self.assertEqual(mail.outbox[0].to, ["nobody@email.com"])
        self.assertEqual(mail.outbox[0].from_email, "anothertest@email.com")

    @override_settings(DEFAULT_FROM_EMAIL='yetanothertest@email.com')
    def test_send_fallback_to_default_from_email_setting(self):
        # DEFAULT_FROM_EMAIL is used when the Wagtail setting is absent.
        send_mail("Test subject", "Test content", ["nobody@email.com"])

        # Check that the email was sent
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Test subject")
        self.assertEqual(mail.outbox[0].body, "Test content")
        self.assertEqual(mail.outbox[0].to, ["nobody@email.com"])
        self.assertEqual(mail.outbox[0].from_email, "yetanothertest@email.com")

    def test_send_default_from_email(self):
        # With no settings at all, Django's webmaster@localhost default applies.
        send_mail("Test subject", "Test content", ["nobody@email.com"])

        # Check that the email was sent
        self.assertEqual(len(mail.outbox), 1)
        self.assertEqual(mail.outbox[0].subject, "Test subject")
        self.assertEqual(mail.outbox[0].body, "Test content")
        self.assertEqual(mail.outbox[0].to, ["nobody@email.com"])
        self.assertEqual(mail.outbox[0].from_email, "webmaster@localhost")
class TestExplorerNavView(TestCase, WagtailTestUtils):
    """Checks the explorer navigation fragment renders the page tree."""

    def setUp(self):
        self.homepage = Page.objects.get(id=2).specific
        self.login()

    def test_explorer_nav_view(self):
        response = self.client.get(reverse('wagtailadmin_explorer_nav'))

        # Check response
        self.assertEqual(response.status_code, 200)
        self.assertTemplateUsed('wagtailadmin/shared/explorer_nav.html')
        # The first node in the rendered tree should be the site homepage.
        self.assertEqual(response.context['nodes'][0][0], self.homepage)
| bsd-3-clause |
kustodian/ansible | lib/ansible/modules/network/netvisor/pn_dhcp_filter.py | 35 | 4817 | #!/usr/bin/python
# Copyright: (c) 2018, Pluribus Networks
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = """
---
module: pn_dhcp_filter
author: "Pluribus Networks (@rajaspachipulusu17)"
version_added: "2.8"
short_description: CLI command to create/modify/delete dhcp-filter
description:
- This module can be used to create, delete and modify a DHCP filter config.
options:
pn_cliswitch:
description:
- Target switch to run the CLI on.
required: False
type: str
state:
description:
- State the action to perform. Use C(present) to create dhcp-filter and
C(absent) to delete dhcp-filter C(update) to modify the dhcp-filter.
required: True
type: str
choices: ['present', 'absent', 'update']
pn_trusted_ports:
description:
- trusted ports of dhcp config.
required: False
type: str
pn_name:
description:
- name of the DHCP filter.
required: false
type: str
"""
EXAMPLES = """
- name: dhcp filter create
pn_dhcp_filter:
pn_cliswitch: "sw01"
pn_name: "foo"
state: "present"
pn_trusted_ports: "1"
- name: dhcp filter delete
pn_dhcp_filter:
pn_cliswitch: "sw01"
pn_name: "foo"
state: "absent"
pn_trusted_ports: "1"
- name: dhcp filter modify
pn_dhcp_filter:
pn_cliswitch: "sw01"
pn_name: "foo"
state: "update"
pn_trusted_ports: "1,2"
"""
RETURN = """
command:
description: the CLI command run on the target node.
returned: always
type: str
stdout:
description: set of responses from the dhcp-filter command.
returned: always
type: list
stderr:
description: set of error responses from the dhcp-filter command.
returned: on error
type: list
changed:
description: indicates whether the CLI caused changes on the target.
returned: always
type: bool
"""
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.netvisor.pn_nvos import pn_cli, run_cli
from ansible.module_utils.network.netvisor.netvisor import run_commands
def check_cli(module, cli):
    """
    This method checks for idempotency using the dhcp-filter-show command.
    If a dhcp-filter with the given name exists, return True else False.
    :param module: The Ansible module to fetch input parameters
    :param cli: The CLI string
    :return: True if a dhcp-filter named ``pn_name`` already exists.
    """
    user_name = module.params['pn_name']
    cli += ' dhcp-filter-show format name no-show-headers'
    out = run_commands(module, cli)[1]
    # Guard against empty/None output before calling split(): ``name in None``
    # would raise TypeError, and no output simply means no filters exist.
    if not out:
        return False
    return user_name in out.split()
def main():
    """Entry point: parse arguments and run the mapped dhcp-filter command."""
    # Map each Ansible ``state`` value to the Netvisor CLI sub-command.
    state_map = dict(
        present='dhcp-filter-create',
        absent='dhcp-filter-delete',
        update='dhcp-filter-modify'
    )
    module = AnsibleModule(
        argument_spec=dict(
            pn_cliswitch=dict(required=False, type='str'),
            state=dict(required=True, type='str',
                       choices=state_map.keys()),
            pn_trusted_ports=dict(required=False, type='str'),
            pn_name=dict(required=False, type='str'),
        ),
        # Per-state mandatory parameters: create/modify need trusted ports,
        # delete only needs the filter name.
        required_if=[
            ["state", "present", ["pn_name", "pn_trusted_ports"]],
            ["state", "absent", ["pn_name"]],
            ["state", "update", ["pn_name", "pn_trusted_ports"]]
        ]
    )
    # Accessing the arguments
    cliswitch = module.params['pn_cliswitch']
    state = module.params['state']
    trusted_ports = module.params['pn_trusted_ports']
    name = module.params['pn_name']
    command = state_map[state]
    # Building the CLI command string
    cli = pn_cli(module, cliswitch)
    # Idempotency probe: does a filter with this name already exist?
    USER_EXISTS = check_cli(module, cli)
    cli += ' %s name %s ' % (command, name)
    if command == 'dhcp-filter-modify':
        if USER_EXISTS is False:
            # Modifying a non-existent filter is an error, not a no-op.
            module.fail_json(
                failed=True,
                msg='dhcp-filter with name %s does not exist' % name
            )
    if command == 'dhcp-filter-delete':
        if USER_EXISTS is False:
            # Already absent: report "skipped" so the task stays idempotent.
            module.exit_json(
                skipped=True,
                msg='dhcp-filter with name %s does not exist' % name
            )
    if command == 'dhcp-filter-create':
        if USER_EXISTS is True:
            # Already present: report "skipped" so the task stays idempotent.
            module.exit_json(
                skipped=True,
                msg='dhcp-filter with name %s already exists' % name
            )
    if command != 'dhcp-filter-delete':
        if trusted_ports:
            cli += ' trusted-ports ' + trusted_ports
    run_cli(module, cli, state_map)
if __name__ == '__main__':
main()
| gpl-3.0 |
drammock/mne-python | mne/conftest.py | 1 | 23095 | # -*- coding: utf-8 -*-
# Author: Eric Larson <larson.eric.d@gmail.com>
#
# License: BSD (3-clause)
from contextlib import contextmanager
from distutils.version import LooseVersion
import gc
import os
import os.path as op
from pathlib import Path
import shutil
import sys
import warnings
import pytest
import numpy as np
import mne
from mne.datasets import testing
from mne.fixes import has_numba
from mne.stats import cluster_level
from mne.utils import _pl, _assert_no_instances, numerics
# Paths into the (optional) mne-testing-data checkout.  The duplicate
# assignments of ``subjects_dir`` and ``fname_cov`` present previously were
# collapsed; every name keeps exactly the same value.
test_path = testing.data_path(download=False)
s_path = op.join(test_path, 'MEG', 'sample')
subjects_dir = op.join(test_path, 'subjects')
fname_evoked = op.join(s_path, 'sample_audvis_trunc-ave.fif')
fname_cov = op.join(s_path, 'sample_audvis_trunc-cov.fif')
fname_trans = op.join(s_path, 'sample_audvis_trunc-trans.fif')
fname_fwd = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-4-fwd.fif')
fname_fwd_full = op.join(s_path, 'sample_audvis_trunc-meg-eeg-oct-6-fwd.fif')
bem_path = op.join(subjects_dir, 'sample', 'bem')
fname_bem = op.join(bem_path, 'sample-1280-bem.fif')
fname_src = op.join(bem_path, 'sample-oct-4-src.fif')
fname_aseg = op.join(subjects_dir, 'sample', 'mri', 'aseg.mgz')
# Tell pytest not to collect this module during test discovery.
collect_ignore = ['export/_eeglab.py']
def pytest_configure(config):
    """Configure pytest options (markers, fixtures, warning filters)."""
    # Markers
    for marker in ('slowtest', 'ultraslowtest'):
        config.addinivalue_line('markers', marker)
    # Fixtures
    for fixture in ('matplotlib_config',):
        config.addinivalue_line('usefixtures', fixture)
    # Warnings
    # - The leading ``error::`` promotes any warning not matched below to an
    #   error so new warnings fail the suite.
    # - Once SciPy updates not to have non-integer and non-tuple errors (1.2.0)
    #   we should remove them from here.
    # - This list should also be considered alongside reset_warnings in
    #   doc/conf.py.
    warning_lines = r"""
    error::
    ignore:.*deprecated and ignored since IPython.*:DeprecationWarning
    ignore::ImportWarning
    ignore:the matrix subclass:PendingDeprecationWarning
    ignore:numpy.dtype size changed:RuntimeWarning
    ignore:.*HasTraits.trait_.*:DeprecationWarning
    ignore:.*takes no parameters:DeprecationWarning
    ignore:joblib not installed:RuntimeWarning
    ignore:Using a non-tuple sequence for multidimensional indexing:FutureWarning
    ignore:using a non-integer number instead of an integer will result in an error:DeprecationWarning
    ignore:Importing from numpy.testing.decorators is deprecated:DeprecationWarning
    ignore:np.loads is deprecated, use pickle.loads instead:DeprecationWarning
    ignore:The oldnumeric module will be dropped:DeprecationWarning
    ignore:Collection picker None could not be converted to float:UserWarning
    ignore:covariance is not positive-semidefinite:RuntimeWarning
    ignore:Can only plot ICA components:RuntimeWarning
    ignore:Matplotlib is building the font cache using fc-list:UserWarning
    ignore:Using or importing the ABCs from 'collections':DeprecationWarning
    ignore:`formatargspec` is deprecated:DeprecationWarning
    # This is only necessary until sklearn updates their wheels for NumPy 1.16
    ignore:numpy.ufunc size changed:RuntimeWarning
    ignore:.*mne-realtime.*:DeprecationWarning
    ignore:.*imp.*:DeprecationWarning
    ignore:Exception creating Regex for oneOf.*:SyntaxWarning
    ignore:scipy\.gradient is deprecated.*:DeprecationWarning
    ignore:sklearn\.externals\.joblib is deprecated.*:FutureWarning
    ignore:The sklearn.*module.*deprecated.*:FutureWarning
    ignore:.*trait.*handler.*deprecated.*:DeprecationWarning
    ignore:.*rich_compare.*metadata.*deprecated.*:DeprecationWarning
    ignore:.*In future, it will be an error for 'np.bool_'.*:DeprecationWarning
    ignore:.*`np.bool` is a deprecated alias.*:DeprecationWarning
    ignore:.*`np.int` is a deprecated alias.*:DeprecationWarning
    ignore:.*`np.float` is a deprecated alias.*:DeprecationWarning
    ignore:.*`np.object` is a deprecated alias.*:DeprecationWarning
    ignore:.*`np.long` is a deprecated alias:DeprecationWarning
    ignore:.*Converting `np\.character` to a dtype is deprecated.*:DeprecationWarning
    ignore:.*sphinx\.util\.smartypants is deprecated.*:
    ignore:.*pandas\.util\.testing is deprecated.*:
    ignore:.*tostring.*is deprecated.*:DeprecationWarning
    ignore:.*QDesktopWidget\.availableGeometry.*:DeprecationWarning
    ignore:Unable to enable faulthandler.*:UserWarning
    ignore:Fetchers from the nilearn.*:FutureWarning
    ignore:SelectableGroups dict interface is deprecated\. Use select\.:DeprecationWarning
    ignore:Call to deprecated class vtk.*:DeprecationWarning
    ignore:Call to deprecated method.*Deprecated since.*:DeprecationWarning
    always:.*get_data.* is deprecated in favor of.*:DeprecationWarning
    always::ResourceWarning
    """  # noqa: E501
    # Register each non-empty, non-comment line as a filterwarnings entry.
    for warning_line in warning_lines.split('\n'):
        warning_line = warning_line.strip()
        if warning_line and not warning_line.startswith('#'):
            config.addinivalue_line('filterwarnings', warning_line)
# Have to be careful with autouse=True, but this is just an int comparison
# so it shouldn't really add appreciable overhead
@pytest.fixture(autouse=True)
def check_verbose(request):
    """Set to the default logging level to ensure it's tested properly."""
    level_before = mne.utils.logger.level
    yield
    # Guard global state: the test must leave the logger level untouched.
    if mne.utils.logger.level != level_before:
        test_name = '.'.join([request.module.__name__,
                              request.function.__name__])
        pytest.fail(test_name + ' modifies logger.level')
@pytest.fixture(autouse=True)
def close_all():
    """Close all matplotlib plots, regardless of test status."""
    # Cheap enough to run after every test (an int comparison plus close).
    from matplotlib import pyplot
    yield
    pyplot.close('all')
@pytest.fixture(autouse=True)
def add_mne(doctest_namespace):
    """Add mne to the namespace."""
    # Make ``mne`` available to every collected doctest.
    doctest_namespace.update(mne=mne)
@pytest.fixture(scope='function')
def verbose_debug():
    """Run a test with debug verbosity."""
    # Elevate MNE logging to 'debug' for the duration of the test only.
    debug_ctx = mne.utils.use_log_level('debug')
    with debug_ctx:
        yield
@pytest.fixture(scope='session')
def matplotlib_config():
    """Configure matplotlib for viz tests.

    Forces a non-interactive backend (unless overridden via the
    MNE_MPL_TESTING_BACKEND environment variable) and patches matplotlib's
    CallbackRegistry so exceptions raised inside event handlers propagate
    instead of being swallowed.
    """
    import matplotlib
    from matplotlib import cbook
    # Allow for easy interactive debugging with a call like:
    #
    #     $ MNE_MPL_TESTING_BACKEND=Qt5Agg pytest mne/viz/tests/test_raw.py -k annotation -x --pdb  # noqa: E501
    #
    try:
        want = os.environ['MNE_MPL_TESTING_BACKEND']
    except KeyError:
        want = 'agg'  # don't pop up windows
    with warnings.catch_warnings(record=True):  # ignore warning
        warnings.filterwarnings('ignore')
        matplotlib.use(want, force=True)
    import matplotlib.pyplot as plt
    assert plt.get_backend() == want
    # overwrite some params that can horribly slow down tests that
    # users might have changed locally (but should not otherwise affect
    # functionality)
    plt.ioff()
    plt.rcParams['figure.dpi'] = 100
    try:
        from traits.etsconfig.api import ETSConfig
    except Exception:
        pass
    else:
        ETSConfig.toolkit = 'qt4'
    # Make sure that we always reraise exceptions in handlers
    orig = cbook.CallbackRegistry

    class CallbackRegistryReraise(orig):
        def __init__(self, exception_handler=None):
            args = ()
            if LooseVersion(matplotlib.__version__) >= LooseVersion('2.1'):
                args += (exception_handler,)
            super(CallbackRegistryReraise, self).__init__(*args)
    cbook.CallbackRegistry = CallbackRegistryReraise
@pytest.fixture(scope='session')
def ci_macos():
    """Determine if running on MacOS CI."""
    on_ci = os.getenv('CI', 'false').lower() == 'true'
    return on_ci and sys.platform == 'darwin'
@pytest.fixture(scope='session')
def azure_windows():
    """Determine if running on Azure Windows."""
    on_azure = os.getenv('AZURE_CI_WINDOWS', 'false').lower() == 'true'
    return on_azure and sys.platform.startswith('win')
@pytest.fixture()
def check_gui_ci(ci_macos, azure_windows):
    """Skip tests that are not reliable on CIs."""
    if ci_macos or azure_windows:
        pytest.skip('Skipping GUI tests on MacOS CIs and Azure Windows')
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _evoked():
    # Session-scoped master copy: tests must not mutate it; they should use
    # the function-scoped ``evoked`` fixture instead.
    master = mne.read_evokeds(fname_evoked, condition='Left Auditory',
                              baseline=(None, 0))
    master.crop(0, 0.2)
    return master
@pytest.fixture()
def evoked(_evoked):
    """Get evoked data."""
    # Hand out a fresh copy so tests can mutate it freely.
    fresh = _evoked.copy()
    return fresh
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def noise_cov():
    """Get a noise cov from the testing dataset."""
    # Function-scoped: re-read per test so mutations never leak between tests.
    cov = mne.read_cov(fname_cov)
    return cov
@pytest.fixture(scope='function')
def bias_params_free(evoked, noise_cov):
    """Provide inputs for free bias functions."""
    forward = mne.read_forward_solution(fname_fwd)
    return _bias_params(evoked, noise_cov, forward)
@pytest.fixture(scope='function')
def bias_params_fixed(evoked, noise_cov):
    """Provide inputs for fixed bias functions."""
    forward = mne.read_forward_solution(fname_fwd)
    # Convert in place to a fixed-orientation, surface-oriented forward.
    mne.convert_forward_solution(
        forward, force_fixed=True, surf_ori=True, copy=False)
    return _bias_params(evoked, noise_cov, forward)
def _bias_params(evoked, noise_cov, fwd):
    """Build (evoked, fwd, noise_cov, data_cov, want) inputs for bias tests.

    ``want`` gives, for each column of the restricted forward, the index of
    the source that generated it, so localization-bias tests can compare
    peak locations against it.
    """
    evoked.pick_types(meg=True, eeg=True, exclude=())
    # restrict to limited set of verts (small src here) and one hemi for speed
    vertices = [fwd['src'][0]['vertno'].copy(), []]
    stc = mne.SourceEstimate(
        np.zeros((sum(len(v) for v in vertices), 1)), vertices, 0, 1)
    fwd = mne.forward.restrict_forward_to_stc(fwd, stc)
    assert fwd['sol']['row_names'] == noise_cov['names']
    assert noise_cov['names'] == evoked.ch_names
    # Use the forward gain matrix itself as the "measured" evoked data.
    evoked = mne.EvokedArray(fwd['sol']['data'].copy(), evoked.info)
    data_cov = noise_cov.copy()
    data = fwd['sol']['data'] @ fwd['sol']['data'].T
    data *= 1e-14  # 100 nAm at each source, effectively (1e-18 would be 1 nAm)
    # This is rank-deficient, so let's make it actually positive semidefinite
    # by regularizing a tiny bit
    data.flat[::data.shape[0] + 1] += mne.make_ad_hoc_cov(evoked.info)['data']
    # Do our projection
    proj, _, _ = mne.io.proj.make_projector(
        data_cov['projs'], data_cov['names'])
    data = proj @ data @ proj.T
    data_cov['data'][:] = data
    assert data_cov['data'].shape[0] == len(noise_cov['names'])
    want = np.arange(fwd['sol']['data'].shape[1])
    if not mne.forward.is_fixed_orient(fwd):
        # Free orientation: three forward columns per source location.
        want //= 3
    return evoked, fwd, noise_cov, data_cov, want
@pytest.fixture
def garbage_collect():
    """Garbage collect on exit."""
    yield
    # Force a collection so objects freed by the test are reclaimed now.
    gc.collect()
@pytest.fixture(params=["mayavi", "pyvista"])
def renderer(request, garbage_collect):
    """Yield the 3D backends."""
    # Parametrized over both non-interactive 3D backends.
    with _use_backend(request.param, interactive=False) as backend_renderer:
        yield backend_renderer
@pytest.fixture(params=["pyvista"])
def renderer_pyvista(request, garbage_collect):
    """Yield the PyVista backend."""
    # Non-interactive PyVista only.
    with _use_backend(request.param, interactive=False) as backend_renderer:
        yield backend_renderer
@pytest.fixture(params=["notebook"])
def renderer_notebook(request):
    """Yield the 3D notebook renderer."""
    # Notebook backend, non-interactive.
    with _use_backend(request.param, interactive=False) as backend_renderer:
        yield backend_renderer
@pytest.fixture(scope="module", params=["pyvista"])
def renderer_interactive_pyvista(request):
    """Yield the interactive PyVista backend."""
    # Module-scoped: interactive windows are expensive to open/close.
    with _use_backend(request.param, interactive=True) as backend_renderer:
        yield backend_renderer
@pytest.fixture(scope="module", params=["pyvista", "mayavi"])
def renderer_interactive(request):
    """Yield the interactive 3D backends."""
    with _use_backend(request.param, interactive=True) as renderer:
        if renderer._get_3d_backend() == 'mayavi':
            # The mayavi backend additionally needs PySurfer; probe the
            # import here (warnings suppressed) and skip if it is missing.
            with warnings.catch_warnings(record=True):
                try:
                    from surfer import Brain  # noqa: 401 analysis:ignore
                except Exception:
                    pytest.skip('Requires PySurfer')
        yield renderer
@contextmanager
def _use_backend(backend_name, interactive):
    """Activate a 3D test backend and yield its renderer module.

    Skips the calling test (via ``_check_skip_backend``) when the backend's
    dependencies are missing, and always closes the backend's figures on
    exit.
    """
    from mne.viz.backends.renderer import _use_test_3d_backend
    _check_skip_backend(backend_name)
    with _use_test_3d_backend(backend_name, interactive=interactive):
        from mne.viz.backends import renderer
        try:
            yield renderer
        finally:
            renderer.backend._close_all()
def _check_skip_backend(name):
    """Skip the current test when deps for backend ``name`` are missing."""
    from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista,
                                               has_pyqt5, has_imageio_ffmpeg)
    # Backend-specific requirements first (each skip raises immediately).
    if name == 'mayavi' and not has_mayavi():
        pytest.skip("Test skipped, requires mayavi.")
    elif name == 'pyvista' and not has_imageio_ffmpeg():
        pytest.skip("Test skipped, requires imageio-ffmpeg")
    # Shared requirements.
    if name in ('pyvista', 'notebook') and not has_pyvista():
        pytest.skip("Test skipped, requires pyvista.")
    if name in ('mayavi', 'pyvista') and not has_pyqt5():
        pytest.skip("Test skipped, requires PyQt5.")
@pytest.fixture(scope='session')
def pixel_ratio():
    """Get the pixel ratio."""
    from mne.viz.backends.tests._utils import (has_mayavi, has_pyvista,
                                               has_pyqt5)
    # Without a 3D backend plus PyQt5 there is no window to query; assume a
    # 1:1 device-pixel ratio in that case.
    if not (has_mayavi() or has_pyvista()) or not has_pyqt5():
        return 1.
    from PyQt5.QtWidgets import QApplication, QMainWindow
    # Reuse an existing QApplication if one is running (only one may exist).
    _ = QApplication.instance() or QApplication([])
    window = QMainWindow()
    ratio = float(window.devicePixelRatio())
    window.close()
    return ratio
@pytest.fixture(scope='function', params=[testing._pytest_param()])
def subjects_dir_tmp(tmpdir):
    """Copy MNE-testing-data subjects_dir to a temp dir for manipulation."""
    for subject in ('sample', 'fsaverage'):
        source_dir = op.join(subjects_dir, subject)
        shutil.copytree(source_dir, str(tmpdir.join(subject)))
    return str(tmpdir)
# Scoping these as session will make things faster, but need to make sure
# not to modify them in-place in the tests, so keep them private
@pytest.fixture(scope='session', params=[testing._pytest_param()])
def _evoked_cov_sphere(_evoked):
    """Compute a small evoked/cov/sphere combo for use with forwards."""
    evoked = _evoked.copy().pick_types(meg=True)
    # Decimate the channel set to keep forward computations fast.
    evoked.pick_channels(evoked.ch_names[::4])
    assert len(evoked.ch_names) == 77
    cov = mne.read_cov(fname_cov)
    sphere = mne.make_sphere_model('auto', 'auto', evoked.info)
    return evoked, cov, sphere
@pytest.fixture(scope='session')
def _fwd_surf(_evoked_cov_sphere):
    """Compute a forward for a surface source space."""
    evoked, cov, sphere = _evoked_cov_sphere
    surf_src = mne.read_source_spaces(fname_src)
    fwd = mne.make_forward_solution(
        evoked.info, fname_trans, surf_src, sphere, mindist=5.0)
    return fwd
@pytest.fixture(scope='session')
def _fwd_subvolume(_evoked_cov_sphere):
    """Compute a forward for a volume source space."""
    pytest.importorskip('nibabel')
    evoked, cov, sphere = _evoked_cov_sphere
    # The second label is deliberately mis-cased to exercise the error path
    # of setup_volume_source_space before being corrected below.
    volume_labels = ['Left-Cerebellum-Cortex', 'right-Cerebellum-Cortex']
    with pytest.raises(ValueError,
                       match=r"Did you mean one of \['Right-Cere"):
        mne.setup_volume_source_space(
            'sample', pos=20., volume_label=volume_labels,
            subjects_dir=subjects_dir)
    volume_labels[1] = 'R' + volume_labels[1][1:]
    src_vol = mne.setup_volume_source_space(
        'sample', pos=20., volume_label=volume_labels,
        subjects_dir=subjects_dir, add_interpolator=False)
    return mne.make_forward_solution(
        evoked.info, fname_trans, src_vol, sphere, mindist=5.0)
@pytest.fixture(scope='session')
def _all_src_types_fwd(_fwd_surf, _fwd_subvolume):
    """Create all three forward types (surf, vol, mixed)."""
    fwds = dict(surface=_fwd_surf, volume=_fwd_subvolume)
    # Adding source spaces volume-first is invalid; assert that it raises
    # before building the mixed forward the right way around.
    with pytest.raises(RuntimeError,
                       match='Invalid source space with kinds'):
        fwds['volume']['src'] + fwds['surface']['src']
    # mixed (4)
    fwd = fwds['surface'].copy()
    f2 = fwds['volume']
    # Concatenate the per-source arrays of the two forwards along the
    # appropriate axis (rows for positions/normals, columns for gains).
    for keys, axis in [(('source_rr',), 0),
                       (('source_nn',), 0),
                       (('sol', 'data'), 1),
                       (('_orig_sol',), 1)]:
        a, b = fwd, f2
        key = keys[0]
        if len(keys) > 1:
            a, b = a[key], b[key]
            key = keys[1]
        a[key] = np.concatenate([a[key], b[key]], axis=axis)
    fwd['sol']['ncol'] = fwd['sol']['data'].shape[1]
    fwd['nsource'] = fwd['sol']['ncol'] // 3  # free orient: 3 cols per source
    fwd['src'] = fwd['src'] + f2['src']
    fwds['mixed'] = fwd
    return fwds
@pytest.fixture(scope='session')
def _all_src_types_inv_evoked(_evoked_cov_sphere, _all_src_types_fwd):
    """Compute inverses for all source types."""
    evoked, cov, _ = _evoked_cov_sphere
    invs = dict()
    for kind, fwd in _all_src_types_fwd.items():
        assert fwd['src'].kind == kind
        # Building these inverses emits a "has magnitude" RuntimeWarning;
        # assert it so an unexpected silence would also be caught.
        with pytest.warns(RuntimeWarning, match='has magnitude'):
            invs[kind] = mne.minimum_norm.make_inverse_operator(
                evoked.info, fwd, cov)
    return invs, evoked
@pytest.fixture(scope='function')
def all_src_types_inv_evoked(_all_src_types_inv_evoked):
    """All source types of inverses, allowing for possible modification."""
    cached_invs, cached_evoked = _all_src_types_inv_evoked
    # Copy everything so tests can mutate without touching the session cache.
    fresh_invs = {kind: inv.copy() for kind, inv in cached_invs.items()}
    return fresh_invs, cached_evoked.copy()
@pytest.fixture(scope='function')
def mixed_fwd_cov_evoked(_evoked_cov_sphere, _all_src_types_fwd):
    """Provide copies of the mixed forward plus cov/evoked for modification."""
    evoked, cov, _ = _evoked_cov_sphere
    return _all_src_types_fwd['mixed'].copy(), cov.copy(), evoked.copy()
@pytest.fixture(scope='session')
@pytest.mark.slowtest
@pytest.mark.parametrize(params=[testing._pytest_param()])
def src_volume_labels():
    """Create a 7mm source space with labels."""
    # NOTE(review): applying pytest.mark decorators to a *fixture* has no
    # effect, and ``parametrize`` takes (argnames, argvalues), not
    # ``params=`` -- confirm whether ``@pytest.fixture(params=...)`` was
    # intended here.
    pytest.importorskip('nibabel')
    volume_labels = mne.get_volume_labels_from_aseg(fname_aseg)
    src = mne.setup_volume_source_space(
        'sample', 7., mri='aseg.mgz', volume_label=volume_labels,
        add_interpolator=False, bem=fname_bem,
        subjects_dir=subjects_dir)
    lut, _ = mne.read_freesurfer_lut()
    assert len(volume_labels) == 46
    assert volume_labels[0] == 'Unknown'
    assert lut['Unknown'] == 0  # it will be excluded during label gen
    return src, tuple(volume_labels), lut
def _fail(*args, **kwargs):
raise AssertionError('Test should not download')
@pytest.fixture(scope='function')
def download_is_error(monkeypatch):
    """Prevent downloading by raising an error when it's attempted."""
    # Swap the HTTP fetcher out so any download attempt raises immediately.
    monkeypatch.setattr(mne.utils.fetching, '_get_http', _fail)
@pytest.fixture()
def brain_gc(request):
    """Ensure that brain can be properly garbage collected."""
    keys = (
        'renderer_interactive',
        'renderer_interactive_pyvista',
        'renderer_interactive_pysurfer',
        'renderer',
        'renderer_pyvista',
        'renderer_notebook',
    )
    # This fixture only makes sense alongside one of the renderer fixtures.
    assert set(request.fixturenames) & set(keys) != set()
    for key in keys:
        if key in request.fixturenames:
            is_pv = request.getfixturevalue(key)._get_3d_backend() == 'pyvista'
            close_func = request.getfixturevalue(key).backend._close_all
            break
    # The leak check below only applies to the PyVista backend (and only on
    # pyvista versions newer than 0.26.1).
    if not is_pv:
        yield
        return
    import pyvista
    if LooseVersion(pyvista.__version__) <= LooseVersion('0.26.1'):
        yield
        return
    from mne.viz import Brain
    # Snapshot ids of pre-existing objects so only new VTK objects count.
    ignore = set(id(o) for o in gc.get_objects())
    yield
    close_func()
    # no need to warn if the test itself failed, pytest-harvest helps us here
    try:
        outcome = request.node.harvest_rep_call
    except Exception:
        outcome = 'failed'
    if outcome != 'passed':
        return
    _assert_no_instances(Brain, 'after')
    # We only check VTK for PyVista -- Mayavi/PySurfer is not as strict
    objs = gc.get_objects()
    bad = list()
    for o in objs:
        try:
            name = o.__class__.__name__
        except Exception:  # old Python, probably
            pass
        else:
            if name.startswith('vtk') and id(o) not in ignore:
                bad.append(name)
        del o
    del objs, ignore, Brain
    assert len(bad) == 0, 'VTK objects linger:\n' + '\n'.join(bad)
def pytest_sessionfinish(session, exitstatus):
    """Handle the end of the session.

    When ``--durations`` is given and pytest-harvest is installed, print the
    N slowest test modules (per-file duration totals).
    """
    n = session.config.option.durations
    if n is None:
        return
    print('\n')
    try:
        import pytest_harvest
    except ImportError:
        print('Module-level timings require pytest-harvest')
        return
    from py.io import TerminalWriter
    # get the number to print
    res = pytest_harvest.get_session_synthesis_dct(session)
    files = dict()
    # Aggregate per-test durations into per-module (file) totals.
    for key, val in res.items():
        parts = Path(key.split(':')[0]).parts
        # split mne/tests/test_whatever.py into separate categories since these
        # are essentially submodule-level tests. Keeping just [:3] works,
        # except for mne/viz where we want level-4 granulatity
        parts = parts[:4 if parts[:2] == ('mne', 'viz') else 3]
        if not parts[-1].endswith('.py'):
            parts = parts + ('',)
        file_key = '/'.join(parts)
        files[file_key] = files.get(file_key, 0) + val['pytest_duration_s']
    files = sorted(list(files.items()), key=lambda x: x[1])[::-1]
    # print
    files = files[:n]
    if len(files):
        writer = TerminalWriter()
        writer.line()  # newline
        writer.sep('=', f'slowest {n} test module{_pl(n)}')
        names, timings = zip(*files)
        timings = [f'{timing:0.2f}s total' for timing in timings]
        rjust = max(len(timing) for timing in timings)
        timings = [timing.rjust(rjust) for timing in timings]
        for name, timing in zip(names, timings):
            writer.line(f'{timing.ljust(15)}{name}')
@pytest.fixture(scope="function", params=('Numba', 'NumPy'))
def numba_conditional(monkeypatch, request):
    """Test both code paths on machines that have Numba."""
    assert request.param in ('Numba', 'NumPy')
    # Force the pure-NumPy fallbacks even when Numba is installed so both
    # implementations get exercised.
    if request.param == 'NumPy' and has_numba:
        monkeypatch.setattr(
            cluster_level, '_get_buddies', cluster_level._get_buddies_fallback)
        monkeypatch.setattr(
            cluster_level, '_get_selves', cluster_level._get_selves_fallback)
        monkeypatch.setattr(
            cluster_level, '_where_first', cluster_level._where_first_fallback)
        monkeypatch.setattr(
            numerics, '_arange_div', numerics._arange_div_fallback)
    if request.param == 'Numba' and not has_numba:
        pytest.skip('Numba not installed')
    yield request.param
| bsd-3-clause |
sudheerchintala/LearnEraPlatForm | common/djangoapps/track/tests/test_shim.py | 9 | 3543 | """Ensure emitted events contain the fields legacy processors expect to find."""
from datetime import datetime
from freezegun import freeze_time
from mock import sentinel
from django.test import TestCase
from django.test.utils import override_settings
from pytz import UTC
from eventtracking.django import DjangoTracker
IN_MEMORY_BACKEND = {
'mem': {
'ENGINE': 'track.tests.test_shim.InMemoryBackend'
}
}
LEGACY_SHIM_PROCESSOR = [
{
'ENGINE': 'track.shim.LegacyFieldMappingProcessor'
}
]
FROZEN_TIME = datetime(2013, 10, 3, 8, 24, 55, tzinfo=UTC)
@freeze_time(FROZEN_TIME)
class LegacyFieldMappingProcessorTestCase(TestCase):
    """Ensure emitted events contain the fields legacy processors expect to find."""
    @override_settings(
        EVENT_TRACKING_BACKENDS=IN_MEMORY_BACKEND,
        EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
    )
    def test_event_field_mapping(self):
        # Emit through the in-memory backend with a fully-populated context,
        # then verify each context value lands where legacy consumers expect.
        django_tracker = DjangoTracker()
        data = {sentinel.key: sentinel.value}
        context = {
            'username': sentinel.username,
            'session': sentinel.session,
            'ip': sentinel.ip,
            'host': sentinel.host,
            'agent': sentinel.agent,
            'path': sentinel.path,
            'user_id': sentinel.user_id,
            'course_id': sentinel.course_id,
            'org_id': sentinel.org_id,
            'event_type': sentinel.event_type,
            'client_id': sentinel.client_id,
        }
        with django_tracker.context('test', context):
            django_tracker.emit(sentinel.name, data)
        emitted_event = django_tracker.backends['mem'].get_event()
        expected_event = {
            'event_type': sentinel.event_type,
            'name': sentinel.name,
            'context': {
                'user_id': sentinel.user_id,
                'course_id': sentinel.course_id,
                'org_id': sentinel.org_id,
                'path': sentinel.path,
            },
            'event': data,
            'username': sentinel.username,
            'event_source': 'server',
            'time': FROZEN_TIME,
            'agent': sentinel.agent,
            'host': sentinel.host,
            'ip': sentinel.ip,
            'page': None,
            'session': sentinel.session,
        }
        self.assertEqual(expected_event, emitted_event)
    @override_settings(
        EVENT_TRACKING_BACKENDS=IN_MEMORY_BACKEND,
        EVENT_TRACKING_PROCESSORS=LEGACY_SHIM_PROCESSOR,
    )
    def test_missing_fields(self):
        # With no context supplied, the shim fills the legacy fields with
        # empty-string/None defaults rather than omitting them.
        django_tracker = DjangoTracker()
        django_tracker.emit(sentinel.name)
        emitted_event = django_tracker.backends['mem'].get_event()
        expected_event = {
            'event_type': sentinel.name,
            'name': sentinel.name,
            'context': {},
            'event': {},
            'username': '',
            'event_source': 'server',
            'time': FROZEN_TIME,
            'agent': '',
            'host': '',
            'ip': '',
            'page': None,
            'session': '',
        }
        self.assertEqual(expected_event, emitted_event)
class InMemoryBackend(object):
    """A backend that simply stores all emitted events in memory."""

    def __init__(self):
        super(InMemoryBackend, self).__init__()
        # Events are recorded in emission order.
        self.events = list()

    def send(self, event):
        """Record ``event`` in the in-memory list (no forwarding)."""
        self.events += [event]

    def get_event(self):
        """Return the first event that was emitted."""
        return self.events[0]
| agpl-3.0 |
alan-unravel/bokeh | examples/plotting/server/selection_histogram.py | 42 | 4001 | # The plot server must be running
# Go to http://localhost:5006/bokeh to view this plot
import numpy as np
from bokeh.models import BoxSelectTool, LassoSelectTool, Paragraph
from bokeh.plotting import (
curdoc, cursession, figure, output_server, show, hplot, vplot
)
# create three normal population samples with different parameters
N1 = 2000
N2 = 5000
N3 = 1000
x1 = np.random.normal(loc=5.0, size=N1) * 100
y1 = np.random.normal(loc=10.0, size=N1) * 10
x2 = np.random.normal(loc=5.0, size=N2) * 50
y2 = np.random.normal(loc=5.0, size=N2) * 10
x3 = np.random.normal(loc=55.0, size=N3) * 10
y3 = np.random.normal(loc=4.0, size=N3) * 10
x = np.concatenate((x1, x2, x3))
y = np.concatenate((y1, y2, y3))
all_inds = np.arange(len(x1) + len(x2) + len(x3))
output_server("selection_histogram")
TOOLS="pan,wheel_zoom,box_select,lasso_select"
# create the scatter plot
p = figure(tools=TOOLS, plot_width=600, plot_height=600, title=None, min_border=10, min_border_left=50)
p.scatter(x, y, size=3, color="#3A5785", alpha=0.6, name="scatter")
renderer = p.select(dict(name="scatter"))
scatter_ds = renderer[0].data_source
# Only update the selection on mouse-up, not on every mouse move.
box_select_tool = p.select(dict(type=BoxSelectTool))
box_select_tool.select_every_mousemove = False
lasso_select_tool = p.select(dict(type=LassoSelectTool))
lasso_select_tool.select_every_mousemove = False
# create the horizontal histogram
hhist, hedges = np.histogram(x, bins=20)
hzeros = np.zeros(len(hedges)-1)
hmax = max(hhist)*1.1
ph = figure(toolbar_location=None, plot_width=p.plot_width, plot_height=200, x_range=p.x_range,
            y_range=(-hmax, hmax), title=None, min_border=10, min_border_left=50)
# Three quad layers: full data (white), selected (alpha .5), unselected (.1).
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hhist, color="white", line_color="#3A5785")
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, color="#3A5785", alpha=0.5, line_color=None, name="hhist")
ph.quad(bottom=0, left=hedges[:-1], right=hedges[1:], top=hzeros, color="#3A5785", alpha=0.1, line_color=None, name="hhist2")
ph.xgrid.grid_line_color = None
ph_source = ph.select(dict(name="hhist"))[0].data_source
ph_source2 = ph.select(dict(name="hhist2"))[0].data_source
# create the vertical histogram
vhist, vedges = np.histogram(y, bins=20)
vzeros = np.zeros(len(vedges)-1)
vmax = max(vhist)*1.1
# need to adjust for toolbar height, unfortunately
th = 42
pv = figure(toolbar_location=None, plot_width=200, plot_height=p.plot_height+th-10, x_range=(-vmax, vmax),
            y_range=p.y_range, title=None, min_border=10, min_border_top=th)
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vhist, color="white", line_color="#3A5785")
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, color="#3A5785", alpha=0.5, line_color=None, name="vhist")
pv.quad(left=0, bottom=vedges[:-1], top=vedges[1:], right=vzeros, color="#3A5785", alpha=0.1, line_color=None, name="vhist2")
pv.ygrid.grid_line_color = None
pv_source = pv.select(dict(name="vhist"))[0].data_source
pv_source2 = pv.select(dict(name="vhist2"))[0].data_source
# set up callbacks
def on_selection_change(obj, attr, old, new):
    """Update the four histogram glyphs to reflect the current selection.

    Selected points feed the solid histograms; unselected points feed the
    faded, mirrored (negative-direction) ones.
    """
    inds = np.array(new['1d']['indices'])
    if len(inds) == 0 or len(inds) == len(x):
        # Nothing (or everything) selected: blank out all four histograms.
        hhist = hzeros
        vhist = vzeros
        hhist2 = hzeros
        vhist2 = vzeros
    else:
        hhist, _ = np.histogram(x[inds], bins=hedges)
        vhist, _ = np.histogram(y[inds], bins=vedges)
        # Boolean mask of the points *not* selected.  Use the builtin
        # ``bool``: the ``np.bool`` alias was deprecated in NumPy 1.20 and
        # removed in NumPy 1.24.
        negative_inds = np.ones_like(x, dtype=bool)
        negative_inds[inds] = False
        hhist2, _ = np.histogram(x[negative_inds], bins=hedges)
        vhist2, _ = np.histogram(y[negative_inds], bins=vedges)
    ph_source.data["top"] = hhist
    pv_source.data["right"] = vhist
    # Mirror the complement histograms below/left of the axes.
    ph_source2.data["top"] = -hhist2
    pv_source2.data["right"] = -vhist2
    cursession().store_objects(ph_source, pv_source, ph_source2, pv_source2)
# Re-draw the linked histograms whenever the scatter selection changes.
scatter_ds.on_change('selected', on_selection_change)
# Layout: scatter + vertical histogram on top, horizontal histogram below.
layout = vplot(hplot(p, pv), hplot(ph, Paragraph()))
show(layout)
# Block and poll the plot server so the selection callback keeps firing.
cursession().poll_document(curdoc(), 0.05)
| bsd-3-clause |
bayusantoso/final-assignment-web-ontology | IMPLEMENTATION/Application/SourceCode/GOApps/flask/Lib/site-packages/pip/commands/uninstall.py | 798 | 2884 | from __future__ import absolute_import
import pip
from pip.wheel import WheelCache
from pip.req import InstallRequirement, RequirementSet, parse_requirements
from pip.basecommand import Command
from pip.exceptions import InstallationError
class UninstallCommand(Command):
    """
    Uninstall packages.
    pip is able to uninstall most installed packages. Known exceptions are:
    - Pure distutils packages installed with ``python setup.py install``, which
    leave behind no metadata to determine what files were installed.
    - Script wrappers installed by ``python setup.py develop``.
    """
    name = 'uninstall'
    usage = """
      %prog [options] <package> ...
      %prog [options] -r <requirements file> ..."""
    summary = 'Uninstall packages.'
    def __init__(self, *args, **kw):
        super(UninstallCommand, self).__init__(*args, **kw)
        # Command-line options specific to ``pip uninstall``.
        self.cmd_opts.add_option(
            '-r', '--requirement',
            dest='requirements',
            action='append',
            default=[],
            metavar='file',
            help='Uninstall all the packages listed in the given requirements '
                 'file. This option can be used multiple times.',
        )
        self.cmd_opts.add_option(
            '-y', '--yes',
            dest='yes',
            action='store_true',
            help="Don't ask for confirmation of uninstall deletions.")
        self.parser.insert_option_group(0, self.cmd_opts)
    def run(self, options, args):
        with self._build_session(options) as session:
            format_control = pip.index.FormatControl(set(), set())
            wheel_cache = WheelCache(options.cache_dir, format_control)
            # The RequirementSet only names what to remove; no build/src/
            # download directories are needed for an uninstall.
            requirement_set = RequirementSet(
                build_dir=None,
                src_dir=None,
                download_dir=None,
                isolated=options.isolated_mode,
                session=session,
                wheel_cache=wheel_cache,
            )
            # Requirements given directly on the command line.
            for name in args:
                requirement_set.add_requirement(
                    InstallRequirement.from_line(
                        name, isolated=options.isolated_mode,
                        wheel_cache=wheel_cache
                    )
                )
            # Requirements pulled from -r/--requirement files.
            for filename in options.requirements:
                for req in parse_requirements(
                        filename,
                        options=options,
                        session=session,
                        wheel_cache=wheel_cache):
                    requirement_set.add_requirement(req)
            if not requirement_set.has_requirements:
                raise InstallationError(
                    'You must give at least one requirement to %(name)s (see '
                    '"pip help %(name)s")' % dict(name=self.name)
                )
            requirement_set.uninstall(auto_confirm=options.yes)
| gpl-3.0 |
tzaffi/git-in-practice-repo | book/lib/python2.7/site-packages/django/contrib/gis/tests/layermap/models.py | 230 | 2285 | from django.contrib.gis.db import models
class State(models.Model):
    """Named region referenced by ``County`` via a ForeignKey."""
    name = models.CharField(max_length=20)
    objects = models.GeoManager()
class County(models.Model):
    """County with a NAD83 multipolygon boundary, linked to a ``State``."""
    name = models.CharField(max_length=25)
    state = models.ForeignKey(State)
    mpoly = models.MultiPolygonField(srid=4269) # Multipolygon in NAD83
    objects = models.GeoManager()
class CountyFeat(models.Model):
    """County variant storing a single Polygon (one row per OGR feature)."""
    name = models.CharField(max_length=25)
    poly = models.PolygonField(srid=4269)  # NAD83, same datum as County.mpoly
    objects = models.GeoManager()
class City(models.Model):
    """City point feature exercising several non-geometry field types."""
    name = models.CharField(max_length=25)
    name_txt = models.TextField(default='')
    population = models.IntegerField()
    density = models.DecimalField(max_digits=7, decimal_places=1)
    dt = models.DateField()
    point = models.PointField()  # default SRID (4326)
    objects = models.GeoManager()
class Interstate(models.Model):
    """Interstate highway as a linestring with a decimal length attribute."""
    name = models.CharField(max_length=20)
    length = models.DecimalField(max_digits=6, decimal_places=2)
    path = models.LineStringField()
    objects = models.GeoManager()
# Same as `City` above, but for testing model inheritance.
class CityBase(models.Model):
    """Same fields as `City` (minus dt/name_txt); base for inheritance tests."""
    name = models.CharField(max_length=25)
    population = models.IntegerField()
    density = models.DecimalField(max_digits=7, decimal_places=1)
    point = models.PointField()
    objects = models.GeoManager()
class ICity1(CityBase):
    """First level of multi-table inheritance: adds a date field."""
    dt = models.DateField()
class ICity2(ICity1):
    """Second inheritance level: adds an auto-updating timestamp."""
    dt_time = models.DateTimeField(auto_now=True)
class Invalid(models.Model):
    # Deliberately minimal model used by tests for invalid-mapping scenarios.
    point = models.PointField()
# LayerMapping dictionaries translating model field names to the
# corresponding OGR feature attributes for the models above.
co_mapping = {
    'name': 'Name',
    # ForeignKeys use a nested mapping for the *related* model (State here).
    'state': {'name': 'State'},
    # POLYGON features will be converted into MULTIPOLYGONs.
    'mpoly': 'MULTIPOLYGON',
}
cofeat_mapping = {
    'name': 'Name',
    'poly': 'POLYGON',
}
city_mapping = {
    'name': 'Name',
    'population': 'Population',
    'density': 'Density',
    'dt': 'Created',
    'point': 'POINT',
}
inter_mapping = {
    'name': 'Name',
    'length': 'Length',
    'path': 'LINESTRING',
}
| mit |
jvictor0/TweetTracker | src/Preprocess/hourandday.py | 1 | 1645 | from mrjob.job import MRJob
import json as simplejson
from datetime import datetime, timedelta
class MRRetweetRate(MRJob):
    """One-step mrjob that emits, per English tweet, the local hour and
    weekday of the (original) tweet's creation time.

    Written for Python 2 (mrjob era); `/` below is integer floor division.
    """
    def mapper_get_hour_day(self, _, line):
        """Parse one Gnip-format tweet JSON line and yield
        (tweet_id, (local_hour, weekday)) for English tweets created
        after 2014-03-01."""
        # Cutoff: ignore anything created before the collection window.
        startTime = datetime.strptime('2014-03-01T00:00:00.000Z', '%Y-%m-%dT%H:%M:%S.%fZ')
        tweet = simplejson.loads(line)
        language = 'es'
        if 'twitter_lang' in tweet :
            language = tweet['twitter_lang']
        if language == 'en' :
            if 'id' in tweet and 'retweetCount' in tweet :
                # Strip the Gnip URN prefixes to get bare numeric ids.
                tweetid = tweet['id'][len('tag:search.twitter.com,2005:') : ]
                tweeter = tweet['actor']['id'][len('id:twitter.com:') :]
                retweetTime = datetime.strptime(tweet['postedTime'], '%Y-%m-%dT%H:%M:%S.%fZ')
                creationTime = retweetTime
                if tweet['retweetCount'] > 0 :
                    # This record is a retweet: use the *original* tweet's
                    # id, author and creation time from the 'object' payload.
                    tweeter = tweet['object']['actor']['id'][len('id:twitter.com:') :]
                    tweetid = tweet['object']['id'][len('object:search.twitter.com,2005:') :]
                    creationTime = datetime.strptime(tweet['object']['postedTime'], '%Y-%m-%dT%H:%M:%S.%fZ')
                if (creationTime - startTime).total_seconds() > 0 :
                    if 'utcOffset' in tweet['actor'] and tweet['actor']['utcOffset'] is not None:
                        # utcOffset is in seconds; convert to whole hours.
                        offset = int(tweet['actor']['utcOffset'])/3600
                        # NOTE(review): hour + offset is not wrapped with % 24,
                        # so values outside 0..23 can be emitted -- confirm the
                        # downstream consumer expects that.
                        yield tweetid, (creationTime.hour + offset, creationTime.weekday())
    def reducer_output_hour_day(self, tweetid, stat):
        """Emit one line per tweet id using only the first (hour, day) pair."""
        dayofweek = 0
        hour = 0
        # Duplicate keys carry identical values; take the first and stop.
        for hr, day in stat :
            hour = hr
            dayofweek = day
            break
        # NOTE(review): '%s' % ' ' evaluates to a single space, so every
        # output line is space-prefixed -- presumably intentional formatting.
        yield None, ('%s' % ' ' + tweetid + ' ' + str(hour) + ' ' + str(dayofweek))
    def steps(self):
        # Single map-reduce step.
        return [
            self.mr(mapper=self.mapper_get_hour_day,
                    reducer=self.reducer_output_hour_day)
        ]
if __name__ == '__main__':
    # mrjob entry point: parses argv and runs the job (inline, Hadoop, or EMR).
    MRRetweetRate.run()
| mit |
Russell-IO/ansible | lib/ansible/modules/cloud/centurylink/clc_modify_server.py | 55 | 34575 | #!/usr/bin/python
#
# Copyright (c) 2015 CenturyLink
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Ansible-bot metadata describing the maturity/support tier of this module.
ANSIBLE_METADATA = {
    'metadata_version': '1.1',
    'status': ['preview'],
    'supported_by': 'community',
}
# Ansible DOCUMENTATION block (YAML). Fix: "insure" -> "ensure" in the
# description of the 'state' option.
DOCUMENTATION = '''
module: clc_modify_server
short_description: modify servers in CenturyLink Cloud.
description:
  - An Ansible module to modify servers in CenturyLink Cloud.
version_added: "2.0"
options:
  server_ids:
    description:
      - A list of server Ids to modify.
    required: True
  cpu:
    description:
      - How many CPUs to update on the server
  memory:
    description:
      - Memory (in GB) to set to the server.
  anti_affinity_policy_id:
    description:
      - The anti affinity policy id to be set for a hyper scale server.
        This is mutually exclusive with 'anti_affinity_policy_name'
  anti_affinity_policy_name:
    description:
      - The anti affinity policy name to be set for a hyper scale server.
        This is mutually exclusive with 'anti_affinity_policy_id'
  alert_policy_id:
    description:
      - The alert policy id to be associated to the server.
        This is mutually exclusive with 'alert_policy_name'
  alert_policy_name:
    description:
      - The alert policy name to be associated to the server.
        This is mutually exclusive with 'alert_policy_id'
  state:
    description:
      - The state to ensure that the provided resources are in.
    default: 'present'
    choices: ['present', 'absent']
  wait:
    description:
      - Whether to wait for the provisioning tasks to finish before returning.
    type: bool
    default: 'yes'
requirements:
  - python = 2.7
  - requests >= 2.5.0
  - clc-sdk
author: "CLC Runner (@clc-runner)"
notes:
  - To use this module, it is required to set the below environment variables which enables access to the
    Centurylink Cloud
  - CLC_V2_API_USERNAME, the account login id for the centurylink cloud
  - CLC_V2_API_PASSWORD, the account password for the centurylink cloud
  - Alternatively, the module accepts the API token and account alias. The API token can be generated using the
    CLC account login and password via the HTTP api call @ https://api.ctl.io/v2/authentication/login
  - CLC_V2_API_TOKEN, the API token generated from https://api.ctl.io/v2/authentication/login
  - CLC_ACCT_ALIAS, the account alias associated with the centurylink cloud
  - Users can set CLC_V2_API_URL to specify an endpoint for pointing to a different CLC environment.
'''
# Ansible EXAMPLES block (YAML). Fix: "a lust if servers" -> "a list of
# servers" in the last example's task name.
EXAMPLES = '''
# Note - You must set the CLC_V2_API_USERNAME And CLC_V2_API_PASSWD Environment variables before running these examples

- name: set the cpu count to 4 on a server
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    cpu: 4
    state: present

- name: set the memory to 8GB on a server
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    memory: 8
    state: present

- name: set the anti affinity policy on a server
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    anti_affinity_policy_name: 'aa_policy'
    state: present

- name: remove the anti affinity policy on a server
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    anti_affinity_policy_name: 'aa_policy'
    state: absent

- name: add the alert policy on a server
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    alert_policy_name: 'alert_policy'
    state: present

- name: remove the alert policy on a server
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    alert_policy_name: 'alert_policy'
    state: absent

- name: set the memory to 16GB and cpu to 8 core on a list of servers
  clc_modify_server:
    server_ids:
      - UC1TESTSVR01
      - UC1TESTSVR02
    cpu: 8
    memory: 16
    state: present
'''
RETURN = '''
server_ids:
description: The list of server ids that are changed
returned: success
type: list
sample:
[
"UC1TEST-SVR01",
"UC1TEST-SVR02"
]
servers:
description: The list of server objects that are changed
returned: success
type: list
sample:
[
{
"changeInfo":{
"createdBy":"service.wfad",
"createdDate":1438196820,
"modifiedBy":"service.wfad",
"modifiedDate":1438196820
},
"description":"test-server",
"details":{
"alertPolicies":[
],
"cpu":1,
"customFields":[
],
"diskCount":3,
"disks":[
{
"id":"0:0",
"partitionPaths":[
],
"sizeGB":1
},
{
"id":"0:1",
"partitionPaths":[
],
"sizeGB":2
},
{
"id":"0:2",
"partitionPaths":[
],
"sizeGB":14
}
],
"hostName":"",
"inMaintenanceMode":false,
"ipAddresses":[
{
"internal":"10.1.1.1"
}
],
"memoryGB":1,
"memoryMB":1024,
"partitions":[
],
"powerState":"started",
"snapshots":[
],
"storageGB":17
},
"groupId":"086ac1dfe0b6411989e8d1b77c4065f0",
"id":"test-server",
"ipaddress":"10.120.45.23",
"isTemplate":false,
"links":[
{
"href":"/v2/servers/wfad/test-server",
"id":"test-server",
"rel":"self",
"verbs":[
"GET",
"PATCH",
"DELETE"
]
},
{
"href":"/v2/groups/wfad/086ac1dfe0b6411989e8d1b77c4065f0",
"id":"086ac1dfe0b6411989e8d1b77c4065f0",
"rel":"group"
},
{
"href":"/v2/accounts/wfad",
"id":"wfad",
"rel":"account"
},
{
"href":"/v2/billing/wfad/serverPricing/test-server",
"rel":"billing"
},
{
"href":"/v2/servers/wfad/test-server/publicIPAddresses",
"rel":"publicIPAddresses",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/credentials",
"rel":"credentials"
},
{
"href":"/v2/servers/wfad/test-server/statistics",
"rel":"statistics"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/upcomingScheduledActivities",
"rel":"upcomingScheduledActivities"
},
{
"href":"/v2/servers/wfad/510ec21ae82d4dc89d28479753bf736a/scheduledActivities",
"rel":"scheduledActivities",
"verbs":[
"GET",
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/capabilities",
"rel":"capabilities"
},
{
"href":"/v2/servers/wfad/test-server/alertPolicies",
"rel":"alertPolicyMappings",
"verbs":[
"POST"
]
},
{
"href":"/v2/servers/wfad/test-server/antiAffinityPolicy",
"rel":"antiAffinityPolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
},
{
"href":"/v2/servers/wfad/test-server/cpuAutoscalePolicy",
"rel":"cpuAutoscalePolicyMapping",
"verbs":[
"PUT",
"DELETE"
]
}
],
"locationId":"UC1",
"name":"test-server",
"os":"ubuntu14_64Bit",
"osType":"Ubuntu 14 64-bit",
"status":"active",
"storageType":"standard",
"type":"standard"
}
]
'''
__version__ = '${version}'
import json
import os
from distutils.version import LooseVersion
try:
import requests
except ImportError:
REQUESTS_FOUND = False
else:
REQUESTS_FOUND = True
#
# Requires the clc-python-sdk.
# sudo pip install clc-sdk
#
try:
import clc as clc_sdk
from clc import CLCException
from clc import APIFailedResponse
except ImportError:
CLC_FOUND = False
clc_sdk = None
else:
CLC_FOUND = True
from ansible.module_utils.basic import AnsibleModule
class ClcModifyServer:
clc = clc_sdk
def __init__(self, module):
    """
    Construct module

    :param module: the AnsibleModule wrapping this task's parameters.

    Dependency checks are done here rather than at import time so Ansible
    can report a clean module failure instead of a traceback.
    """
    self.clc = clc_sdk
    self.module = module
    if not CLC_FOUND:
        self.module.fail_json(
            msg='clc-python-sdk required for this module')
    if not REQUESTS_FOUND:
        self.module.fail_json(
            msg='requests library is required for this module')
    # The CLC SDK needs requests >= 2.5.0; reject older installs up front.
    if requests.__version__ and LooseVersion(
            requests.__version__) < LooseVersion('2.5.0'):
        self.module.fail_json(
            msg='requests library version should be >= 2.5.0')
    # Tag all SDK HTTP traffic with this module's version string.
    self._set_user_agent(self.clc)
def process_request(self):
    """
    Process the request - Main Code Path
    :return: Returns with either an exit_json or fail_json
    """
    self._set_clc_credentials_from_env()
    p = self.module.params
    cpu = p.get('cpu')
    memory = p.get('memory')
    state = p.get('state')
    # cpu/memory can only be set, never removed, so they are invalid
    # in combination with state=absent.
    if state == 'absent' and (cpu or memory):
        return self.module.fail_json(
            msg='\'absent\' state is not supported for \'cpu\' and \'memory\' arguments')
    server_ids = p['server_ids']
    if not isinstance(server_ids, list):
        return self.module.fail_json(
            msg='server_ids needs to be a list of instances to modify: %s' %
            server_ids)
    (changed, server_dict_array, changed_server_ids) = self._modify_servers(
        server_ids=server_ids)
    self.module.exit_json(
        changed=changed,
        server_ids=changed_server_ids,
        servers=server_dict_array)
@staticmethod
def _define_module_argument_spec():
    """Build the constructor arguments for AnsibleModule.

    :return: dict carrying the option spec plus the pairs of mutually
        exclusive parameters.
    """
    spec = {
        'server_ids': dict(type='list', required=True),
        'state': dict(default='present', choices=['present', 'absent']),
        'cpu': dict(),
        'memory': dict(),
        'anti_affinity_policy_id': dict(),
        'anti_affinity_policy_name': dict(),
        'alert_policy_id': dict(),
        'alert_policy_name': dict(),
        'wait': dict(type='bool', default=True),
    }
    # id/name variants of each policy option cannot be combined.
    exclusive_pairs = [
        ['anti_affinity_policy_id', 'anti_affinity_policy_name'],
        ['alert_policy_id', 'alert_policy_name'],
    ]
    return {
        "argument_spec": spec,
        "mutually_exclusive": exclusive_pairs,
    }
def _set_clc_credentials_from_env(self):
    """
    Set the CLC Credentials on the sdk by reading environment variables
    :return: none

    Precedence: an API token + account alias wins over username/password;
    with neither pair present the module fails.
    """
    env = os.environ
    v2_api_token = env.get('CLC_V2_API_TOKEN', False)
    v2_api_username = env.get('CLC_V2_API_USERNAME', False)
    # NOTE(review): the DOCUMENTATION notes mention CLC_V2_API_PASSWORD but
    # the code reads CLC_V2_API_PASSWD -- confirm which name users must set.
    v2_api_passwd = env.get('CLC_V2_API_PASSWD', False)
    clc_alias = env.get('CLC_ACCT_ALIAS', False)
    api_url = env.get('CLC_V2_API_URL', False)
    if api_url:
        # Point the SDK at a non-default CLC environment.
        self.clc.defaults.ENDPOINT_URL_V2 = api_url
    if v2_api_token and clc_alias:
        # Inject the pre-generated token directly into the SDK internals.
        self.clc._LOGIN_TOKEN_V2 = v2_api_token
        self.clc._V2_ENABLED = True
        self.clc.ALIAS = clc_alias
    elif v2_api_username and v2_api_passwd:
        self.clc.v2.SetCredentials(
            api_username=v2_api_username,
            api_passwd=v2_api_passwd)
    else:
        return self.module.fail_json(
            msg="You must set the CLC_V2_API_USERNAME and CLC_V2_API_PASSWD "
                "environment variables")
def _get_servers_from_clc(self, server_list, message):
    """
    Internal function to fetch list of CLC server objects from a list of server ids
    :param server_list: The list of server ids
    :param message: the error message to throw in case of any error
    :return the list of CLC server objects
    """
    try:
        return self.clc.v2.Servers(server_list).servers
    except CLCException as ex:
        # ex.message is provided by the clc-sdk exception type (Python 2 era).
        return self.module.fail_json(msg=message + ': %s' % ex.message)
def _modify_servers(self, server_ids):
    """
    modify the servers configuration on the provided list
    :param server_ids: list of servers to modify
    :return: a list of dictionaries with server information about the servers that were modified
    """
    p = self.module.params
    state = p.get('state')
    # Snapshot of all tunable parameters, passed down to each helper.
    server_params = {
        'cpu': p.get('cpu'),
        'memory': p.get('memory'),
        'anti_affinity_policy_id': p.get('anti_affinity_policy_id'),
        'anti_affinity_policy_name': p.get('anti_affinity_policy_name'),
        'alert_policy_id': p.get('alert_policy_id'),
        'alert_policy_name': p.get('alert_policy_name'),
    }
    changed = False
    server_changed = False
    aa_changed = False
    ap_changed = False
    server_dict_array = []
    result_server_ids = []
    request_list = []
    changed_servers = []
    if not isinstance(server_ids, list) or len(server_ids) < 1:
        return self.module.fail_json(
            msg='server_ids should be a list of servers, aborting')
    servers = self._get_servers_from_clc(
        server_ids,
        'Failed to obtain server list from the CLC API')
    for server in servers:
        if state == 'present':
            # cpu/memory reconfiguration returns an async request object.
            server_changed, server_result = self._ensure_server_config(
                server, server_params)
            if server_result:
                request_list.append(server_result)
            aa_changed = self._ensure_aa_policy_present(
                server,
                server_params)
            ap_changed = self._ensure_alert_policy_present(
                server,
                server_params)
        elif state == 'absent':
            # absent only detaches policies; cpu/memory were rejected earlier.
            aa_changed = self._ensure_aa_policy_absent(
                server,
                server_params)
            ap_changed = self._ensure_alert_policy_absent(
                server,
                server_params)
        if server_changed or aa_changed or ap_changed:
            changed_servers.append(server)
            changed = True
    # Block on outstanding reconfiguration requests, then re-fetch state so
    # the returned server data reflects the changes.
    self._wait_for_requests(self.module, request_list)
    self._refresh_servers(self.module, changed_servers)
    for server in changed_servers:
        server_dict_array.append(server.data)
        result_server_ids.append(server.id)
    return changed, server_dict_array, result_server_ids
def _ensure_server_config(
        self, server, server_params):
    """
    ensures the server is updated with the provided cpu and memory
    :param server: the CLC server object
    :param server_params: the dictionary of server parameters
    :return: (changed, group) -
        changed: Boolean whether a change was made
        result: The result from the CLC API call
    """
    cpu = server_params.get('cpu')
    memory = server_params.get('memory')
    changed = False
    result = None
    # Unspecified values fall back to the server's current configuration so
    # the comparison below only triggers on explicit changes.
    if not cpu:
        cpu = server.cpu
    if not memory:
        memory = server.memory
    if memory != server.memory or cpu != server.cpu:
        # In check mode report the change without calling the API.
        if not self.module.check_mode:
            result = self._modify_clc_server(
                self.clc,
                self.module,
                server.id,
                cpu,
                memory)
        changed = True
    return changed, result
@staticmethod
def _modify_clc_server(clc, module, server_id, cpu, memory):
    """
    Modify the memory or CPU of a clc server.
    :param clc: the clc-sdk instance to use
    :param module: the AnsibleModule object
    :param server_id: id of the server to modify
    :param cpu: the new cpu value
    :param memory: the new memory value
    :return: the result of CLC API call
    """
    result = None
    acct_alias = clc.v2.Account.GetAlias()
    try:
        # Update the server configuration via a JSON-patch style PATCH;
        # both members are always sent (values default to current config).
        job_obj = clc.v2.API.Call('PATCH',
                                  'servers/%s/%s' % (acct_alias,
                                                     server_id),
                                  json.dumps([{"op": "set",
                                               "member": "memory",
                                               "value": memory},
                                              {"op": "set",
                                               "member": "cpu",
                                               "value": cpu}]))
        # Wrap the queued job so callers can wait on its completion.
        result = clc.v2.Requests(job_obj)
    except APIFailedResponse as ex:
        module.fail_json(
            msg='Unable to update the server configuration for server : "{0}". {1}'.format(
                server_id, str(ex.response_text)))
    return result
@staticmethod
def _wait_for_requests(module, request_list):
    """
    Block until server provisioning requests are completed.
    :param module: the AnsibleModule object
    :param request_list: a list of clc-sdk.Request instances
    :return: none

    No-op when the user passed wait=False.
    """
    wait = module.params.get('wait')
    if wait:
        # Requests.WaitUntilComplete() returns the count of failed requests
        failed_requests_count = sum(
            [request.WaitUntilComplete() for request in request_list])
        if failed_requests_count > 0:
            module.fail_json(
                msg='Unable to process modify server request')
@staticmethod
def _refresh_servers(module, servers):
    """
    Loop through a list of servers and refresh them.
    :param module: the AnsibleModule object
    :param servers: list of clc-sdk.Server instances to refresh
    :return: none

    Refreshing re-reads each server's data from the API so the module's
    return payload reflects the applied changes.
    """
    for server in servers:
        try:
            server.Refresh()
        except CLCException as ex:
            module.fail_json(msg='Unable to refresh the server {0}. {1}'.format(
                server.id, ex.message
            ))
def _ensure_aa_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided anti affinity policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id != current_aa_policy_id:
self._modify_aa_policy(
self.clc,
self.module,
acct_alias,
server.id,
aa_policy_id)
changed = True
return changed
def _ensure_aa_policy_absent(
self, server, server_params):
"""
ensures the provided anti affinity policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
aa_policy_id = server_params.get('anti_affinity_policy_id')
aa_policy_name = server_params.get('anti_affinity_policy_name')
if not aa_policy_id and aa_policy_name:
aa_policy_id = self._get_aa_policy_id_by_name(
self.clc,
self.module,
acct_alias,
aa_policy_name)
current_aa_policy_id = self._get_aa_policy_id_of_server(
self.clc,
self.module,
acct_alias,
server.id)
if aa_policy_id and aa_policy_id == current_aa_policy_id:
self._delete_aa_policy(
self.clc,
self.module,
acct_alias,
server.id)
changed = True
return changed
@staticmethod
def _modify_aa_policy(clc, module, acct_alias, server_id, aa_policy_id):
"""
modifies the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param aa_policy_id: the anti affinity policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('PUT',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({"id": aa_policy_id}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to modify anti affinity policy to server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _delete_aa_policy(clc, module, acct_alias, server_id):
"""
Delete the anti affinity policy of the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/antiAffinityPolicy' % (
acct_alias,
server_id),
json.dumps({}))
except APIFailedResponse as ex:
module.fail_json(
msg='Unable to delete anti affinity policy to server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_aa_policy_id_by_name(clc, module, alias, aa_policy_name):
"""
retrieves the anti affinity policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param aa_policy_name: the anti affinity policy name
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
aa_policies = clc.v2.API.Call(method='GET',
url='antiAffinityPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(
msg='Unable to fetch anti affinity policies from account alias : "{0}". {1}'.format(
alias, str(ex.response_text)))
for aa_policy in aa_policies.get('items'):
if aa_policy.get('name') == aa_policy_name:
if not aa_policy_id:
aa_policy_id = aa_policy.get('id')
else:
return module.fail_json(
msg='multiple anti affinity policies were found with policy name : %s' % aa_policy_name)
if not aa_policy_id:
module.fail_json(
msg='No anti affinity policy was found with policy name : %s' % aa_policy_name)
return aa_policy_id
@staticmethod
def _get_aa_policy_id_of_server(clc, module, alias, server_id):
"""
retrieves the anti affinity policy id of the server based on the CLC server id
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param server_id: the CLC server id
:return: aa_policy_id: The anti affinity policy id
"""
aa_policy_id = None
try:
result = clc.v2.API.Call(
method='GET', url='servers/%s/%s/antiAffinityPolicy' %
(alias, server_id))
aa_policy_id = result.get('id')
except APIFailedResponse as ex:
if ex.response_status_code != 404:
module.fail_json(msg='Unable to fetch anti affinity policy for server "{0}". {1}'.format(
server_id, str(ex.response_text)))
return aa_policy_id
def _ensure_alert_policy_present(
self, server, server_params):
"""
ensures the server is updated with the provided alert policy
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and not self._alert_policy_exists(
server, alert_policy_id):
self._add_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
def _ensure_alert_policy_absent(
self, server, server_params):
"""
ensures the alert policy is removed from the server
:param server: the CLC server object
:param server_params: the dictionary of server parameters
:return: (changed, group) -
changed: Boolean whether a change was made
result: The result from the CLC API call
"""
changed = False
acct_alias = self.clc.v2.Account.GetAlias()
alert_policy_id = server_params.get('alert_policy_id')
alert_policy_name = server_params.get('alert_policy_name')
if not alert_policy_id and alert_policy_name:
alert_policy_id = self._get_alert_policy_id_by_name(
self.clc,
self.module,
acct_alias,
alert_policy_name)
if alert_policy_id and self._alert_policy_exists(
server, alert_policy_id):
self._remove_alert_policy_to_server(
self.clc,
self.module,
acct_alias,
server.id,
alert_policy_id)
changed = True
return changed
@staticmethod
def _add_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
add the alert policy to CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('POST',
'servers/%s/%s/alertPolicies' % (
acct_alias,
server_id),
json.dumps({"id": alert_policy_id}))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to set alert policy to the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _remove_alert_policy_to_server(
clc, module, acct_alias, server_id, alert_policy_id):
"""
remove the alert policy to the CLC server
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param acct_alias: the CLC account alias
:param server_id: the CLC server id
:param alert_policy_id: the alert policy id
:return: result: The result from the CLC API call
"""
result = None
if not module.check_mode:
try:
result = clc.v2.API.Call('DELETE',
'servers/%s/%s/alertPolicies/%s'
% (acct_alias, server_id, alert_policy_id))
except APIFailedResponse as ex:
module.fail_json(msg='Unable to remove alert policy from the server : "{0}". {1}'.format(
server_id, str(ex.response_text)))
return result
@staticmethod
def _get_alert_policy_id_by_name(clc, module, alias, alert_policy_name):
"""
retrieves the alert policy id of the server based on the name of the policy
:param clc: the clc-sdk instance to use
:param module: the AnsibleModule object
:param alias: the CLC account alias
:param alert_policy_name: the alert policy name
:return: alert_policy_id: The alert policy id
"""
alert_policy_id = None
try:
alert_policies = clc.v2.API.Call(method='GET',
url='alertPolicies/%s' % alias)
except APIFailedResponse as ex:
return module.fail_json(msg='Unable to fetch alert policies for account : "{0}". {1}'.format(
alias, str(ex.response_text)))
for alert_policy in alert_policies.get('items'):
if alert_policy.get('name') == alert_policy_name:
if not alert_policy_id:
alert_policy_id = alert_policy.get('id')
else:
return module.fail_json(
msg='multiple alert policies were found with policy name : %s' % alert_policy_name)
return alert_policy_id
@staticmethod
def _alert_policy_exists(server, alert_policy_id):
    """
    Checks if the alert policy exists for the server
    :param server: the clc server object
    :param alert_policy_id: the alert policy
    :return: True: if the given alert policy id associated to the server, False otherwise
    """
    policies = server.alertPolicies
    if not policies:
        return False
    return any(policy.get('id') == alert_policy_id for policy in policies)
@staticmethod
def _set_user_agent(clc):
    # Tag outgoing SDK requests with this module's version so CLC can
    # identify Ansible-driven API traffic. Older SDKs without
    # SetRequestsSession are silently left unchanged.
    if hasattr(clc, 'SetRequestsSession'):
        agent_string = "ClcAnsibleModule/" + __version__
        ses = requests.Session()
        ses.headers.update({"Api-Client": agent_string})
        # Append to (rather than replace) requests' default User-Agent.
        ses.headers['User-Agent'] += " " + agent_string
        clc.SetRequestsSession(ses)
def main():
    """
    The main function. Instantiates the module and calls process_request.
    :return: none
    """
    # argument_dict carries both argument_spec and mutually_exclusive.
    argument_dict = ClcModifyServer._define_module_argument_spec()
    module = AnsibleModule(supports_check_mode=True, **argument_dict)
    clc_modify_server = ClcModifyServer(module)
    clc_modify_server.process_request()
if __name__ == '__main__':
    main()
| gpl-3.0 |
nikolas/lettuce | tests/integration/lib/Django-1.3/django/views/generic/base.py | 95 | 5621 | from django import http
from django.core.exceptions import ImproperlyConfigured
from django.template import RequestContext, loader
from django.template.response import TemplateResponse
from django.utils.functional import update_wrapper
from django.utils.log import getLogger
from django.utils.decorators import classonlymethod
logger = getLogger('django.request')
class View(object):
    """
    Intentionally simple parent class for all views. Only implements
    dispatch-by-method and simple sanity checking.
    """
    # Lower-cased HTTP verbs dispatch() is willing to route to a same-named
    # handler method defined on the subclass.
    http_method_names = ['get', 'post', 'put', 'delete', 'head', 'options', 'trace']
    def __init__(self, **kwargs):
        """
        Constructor. Called in the URLconf; can contain helpful extra
        keyword arguments, and other things.
        """
        # Go through keyword arguments, and either save their values to our
        # instance, or raise an error.
        for key, value in kwargs.iteritems():
            setattr(self, key, value)
    @classonlymethod
    def as_view(cls, **initkwargs):
        """
        Main entry point for a request-response process.

        Returns a plain function suitable for a URLconf; each request gets
        a fresh instance of the class, so per-request state is safe.
        """
        # sanitize keyword arguments
        for key in initkwargs:
            # Disallow overriding handler methods (get/post/...) via kwargs.
            if key in cls.http_method_names:
                raise TypeError(u"You tried to pass in the %s method name as a "
                                u"keyword argument to %s(). Don't do that."
                                % (key, cls.__name__))
            # Only attributes that already exist on the class may be set.
            if not hasattr(cls, key):
                raise TypeError(u"%s() received an invalid keyword %r" % (
                    cls.__name__, key))
        def view(request, *args, **kwargs):
            # New instance per request -> no shared mutable state.
            self = cls(**initkwargs)
            return self.dispatch(request, *args, **kwargs)
        # take name and docstring from class
        update_wrapper(view, cls, updated=())
        # and possible attributes set by decorators
        # like csrf_exempt from dispatch
        update_wrapper(view, cls.dispatch, assigned=())
        return view
    def dispatch(self, request, *args, **kwargs):
        # Try to dispatch to the right method; if a method doesn't exist,
        # defer to the error handler. Also defer to the error handler if the
        # request method isn't on the approved list.
        if request.method.lower() in self.http_method_names:
            handler = getattr(self, request.method.lower(), self.http_method_not_allowed)
        else:
            handler = self.http_method_not_allowed
        # Stash request data on the instance for use by handler methods.
        self.request = request
        self.args = args
        self.kwargs = kwargs
        return handler(request, *args, **kwargs)
    def http_method_not_allowed(self, request, *args, **kwargs):
        """Log and return 405 listing the verbs this view actually implements."""
        allowed_methods = [m for m in self.http_method_names if hasattr(self, m)]
        logger.warning('Method Not Allowed (%s): %s' % (request.method, request.path),
            extra={
                'status_code': 405,
                'request': self.request
            }
        )
        return http.HttpResponseNotAllowed(allowed_methods)
class TemplateResponseMixin(object):
    """
    A mixin that can be used to render a template.
    """
    # Subclasses set template_name or override get_template_names().
    template_name = None
    # Swap in a different TemplateResponse subclass if needed.
    response_class = TemplateResponse
    def render_to_response(self, context, **response_kwargs):
        """
        Returns a response with a template rendered with the given context.
        """
        # The response is lazy: the template renders when the response is
        # evaluated, so middleware can still change template/context.
        return self.response_class(
            request = self.request,
            template = self.get_template_names(),
            context = context,
            **response_kwargs
        )
    def get_template_names(self):
        """
        Returns a list of template names to be used for the request. Must return
        a list. May not be called if render_to_response is overridden.
        """
        if self.template_name is None:
            raise ImproperlyConfigured(
                "TemplateResponseMixin requires either a definition of "
                "'template_name' or an implementation of 'get_template_names()'")
        else:
            return [self.template_name]
class TemplateView(TemplateResponseMixin, View):
    """
    A view that renders a template.
    """
    def get_context_data(self, **kwargs):
        # Expose URLconf keyword arguments to the template as 'params'.
        return {
            'params': kwargs
        }
    def get(self, request, *args, **kwargs):
        """Render template_name with the context built from URL kwargs."""
        context = self.get_context_data(**kwargs)
        return self.render_to_response(context)
class RedirectView(View):
    """
    A view that provides a redirect on any GET request.
    """
    # True -> 301 permanent redirect; False -> 302 temporary.
    permanent = True
    # Target URL; may contain %(name)s placeholders filled from URL kwargs.
    url = None
    # When True, the incoming query string is appended to the target URL.
    query_string = False
    def get_redirect_url(self, **kwargs):
        """
        Return the URL redirect to. Keyword arguments from the
        URL pattern match generating the redirect request
        are provided as kwargs to this method.
        """
        if self.url:
            args = self.request.META["QUERY_STRING"]
            if args and self.query_string:
                url = "%s?%s" % (self.url, args)
            else:
                url = self.url
            # Interpolate named URL-pattern groups into the target.
            return url % kwargs
        else:
            return None
    def get(self, request, *args, **kwargs):
        """Issue the redirect, or 410 Gone when no target URL is configured."""
        url = self.get_redirect_url(**kwargs)
        if url:
            if self.permanent:
                return http.HttpResponsePermanentRedirect(url)
            else:
                return http.HttpResponseRedirect(url)
        else:
            logger.warning('Gone: %s' % self.request.path,
                extra={
                    'status_code': 410,
                    'request': self.request
                })
            return http.HttpResponseGone()
| gpl-3.0 |
plq/spyne | spyne/interface/xml_schema/__init__.py | 4 | 1069 |
#
# spyne - Copyright (C) Spyne contributors.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301
#
"""The spyne.interface.xml_schema package contains an implementation of a subset
of the Xml Schema 1.0 standard. Volunteers are needed to see whether the brand
new Xml Schema 1.1 standard is worth the trouble, and patch as necessary.
"""
from spyne.interface.xml_schema._base import XmlSchema
| lgpl-2.1 |
blackzw/openwrt_sdk_dev1 | staging_dir/target-mips_r2_uClibc-0.9.33.2/usr/lib/python2.7/test/test_class.py | 19 | 18135 | "Test the functionality of Python classes implementing operators."
import unittest
from test import test_support
# Names of the special methods that AllTests (below) synthesizes; each
# generated __<name>__ simply records that it was called and returns None.
testmeths = [

# Binary operations
    "add",
    "radd",
    "sub",
    "rsub",
    "mul",
    "rmul",
    "div",
    "rdiv",
    "mod",
    "rmod",
    "divmod",
    "rdivmod",
    "pow",
    "rpow",
    "rshift",
    "rrshift",
    "lshift",
    "rlshift",
    "and",
    "rand",
    "or",
    "ror",
    "xor",
    "rxor",

# List/dict operations
    "contains",
    "getitem",
    "getslice",
    "setitem",
    "setslice",
    "delitem",
    "delslice",

# Unary operations
    "neg",
    "pos",
    "abs",

# generic operations
    "init",
    ]
# These need to return something other than None
# "coerce",
# "hash",
# "str",
# "repr",
# "int",
# "long",
# "float",
# "oct",
# "hex",
# These are separate because they can influence the test of other methods.
# "getattr",
# "setattr",
# "delattr",
# Shared log of (method name, positional args) tuples recorded by trackCall.
callLst = []


def trackCall(f):
    """Wrap *f* so that every invocation appends (f.__name__, args) to callLst."""
    def recorder(*args, **kwargs):
        callLst.append((f.__name__, args))
        return f(*args, **kwargs)
    return recorder
class AllTests:
    # Deliberately an old-style (classic) class: the surrounding tests
    # exercise classic-class operator dispatch, including __coerce__.
    trackCall = trackCall

    @trackCall
    def __coerce__(self, *args):
        return (self,) + args

    @trackCall
    def __hash__(self, *args):
        return hash(id(self))

    @trackCall
    def __str__(self, *args):
        return "AllTests"

    @trackCall
    def __repr__(self, *args):
        return "AllTests"

    @trackCall
    def __int__(self, *args):
        return 1

    @trackCall
    def __float__(self, *args):
        return 1.0

    @trackCall
    def __long__(self, *args):
        # Python 2 long literal; this file targets Python 2 only.
        return 1L

    @trackCall
    def __oct__(self, *args):
        return '01'

    @trackCall
    def __hex__(self, *args):
        return '0x1'

    @trackCall
    def __cmp__(self, *args):
        return 0
# Synthesize all the other AllTests methods from the names in testmeths.

method_template = """\
@trackCall
def __%(method)s__(self, *args):
    pass
"""

for method in testmeths:
    # Python 2 exec statement: evaluate the generated method definition
    # directly inside AllTests' class dict so it becomes a class attribute.
    exec method_template % locals() in AllTests.__dict__

del method, method_template
class ClassTests(unittest.TestCase):
    """Verify which special methods classic-class operator syntax invokes,
    by comparing the call log recorded on AllTests instances."""

    def setUp(self):
        # Reset the shared call log before every test.
        callLst[:] = []

    def assertCallStack(self, expected_calls):
        actualCallList = callLst[:]  # need to copy because the comparison below will add
                                     # additional calls to callLst
        if expected_calls != actualCallList:
            self.fail("Expected call list:\n %s\ndoes not match actual call list\n %s" %
                      (expected_calls, actualCallList))

    def testInit(self):
        foo = AllTests()
        self.assertCallStack([("__init__", (foo,))])

    def testBinaryOps(self):
        testme = AllTests()
        # Binary operations

        callLst[:] = []
        testme + 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__add__", (testme, 1))])

        callLst[:] = []
        1 + testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__radd__", (testme, 1))])

        callLst[:] = []
        testme - 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__sub__", (testme, 1))])

        callLst[:] = []
        1 - testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rsub__", (testme, 1))])

        callLst[:] = []
        testme * 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__mul__", (testme, 1))])

        callLst[:] = []
        1 * testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rmul__", (testme, 1))])

        # Classic division only exists when "/" truncates (no -Qnew flag).
        if 1/2 == 0:
            callLst[:] = []
            testme / 1
            self.assertCallStack([("__coerce__", (testme, 1)), ("__div__", (testme, 1))])

            callLst[:] = []
            1 / testme
            self.assertCallStack([("__coerce__", (testme, 1)), ("__rdiv__", (testme, 1))])

        callLst[:] = []
        testme % 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__mod__", (testme, 1))])

        callLst[:] = []
        1 % testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rmod__", (testme, 1))])

        callLst[:] = []
        divmod(testme,1)
        self.assertCallStack([("__coerce__", (testme, 1)), ("__divmod__", (testme, 1))])

        callLst[:] = []
        divmod(1, testme)
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rdivmod__", (testme, 1))])

        callLst[:] = []
        testme ** 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__pow__", (testme, 1))])

        callLst[:] = []
        1 ** testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rpow__", (testme, 1))])

        callLst[:] = []
        testme >> 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rshift__", (testme, 1))])

        callLst[:] = []
        1 >> testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rrshift__", (testme, 1))])

        callLst[:] = []
        testme << 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__lshift__", (testme, 1))])

        callLst[:] = []
        1 << testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rlshift__", (testme, 1))])

        callLst[:] = []
        testme & 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__and__", (testme, 1))])

        callLst[:] = []
        1 & testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rand__", (testme, 1))])

        callLst[:] = []
        testme | 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__or__", (testme, 1))])

        callLst[:] = []
        1 | testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__ror__", (testme, 1))])

        callLst[:] = []
        testme ^ 1
        self.assertCallStack([("__coerce__", (testme, 1)), ("__xor__", (testme, 1))])

        callLst[:] = []
        1 ^ testme
        self.assertCallStack([("__coerce__", (testme, 1)), ("__rxor__", (testme, 1))])

    def testListAndDictOps(self):
        testme = AllTests()

        # List/dict operations

        class Empty: pass

        try:
            1 in Empty()
            self.fail('failed, should have raised TypeError')
        except TypeError:
            pass

        callLst[:] = []
        1 in testme
        self.assertCallStack([('__contains__', (testme, 1))])

        callLst[:] = []
        testme[1]
        self.assertCallStack([('__getitem__', (testme, 1))])

        callLst[:] = []
        testme[1] = 1
        self.assertCallStack([('__setitem__', (testme, 1, 1))])

        callLst[:] = []
        del testme[1]
        self.assertCallStack([('__delitem__', (testme, 1))])

        callLst[:] = []
        testme[:42]
        self.assertCallStack([('__getslice__', (testme, 0, 42))])

        callLst[:] = []
        testme[:42] = "The Answer"
        self.assertCallStack([('__setslice__', (testme, 0, 42, "The Answer"))])

        callLst[:] = []
        del testme[:42]
        self.assertCallStack([('__delslice__', (testme, 0, 42))])

        # Extended (stepped) slices bypass __getslice__ and use slice objects.
        callLst[:] = []
        testme[2:1024:10]
        self.assertCallStack([('__getitem__', (testme, slice(2, 1024, 10)))])

        callLst[:] = []
        testme[2:1024:10] = "A lot"
        self.assertCallStack([('__setitem__', (testme, slice(2, 1024, 10),
                                               "A lot"))])
        callLst[:] = []
        del testme[2:1024:10]
        self.assertCallStack([('__delitem__', (testme, slice(2, 1024, 10)))])

        callLst[:] = []
        testme[:42, ..., :24:, 24, 100]
        self.assertCallStack([('__getitem__', (testme, (slice(None, 42, None),
                                                        Ellipsis,
                                                        slice(None, 24, None),
                                                        24, 100)))])
        callLst[:] = []
        testme[:42, ..., :24:, 24, 100] = "Strange"
        self.assertCallStack([('__setitem__', (testme, (slice(None, 42, None),
                                                        Ellipsis,
                                                        slice(None, 24, None),
                                                        24, 100), "Strange"))])
        callLst[:] = []
        del testme[:42, ..., :24:, 24, 100]
        self.assertCallStack([('__delitem__', (testme, (slice(None, 42, None),
                                                        Ellipsis,
                                                        slice(None, 24, None),
                                                        24, 100)))])

        # Now remove the slice hooks to see if converting normal slices to
        # slice object works.

        getslice = AllTests.__getslice__
        del AllTests.__getslice__
        setslice = AllTests.__setslice__
        del AllTests.__setslice__
        delslice = AllTests.__delslice__
        del AllTests.__delslice__

        # XXX when using new-style classes the slice testme[:42] produces
        # slice(None, 42, None) instead of slice(0, 42, None). py3k will have
        # to change this test.
        callLst[:] = []
        testme[:42]
        self.assertCallStack([('__getitem__', (testme, slice(0, 42, None)))])

        callLst[:] = []
        testme[:42] = "The Answer"
        self.assertCallStack([('__setitem__', (testme, slice(0, 42, None),
                                               "The Answer"))])
        callLst[:] = []
        del testme[:42]
        self.assertCallStack([('__delitem__', (testme, slice(0, 42, None)))])

        # Restore the slice methods, or the tests will fail with regrtest -R.
        AllTests.__getslice__ = getslice
        AllTests.__setslice__ = setslice
        AllTests.__delslice__ = delslice

    @test_support.cpython_only
    def testDelItem(self):
        class A:
            ok = False
            def __delitem__(self, key):
                self.ok = True
        a = A()
        # Subtle: we need to call PySequence_SetItem, not PyMapping_SetItem.
        from _testcapi import sequence_delitem
        sequence_delitem(a, 2)
        self.assertTrue(a.ok)

    def testUnaryOps(self):
        testme = AllTests()

        callLst[:] = []
        -testme
        self.assertCallStack([('__neg__', (testme,))])
        callLst[:] = []
        +testme
        self.assertCallStack([('__pos__', (testme,))])
        callLst[:] = []
        abs(testme)
        self.assertCallStack([('__abs__', (testme,))])
        callLst[:] = []
        int(testme)
        self.assertCallStack([('__int__', (testme,))])
        callLst[:] = []
        long(testme)
        self.assertCallStack([('__long__', (testme,))])
        callLst[:] = []
        float(testme)
        self.assertCallStack([('__float__', (testme,))])
        callLst[:] = []
        oct(testme)
        self.assertCallStack([('__oct__', (testme,))])
        callLst[:] = []
        hex(testme)
        self.assertCallStack([('__hex__', (testme,))])

    def testMisc(self):
        testme = AllTests()

        callLst[:] = []
        hash(testme)
        self.assertCallStack([('__hash__', (testme,))])

        callLst[:] = []
        repr(testme)
        self.assertCallStack([('__repr__', (testme,))])

        callLst[:] = []
        str(testme)
        self.assertCallStack([('__str__', (testme,))])

        # Rich comparisons on a classic class fall back to __coerce__/__cmp__.
        callLst[:] = []
        testme == 1
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])

        callLst[:] = []
        testme < 1
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])

        callLst[:] = []
        testme > 1
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])

        callLst[:] = []
        eval('testme <> 1')  # XXX kill this in py3k
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])

        callLst[:] = []
        testme != 1
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (testme, 1))])

        callLst[:] = []
        1 == testme
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])

        callLst[:] = []
        1 < testme
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])

        callLst[:] = []
        1 > testme
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])

        callLst[:] = []
        eval('1 <> testme')
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])

        callLst[:] = []
        1 != testme
        self.assertCallStack([("__coerce__", (testme, 1)), ('__cmp__', (1, testme))])

    def testGetSetAndDel(self):
        # Interfering tests
        class ExtraTests(AllTests):
            @trackCall
            def __getattr__(self, *args):
                return "SomeVal"

            @trackCall
            def __setattr__(self, *args):
                pass

            @trackCall
            def __delattr__(self, *args):
                pass

        testme = ExtraTests()

        callLst[:] = []
        testme.spam
        self.assertCallStack([('__getattr__', (testme, "spam"))])

        callLst[:] = []
        testme.eggs = "spam, spam, spam and ham"
        self.assertCallStack([('__setattr__', (testme, "eggs",
                                               "spam, spam, spam and ham"))])

        callLst[:] = []
        del testme.cardinal
        self.assertCallStack([('__delattr__', (testme, "cardinal"))])

    def testDel(self):
        x = []

        class DelTest:
            def __del__(self):
                x.append("crab people, crab people")
        testme = DelTest()
        del testme
        import gc
        # Force collection so the finalizer has run before we assert.
        gc.collect()
        self.assertEqual(["crab people, crab people"], x)

    def testBadTypeReturned(self):
        # return values of some method are type-checked
        class BadTypeClass:
            def __int__(self):
                return None
            __float__ = __int__
            __long__ = __int__
            __str__ = __int__
            __repr__ = __int__
            __oct__ = __int__
            __hex__ = __int__

        for f in [int, float, long, str, repr, oct, hex]:
            self.assertRaises(TypeError, f, BadTypeClass())

    def testMixIntsAndLongs(self):
        # mixing up ints and longs is okay
        class IntLongMixClass:
            @trackCall
            def __int__(self):
                return 42L

            @trackCall
            def __long__(self):
                return 64

        mixIntAndLong = IntLongMixClass()

        callLst[:] = []
        as_int = int(mixIntAndLong)
        self.assertEqual(type(as_int), long)
        self.assertEqual(as_int, 42L)
        self.assertCallStack([('__int__', (mixIntAndLong,))])

        callLst[:] = []
        as_long = long(mixIntAndLong)
        self.assertEqual(type(as_long), long)
        self.assertEqual(as_long, 64)
        self.assertCallStack([('__long__', (mixIntAndLong,))])

    def testHashStuff(self):
        # Test correct errors from hash() on objects with comparisons but
        # no __hash__
        class C0:
            pass

        hash(C0())  # This should work; the next two should raise TypeError

        class C1:
            def __cmp__(self, other): return 0

        self.assertRaises(TypeError, hash, C1())

        class C2:
            def __eq__(self, other): return 1

        self.assertRaises(TypeError, hash, C2())

    def testSFBug532646(self):
        # Test for SF bug 532646

        class A:
            pass
        A.__call__ = A()
        a = A()

        try:
            a()  # This should not segfault
        except RuntimeError:
            pass
        else:
            self.fail("Failed to raise RuntimeError")

    def testForExceptionsRaisedInInstanceGetattr2(self):
        # Tests for exceptions raised in instance_getattr2().

        def booh(self):
            raise AttributeError("booh")

        class A:
            a = property(booh)
        try:
            A().a  # Raised AttributeError: A instance has no attribute 'a'
        except AttributeError, x:
            if str(x) != "booh":
                self.fail("attribute error for A().a got masked: %s" % x)

        class E:
            __eq__ = property(booh)
        E() == E()  # In debug mode, caused a C-level assert() to fail

        class I:
            __init__ = property(booh)
        try:
            # In debug mode, printed XXX undetected error and
            # raises AttributeError
            I()
        except AttributeError, x:
            pass
        else:
            self.fail("attribute error for I.__init__ got masked")

    def testHashComparisonOfMethods(self):
        # Test comparison and hash of methods
        class A:
            def __init__(self, x):
                self.x = x
            def f(self):
                pass
            def g(self):
                pass
            def __eq__(self, other):
                return self.x == other.x
            def __hash__(self):
                return self.x
        class B(A):
            pass

        a1 = A(1)
        a2 = A(2)
        self.assertEqual(a1.f, a1.f)
        self.assertNotEqual(a1.f, a2.f)
        self.assertNotEqual(a1.f, a1.g)
        self.assertEqual(a1.f, A(1).f)
        self.assertEqual(hash(a1.f), hash(a1.f))
        self.assertEqual(hash(a1.f), hash(A(1).f))

        self.assertNotEqual(A.f, a1.f)
        self.assertNotEqual(A.f, A.g)
        self.assertEqual(B.f, A.f)
        self.assertEqual(hash(B.f), hash(A.f))

        # the following triggers a SystemError in 2.4
        a = A(hash(A.f.im_func)^(-1))
        hash(a.f)
def test_main():
    """Run ClassTests while filtering out the expected py3k warnings."""
    ignored_warnings = (
        (".+__(get|set|del)slice__ has been removed", DeprecationWarning),
        ("classic int division", DeprecationWarning),
        ("<> not supported", DeprecationWarning),
    )
    with test_support.check_py3k_warnings(*ignored_warnings):
        test_support.run_unittest(ClassTests)
# Allow running this test file as a script.
if __name__=='__main__':
    test_main()
| gpl-2.0 |
zzzombat/lucid-python-django | django/utils/simplejson/scanner.py | 928 | 2227 | """JSON token scanner
"""
import re
try:
from simplejson._speedups import make_scanner as c_make_scanner
except ImportError:
c_make_scanner = None
__all__ = ['make_scanner']
# Matches a JSON number: integer part, optional fraction, optional exponent.
NUMBER_RE = re.compile(
    r'(-?(?:0|[1-9]\d*))(\.\d+)?([eE][-+]?\d+)?',
    (re.VERBOSE | re.MULTILINE | re.DOTALL))


def py_make_scanner(context):
    """Build a pure-Python JSON value scanner from *context*.

    Returns a callable taking (string, idx) and yielding a
    (value, end_index) pair; it raises StopIteration when idx is past the
    end of the input or no JSON value starts at that position.
    """
    # Hoist every context attribute into a local once so the closure does
    # fast cell lookups instead of attribute access on every call.
    parse_object = context.parse_object
    parse_array = context.parse_array
    parse_string = context.parse_string
    match_number = NUMBER_RE.match
    encoding = context.encoding
    strict = context.strict
    parse_float = context.parse_float
    parse_int = context.parse_int
    parse_constant = context.parse_constant
    object_hook = context.object_hook

    def _scan_once(string, idx):
        try:
            ch = string[idx]
        except IndexError:
            # Past the end of the input: tell the caller to stop.
            raise StopIteration

        if ch == '"':
            return parse_string(string, idx + 1, encoding, strict)
        if ch == '{':
            return parse_object((string, idx + 1), encoding, strict, _scan_once, object_hook)
        if ch == '[':
            return parse_array((string, idx + 1), _scan_once)
        if ch == 'n' and string[idx:idx + 4] == 'null':
            return None, idx + 4
        if ch == 't' and string[idx:idx + 4] == 'true':
            return True, idx + 4
        if ch == 'f' and string[idx:idx + 5] == 'false':
            return False, idx + 5

        number = match_number(string, idx)
        if number is not None:
            integer, frac, exp = number.groups()
            if frac or exp:
                value = parse_float(integer + (frac or '') + (exp or ''))
            else:
                value = parse_int(integer)
            return value, number.end()
        # Non-standard constants, checked only after a number match fails.
        if ch == 'N' and string[idx:idx + 3] == 'NaN':
            return parse_constant('NaN'), idx + 3
        if ch == 'I' and string[idx:idx + 8] == 'Infinity':
            return parse_constant('Infinity'), idx + 8
        if ch == '-' and string[idx:idx + 9] == '-Infinity':
            return parse_constant('-Infinity'), idx + 9
        raise StopIteration

    return _scan_once
make_scanner = c_make_scanner or py_make_scanner
| bsd-3-clause |
patrickod/stem | test/unit/response/events.py | 1 | 65415 | """
Unit tests for the stem.response.events classes.
"""
import datetime
import threading
import unittest
import stem.response
import stem.response.events
import stem.util.log
from stem import * # enums and exceptions
from stem.response import ControlMessage
from stem.descriptor.router_status_entry import RouterStatusEntryV3
try:
# added in python 3.3
from unittest.mock import Mock
except ImportError:
from mock import Mock
# ADDRMAP event
ADDRMAP = '650 ADDRMAP www.atagar.com 75.119.206.243 "2012-11-19 00:50:13" \
EXPIRES="2012-11-19 08:50:13"'
ADDRMAP_NO_EXPIRATION = '650 ADDRMAP www.atagar.com 75.119.206.243 NEVER'
ADDRMAP_ERROR_EVENT = '650 ADDRMAP www.atagar.com <error> "2012-11-19 00:50:13" \
error=yes EXPIRES="2012-11-19 08:50:13"'
ADDRMAP_BAD_1 = '650 ADDRMAP www.atagar.com 75.119.206.243 2012-11-19 00:50:13" \
EXPIRES="2012-11-19 08:50:13"'
ADDRMAP_BAD_2 = '650 ADDRMAP www.atagar.com 75.119.206.243 "2012-11-19 00:50:13 \
EXPIRES="2012-11-19 08:50:13"'
ADDRMAP_CACHED = '650 ADDRMAP example.com 192.0.43.10 "2013-04-03 22:31:22" \
EXPIRES="2013-04-03 20:31:22" \
CACHED="YES"'
ADDRMAP_NOT_CACHED = '650 ADDRMAP example.com 192.0.43.10 "2013-04-03 22:29:11" \
EXPIRES="2013-04-03 20:29:11" \
CACHED="NO"'
ADDRMAP_CACHED_MALFORMED = '650 ADDRMAP example.com 192.0.43.10 "2013-04-03 22:29:11" \
CACHED="KINDA"'
AUTHDIR_NEWDESC = """\
650+AUTHDIR_NEWDESCS
DROPPED
Not replacing router descriptor; no information has changed since the last one with this identity.
@uploaded-at 2017-05-25 04:46:21
@source "127.0.0.1"
router test002r 127.0.0.1 5002 0 7002
identity-ed25519
-----BEGIN ED25519 CERT-----
AQQABlm9AXVOPVG4KHqFmShWRFPU2oXO15yaS+J8c6SrLBMpnB1vAQAgBABBic8D
+GIdBzNCezf1Lfw8NSpbDL7S4ExBuMXvi6WvEoN1gIGwEwddLvUF91l6BXL5yoXf
xg7fDhYZ7CDwtVBHSfvmsIKR/QnQyylbDpVllsV9Wz6JLz52JgFGQaNjAgA=
-----END ED25519 CERT-----
master-key-ed25519 QYnPA/hiHQczQns39S38PDUqWwy+0uBMQbjF74ulrxI
platform Tor 0.3.0.7 on Darwin
proto Cons=1-2 Desc=1-2 DirCache=1 HSDir=1-2 HSIntro=3-4 HSRend=1-2 Link=1-4 LinkAuth=1,3 Microdesc=1-2 Relay=1-2
published 2017-05-25 04:46:20
fingerprint 3482 25F8 3C85 4796 B2DD 6364 E65C B189 B33B D696
uptime 9
bandwidth 1073741824 1073741824 0
extra-info-digest 114E4F433D6E08D9E73621BB418EBF02336EB578 i6+wKCBaNqUbNZIM9DFacEM74R5m0lkh8//h6R/nbGc
onion-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAMMpf3fLSUEme0aQfN2RUfAPhZJMXVSqMpFRdBoKA4AYlz78VA9zxfj3
Nyir2G2HFaTzeS82p74obc8RufJQcGoDUwDnPlHtjb2ezmr018j8i3fTEvPwj5xC
5001FRwUVcOaLnxZKSDzpTyKRWGnSQBSbGcyXwMRtySKf0P5yjHDAgMBAAE=
-----END RSA PUBLIC KEY-----
signing-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBAM7Rn1kQffaJE8rbtgTiMxNY67i27hCBzzr0gE558jARizOJo8lf7get
rxz92mzYPYskM1V/j16QhoRlrruMn319/l6o97+/ta6qIwlSPXZ1jd/BGs3yqS4X
2N+N9qW8zC6km88K/YZuIsqYyXL7oHoIGqbERYLmp/JqLlAR52JJAgMBAAE=
-----END RSA PUBLIC KEY-----
onion-key-crosscert
-----BEGIN CROSSCERT-----
t4BE0PcPra9o5HBJHr9+h1MgP76XY3UGLQX8FGfEPHfNBMtVxRKkhOZ7Ki01+dkK
IpfkdQn3bOXTIa+FYGvzpyADYx4RDpbHG9/Tna/xR+6LhAQfvcLrlBxsjyntXjkX
FwE+AemijIU4DM4F1FHIkFz6OgT9B1/G0mr5QggzXS8=
-----END CROSSCERT-----
ntor-onion-key-crosscert 0
-----BEGIN ED25519 CERT-----
AQoABleVAUGJzwP4Yh0HM0J7N/Ut/Dw1KlsMvtLgTEG4xe+Lpa8SAKgaOVGouOKa
N64AOq7FdJJM/qgI1r2+jqTj1Mk/a19kfTIQ8hhWxoaJhg1xo8BasnjNQ+4Cm7ds
vMW0fhwJjQ4=
-----END ED25519 CERT-----
hidden-service-dir
ntor-onion-key tic6dEvMWt3kaUtDhwULgb3qBnc2wd5GkJ5cv0TelhA=
accept *:*
ipv6-policy accept 1-65535
tunnelled-dir-server
router-sig-ed25519 t7XJfoMOKbk1167f4p5+z5nlXxr+8us3qaLdNeQ1nGpUyQtM4G8Ie9P/oeqwhV4lCx5pQN1vIy3lUaN811WqAA
router-signature
-----BEGIN SIGNATURE-----
j/mMC1JuChvJH/ZP/Ayy0ZAV2P6VoxpRhHJfZMC07rr2ctmxfDTwLQhbYrqJ2adW
FYy5QTzxYzSFoTA7FvsadJuyvsGlaRfgFs+AxYjBUUoK7Dcd2ri+Y11NmwCBLFqF
4cYG3pqPHb38gXLj89QXfMJUDbOwrxvkVxIFbdwDCsE=
-----END SIGNATURE-----
.
650 OK
"""
# BUILDTIMEOUT_SET event from tor 0.2.3.16.
BUILD_TIMEOUT_EVENT = '650 BUILDTIMEOUT_SET COMPUTED \
TOTAL_TIMES=124 \
TIMEOUT_MS=9019 \
XM=1375 \
ALPHA=0.855662 \
CUTOFF_QUANTILE=0.800000 \
TIMEOUT_RATE=0.137097 \
CLOSE_MS=21850 \
CLOSE_RATE=0.072581'
BUILD_TIMEOUT_EVENT_BAD_1 = '650 BUILDTIMEOUT_SET COMPUTED \
TOTAL_TIMES=one_twenty_four \
TIMEOUT_MS=9019 \
XM=1375 \
ALPHA=0.855662 \
CUTOFF_QUANTILE=0.800000 \
TIMEOUT_RATE=0.137097 \
CLOSE_MS=21850 \
CLOSE_RATE=0.072581'
BUILD_TIMEOUT_EVENT_BAD_2 = '650 BUILDTIMEOUT_SET COMPUTED \
TOTAL_TIMES=124 \
TIMEOUT_MS=9019 \
XM=1375 \
ALPHA=0.855662 \
CUTOFF_QUANTILE=zero_point_eight \
TIMEOUT_RATE=0.137097 \
CLOSE_MS=21850 \
CLOSE_RATE=0.072581'
# CIRC events from tor v0.2.3.16
CIRC_LAUNCHED = '650 CIRC 7 LAUNCHED \
BUILD_FLAGS=NEED_CAPACITY \
PURPOSE=GENERAL \
TIME_CREATED=2012-11-08T16:48:38.417238'
CIRC_LAUNCHED_BAD_1 = '650 CIRC 7 LAUNCHED \
BUILD_FLAGS=NEED_CAPACITY \
PURPOSE=GENERAL \
TIME_CREATED=20121108T164838417238'
CIRC_LAUNCHED_BAD_2 = '650 CIRC toolong8901234567 LAUNCHED \
BUILD_FLAGS=NEED_CAPACITY \
PURPOSE=GENERAL \
TIME_CREATED=2012-11-08T16:48:38.417238'
CIRC_EXTENDED = '650 CIRC 7 EXTENDED \
$999A226EBED397F331B612FE1E4CFAE5C1F201BA=piyaz \
BUILD_FLAGS=NEED_CAPACITY \
PURPOSE=GENERAL \
TIME_CREATED=2012-11-08T16:48:38.417238'
CIRC_FAILED = '650 CIRC 5 FAILED \
$E57A476CD4DFBD99B4EE52A100A58610AD6E80B9=ergebnisoffen \
BUILD_FLAGS=NEED_CAPACITY \
PURPOSE=GENERAL \
TIME_CREATED=2012-11-08T16:48:36.400959 \
REASON=DESTROYED \
REMOTE_REASON=OR_CONN_CLOSED'
CIRC_WITH_CREDENTIALS = '650 CIRC 7 LAUNCHED \
SOCKS_USERNAME="It\'s a me, Mario!" \
SOCKS_PASSWORD="your princess is in another castle"'
# CIRC events from tor v0.2.1.30 without the VERBOSE_NAMES feature
CIRC_LAUNCHED_OLD = '650 CIRC 4 LAUNCHED'
CIRC_EXTENDED_OLD = '650 CIRC 1 EXTENDED \
$E57A476CD4DFBD99B4EE52A100A58610AD6E80B9,hamburgerphone'
CIRC_BUILT_OLD = '650 CIRC 1 BUILT \
$E57A476CD4DFBD99B4EE52A100A58610AD6E80B9,hamburgerphone,PrivacyRepublic14'
# CIRC_MINOR event from tor 0.2.3.16.
CIRC_MINOR_EVENT = '650 CIRC_MINOR 7 PURPOSE_CHANGED \
$67B2BDA4264D8A189D9270E28B1D30A262838243~europa1 \
BUILD_FLAGS=IS_INTERNAL,NEED_CAPACITY \
PURPOSE=MEASURE_TIMEOUT \
TIME_CREATED=2012-12-03T16:45:33.409602 \
OLD_PURPOSE=TESTING'
CIRC_MINOR_EVENT_BAD_1 = '650 CIRC_MINOR 7 PURPOSE_CHANGED \
$67B2BDA4264D8A189D9270E28B1D30A262838243~europa1 \
BUILD_FLAGS=IS_INTERNAL,NEED_CAPACITY \
PURPOSE=MEASURE_TIMEOUT \
TIME_CREATED=20121203T164533409602 \
OLD_PURPOSE=TESTING'
CIRC_MINOR_EVENT_BAD_2 = '650 CIRC_MINOR toolong8901234567 PURPOSE_CHANGED \
$67B2BDA4264D8A189D9270E28B1D30A262838243~europa1 \
BUILD_FLAGS=IS_INTERNAL,NEED_CAPACITY \
PURPOSE=MEASURE_TIMEOUT \
TIME_CREATED=2012-12-03T16:45:33.409602 \
OLD_PURPOSE=TESTING'
# CLIENTS_SEEN example from the spec
CLIENTS_SEEN_EVENT = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=us=16,de=8,uk=8 \
IPVersions=v4=16,v6=40'
CLIENTS_SEEN_EVENT_BAD_1 = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=us:16,de:8,uk:8 \
IPVersions=v4=16,v6=40'
CLIENTS_SEEN_EVENT_BAD_2 = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=usa=16,unitedkingdom=8 \
IPVersions=v4=16,v6=40'
CLIENTS_SEEN_EVENT_BAD_3 = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=us=16,de=8,uk=eight \
IPVersions=v4=16,v6=40'
CLIENTS_SEEN_EVENT_BAD_4 = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=au=16,au=8,au=8 \
IPVersions=v4=16,v6=40'
CLIENTS_SEEN_EVENT_BAD_5 = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=us=16,de=8,uk=8 \
IPVersions=v4:16,v6:40'
CLIENTS_SEEN_EVENT_BAD_6 = '650 CLIENTS_SEEN \
TimeStarted="2008-12-25 23:50:43" \
CountrySummary=us=16,de=8,uk=8 \
IPVersions=v4=sixteen,v6=40'
# CONF_CHANGED event from tor 0.2.3.16.
CONF_CHANGED_EVENT = """650-CONF_CHANGED
650-ExitNodes=caerSidi
650-ExitPolicy
650-MaxCircuitDirtiness=20
650 OK
"""
# GUARD events from tor v0.2.1.30.
GUARD_NEW = '650 GUARD ENTRY $36B5DBA788246E8369DBAF58577C6BC044A9A374 NEW'
GUARD_GOOD = '650 GUARD ENTRY $5D0034A368E0ABAF663D21847E1C9B6CFA09752A GOOD'
GUARD_BAD = '650 GUARD ENTRY $5D0034A368E0ABAF663D21847E1C9B6CFA09752A=caerSidi BAD'
HS_DESC_EVENT = '650 HS_DESC REQUESTED ajhb7kljbiru65qo NO_AUTH \
$67B2BDA4264D8A189D9270E28B1D30A262838243=europa1 b3oeducbhjmbqmgw2i3jtz4fekkrinwj'
HS_DESC_NO_DESC_ID = '650 HS_DESC REQUESTED ajhb7kljbiru65qo NO_AUTH \
$67B2BDA4264D8A189D9270E28B1D30A262838243'
HS_DESC_NOT_FOUND = '650 HS_DESC REQUESTED ajhb7kljbiru65qo NO_AUTH UNKNOWN'
HS_DESC_FAILED = '650 HS_DESC FAILED ajhb7kljbiru65qo NO_AUTH \
$67B2BDA4264D8A189D9270E28B1D30A262838243 \
b3oeducbhjmbqmgw2i3jtz4fekkrinwj REASON=NOT_FOUND'
# HS_DESC_CONTENT from 0.2.7.1
HS_DESC_CONTENT_EVENT = """\
650+HS_DESC_CONTENT facebookcorewwwi riwvyw6njgvs4koel4heqs7w4bssnmlw $8A30C9E8F5954EE286D29BD65CADEA6991200804~YorkshireTOR
rendezvous-service-descriptor riwvyw6njgvs4koel4heqs7w4bssnmlw
version 2
permanent-key
-----BEGIN RSA PUBLIC KEY-----
MIGJAoGBALfng/krEfrBcvblDiM3PAkowkiAKxLoTsXt3nPEzyTP6Cw+Gdr0ODje
hmxTngN1pKiH7szk4Q1p2RabOrUHWwXmGXeDDNs00fcyU6HupgqsCoKOqCsmPac6
/58apC64A7xHeS02wtfWJp6qiZ8i6GGu6xWXRWux+ShPgcHvkajRAgMahU8=
-----END RSA PUBLIC KEY-----
secret-id-part vnb2j6ftvkvghypd4yyypsl3qmpjyq3j
publication-time 2015-03-13 19:00:00
protocol-versions 2,3
introduction-points
-----BEGIN MESSAGE-----
aW50cm9kdWN0aW9uLXBvaW50IHNqbm1xbmdraXl3YmtkeXBjb2FqdHY2dmNtNjY2
NmR6CmlwLWFkZHJlc3MgMTk4LjIzLjE4Ny4xNTgKb25pb24tcG9ydCA0NDMKb25p
b24ta2V5Ci0tLS0tQkVHSU4gUlNBIFBVQkxJQyBLRVktLS0tLQpNSUdKQW9HQkFO
MUNsaERkZDNSWHdwT0hMZUNNYlVvNFlISDNEMUxnR0pXbEFPVnBxQ3ZSbDhEbjIv
UWpGeHNVCnJGaG9WUzRDUjlNVFIzMnlsSnJ0R2JTcWxRVm1HY3M3bnZ5RDU5YVky
em9RVGhIdm1lWVUwS0ZzQTc5ZFNyTW0KUDR5WnZFSkZmdkpQODRySWd0TlVtZ3R4
aHQzVzNiR2FVMUNBNGU4bjBza2hYWXdRRzg1MUFnTUJBQUU9Ci0tLS0tRU5EIFJT
QSBQVUJMSUMgS0VZLS0tLS0Kc2VydmljZS1rZXkKLS0tLS1CRUdJTiBSU0EgUFVC
TElDIEtFWS0tLS0tCk1JR0pBb0dCQUxDajREUTJPaTJhRjF4WE1iNjhsSHFJQnN5
NjRSbXFjTUpNb1d3THF4WTFiREcwbnE0Nlk5eHYKVnBPVzAxTmQrYnF3b3BIa0J2
TzllSGVKTm9NN1BYMmtVWmQ5RlFQSUJHbWdCZ0dxenV6a2lQTEFpbHhtWHRQbwpN
cnRheGdzRTR6MjlWYnJUV2Q0SHFKSDJFOWNybDdzeHhiTGVvSDFLRjZzSm5lMFlP
WGlyQWdNQkFBRT0KLS0tLS1FTkQgUlNBIFBVQkxJQyBLRVktLS0tLQppbnRyb2R1
Y3Rpb24tcG9pbnQgYmhzbjVhdDNzaDIzZGZ5cmNxYnU1bDV6NGs1Z3RueHAKaXAt
YWRkcmVzcyAxMDQuMTI4Ljc4LjEwNwpvbmlvbi1wb3J0IDMwMDIKb25pb24ta2V5
Ci0tLS0tQkVHSU4gUlNBIFBVQkxJQyBLRVktLS0tLQpNSUdKQW9HQkFMYk5HdU0v
RlNnZEJjOTFZSjNQOXRoVC9vQzRWOFZDZzZBcjk5WlFHRldhVGlRdXRjNGZLWC9F
CnR1TGRjdzBsRmxVbXhPZXNXMVduaVkxaVFDOW9yUkQ3cGE1amNES0EyRThDb3kv
WmYzYTlXNFNRRzYxakswUzcKYlNGVk9LUHQ3TDUvT21pK05icStsSnB5MmdCTnFU
TWt0U0k0YklPUlY1aUpWWkRWU21qVkFnTUJBQUU9Ci0tLS0tRU5EIFJTQSBQVUJM
SUMgS0VZLS0tLS0Kc2VydmljZS1rZXkKLS0tLS1CRUdJTiBSU0EgUFVCTElDIEtF
WS0tLS0tCk1JR0pBb0dCQU5YOVJobHRobkZkbXFXQVRsNGJ4dTBrR0UyNWlYcm83
VzFvM05GV3Q4cG8rL25oU080aVZRMHQKWVZzSGwyZEdGSVNKcWxqK3FaTXh1emVL
ZmNwV3dHQnZMR1FaTDZJYUxJMUxkWSt6YzBaNjFFdWx5dXRFWFl5bAo3djFwRWN2
dGFJSDhuRXdzQnZlU1ZWUVJ5cFI4b3BnbXhmMWFKWmdzZVdMSE5hZ0JwNW81QWdN
QkFBRT0KLS0tLS1FTkQgUlNBIFBVQkxJQyBLRVktLS0tLQppbnRyb2R1Y3Rpb24t
cG9pbnQgbTVibGd0dHRscWc1Mno1emJlcW82a2ViczQ0bG1wd2EKaXAtYWRkcmVz
cyAxNzYuMzEuMzUuMTQ5Cm9uaW9uLXBvcnQgNDQzCm9uaW9uLWtleQotLS0tLUJF
R0lOIFJTQSBQVUJMSUMgS0VZLS0tLS0KTUlHSkFvR0JBTnVORFlobFF2RG9TNEFa
cE5nUkFLUmZhRjAzVWFSM0JrSXo3UC8zOVB4NjZueDc1bG5wQ1pMYwpkSHl4cGJu
UWp2ekE0UzdjUUVnYXUyQkgyeUtzU1NBL3ZXUHk4OVJBWUVhaUV2TlZQS1hRWmNw
cnY0WXdmejU0CmRuY2VJNG51NVFQM0E3SUpkSi9PYTlHMklhdHA3OVBlTzJkN2Rq
L3pzWFNKMkxvRXgyZWRBZ01CQUFFPQotLS0tLUVORCBSU0EgUFVCTElDIEtFWS0t
LS0tCnNlcnZpY2Uta2V5Ci0tLS0tQkVHSU4gUlNBIFBVQkxJQyBLRVktLS0tLQpN
SUdKQW9HQkFLbU9KbXB6ZVphVkZXaVlkMms5SHY1TWpidUY0eDBUKzlXclV4Z041
N2o2Uk1CVFZQZ0lVM2hUCkdCY3dwWjR2NDduNitIbVg4VHFNTFlVZkc1bTg1cm8x
SHNKMWVObnh2cW9iZVFVMW13TXdHdDMwbkJ6Y0F2NzMKbWFsYmlYRkxiOVdsK1hl
OTBRdXZhbjZTenhERkx5STFPbzA2aGVUeVZwQ3d0QVVvejhCVEFnTUJBQUU9Ci0t
LS0tRU5EIFJTQSBQVUJMSUMgS0VZLS0tLS0KCg==
-----END MESSAGE-----
signature
-----BEGIN SIGNATURE-----
s9Z0zWHsoPuqLw3GOwA6cv68vCFybnitIK4vD0MbNKF5qrOfKydZzGWfN09PuShy
H8Gr6aBYoz7HmlD7KyeGz9xdwRdouP+LW5YREOCORuwYnu5chWtB4iVJ0N1tIzSp
nHIs1lSrV7Ux2WQ3qSVj505fTGSCmaQRBX726ZlTPW0=
-----END SIGNATURE-----
.
650 OK
"""
HS_DESC_CONTENT_EMPTY_EVENT = """\
650+HS_DESC_CONTENT 3g2upl4pq6kufc4n 255tjwttk3wi7r2df57nuprs72j2daa3 $D7A0C3262724F2BC9646F6836E967A2777A3AF83~tsunaminitor
.
650 OK
"""
# NEWCONSENSUS event from v0.2.1.30.
NEWCONSENSUS_EVENT = """650+NEWCONSENSUS
r Beaver /96bKo4soysolMgKn5Hex2nyFSY pAJH9dSBp/CG6sPhhVY/5bLaVPM 2012-12-02 22:02:45 77.223.43.54 9001 0
s Fast Named Running Stable Valid
r Unnamed /+fJRWjmIGNAL2C5rRZHq3R91tA 7AnpZjfdBpYzXnMNm+w1bTsFF6Y 2012-12-02 17:51:10 91.121.184.87 9001 0
s Fast Guard Running Stable Valid
.
650 OK
"""
# NEWDESC events. I've never actually seen multiple descriptors in an event,
# but the spec allows for it.
NEWDESC_SINGLE = '650 NEWDESC $B3FA3110CC6F42443F039220C134CBD2FC4F0493=Sakura'
NEWDESC_MULTIPLE = '650 NEWDESC $BE938957B2CA5F804B3AFC2C1EE6673170CDBBF8=Moonshine \
$B4BE08B22D4D2923EDC3970FD1B93D0448C6D8FF~Unnamed'
# NS event from tor v0.2.1.30.
NS_EVENT = """650+NS
r whnetz dbBxYcJriTTrcxsuy4PUZcMRwCA VStM7KAIH/mXXoGDUpoGB1OXufg 2012-12-02 21:03:56 141.70.120.13 9001 9030
s Fast HSDir Named Stable V2Dir Valid
.
650 OK
"""
# ORCONN events from starting tor 0.2.2.39 via TBB
ORCONN_CLOSED = '650 ORCONN $A1130635A0CDA6F60C276FBF6994EFBD4ECADAB1~tama CLOSED REASON=DONE'
ORCONN_CONNECTED = '650 ORCONN 127.0.0.1:9000 CONNECTED NCIRCS=20 ID=18'
ORCONN_LAUNCHED = '650 ORCONN $7ED90E2833EE38A75795BA9237B0A4560E51E1A0=GreenDragon LAUNCHED'
ORCONN_BAD_1 = '650 ORCONN $7ED90E2833EE38A75795BA9237B0A4560E5=GreenD LAUNCHED'
ORCONN_BAD_2 = '650 ORCONN 127.0.0.1:001 CONNECTED'
ORCONN_BAD_3 = '650 ORCONN 127.0.0.1:9000 CONNECTED NCIRCS=too_many'
ORCONN_BAD_4 = '650 ORCONN 127.0.0.1:9000 CONNECTED NCIRCS=20 ID=30635A0CDA6F60C276FBF6994EFBD4ECADA'
# STATUS_* events that I was able to easily trigger. Most came from starting
# TBB, then listening while it bootstrapped.
STATUS_GENERAL_CONSENSUS_ARRIVED = '650 STATUS_GENERAL NOTICE CONSENSUS_ARRIVED'
STATUS_CLIENT_ENOUGH_DIR_INFO = '650 STATUS_CLIENT NOTICE ENOUGH_DIR_INFO'
STATUS_CLIENT_CIRC_ESTABLISHED = '650 STATUS_CLIENT NOTICE CIRCUIT_ESTABLISHED'
STATUS_CLIENT_BOOTSTRAP_DESCRIPTORS = '650 STATUS_CLIENT NOTICE BOOTSTRAP \
PROGRESS=53 \
TAG=loading_descriptors \
SUMMARY="Loading relay descriptors"'
STATUS_CLIENT_BOOTSTRAP_STUCK = '650 STATUS_CLIENT WARN BOOTSTRAP \
PROGRESS=80 \
TAG=conn_or \
SUMMARY="Connecting to the Tor network" \
WARNING="Network is unreachable" \
REASON=NOROUTE \
COUNT=5 \
RECOMMENDATION=warn'
STATUS_CLIENT_BOOTSTRAP_CONNECTING = '650 STATUS_CLIENT NOTICE BOOTSTRAP \
PROGRESS=80 \
TAG=conn_or \
SUMMARY="Connecting to the Tor network"'
STATUS_CLIENT_BOOTSTRAP_FIRST_HANDSHAKE = '650 STATUS_CLIENT NOTICE BOOTSTRAP \
PROGRESS=85 \
TAG=handshake_or \
SUMMARY="Finishing handshake with first hop"'
STATUS_CLIENT_BOOTSTRAP_ESTABLISHED = '650 STATUS_CLIENT NOTICE BOOTSTRAP \
PROGRESS=90 \
TAG=circuit_create \
SUMMARY="Establishing a Tor circuit"'
STATUS_CLIENT_BOOTSTRAP_DONE = '650 STATUS_CLIENT NOTICE BOOTSTRAP \
PROGRESS=100 \
TAG=done \
SUMMARY="Done"'
STATUS_SERVER_CHECK_REACHABILITY = '650 STATUS_SERVER NOTICE CHECKING_REACHABILITY \
ORADDRESS=71.35.143.230:9050'
STATUS_SERVER_DNS_TIMEOUT = '650 STATUS_SERVER NOTICE NAMESERVER_STATUS \
NS=205.171.3.25 \
STATUS=DOWN \
ERR="request timed out."'
STATUS_SERVER_DNS_DOWN = '650 STATUS_SERVER WARN NAMESERVER_ALL_DOWN'
STATUS_SERVER_DNS_UP = '650 STATUS_SERVER NOTICE NAMESERVER_STATUS \
NS=205.171.3.25 \
STATUS=UP'
# unknown STATUS_* event type
STATUS_SPECIFIC_CONSENSUS_ARRIVED = '650 STATUS_SPECIFIC NOTICE CONSENSUS_ARRIVED'
# STREAM events from tor 0.2.3.16 for visiting the google front page
STREAM_NEW = '650 STREAM 18 NEW 0 \
encrypted.google.com:443 \
SOURCE_ADDR=127.0.0.1:47849 \
PURPOSE=USER'
STREAM_SENTCONNECT = '650 STREAM 18 SENTCONNECT 26 encrypted.google.com:443'
STREAM_REMAP = '650 STREAM 18 REMAP 26 74.125.227.129:443 SOURCE=EXIT'
STREAM_SUCCEEDED = '650 STREAM 18 SUCCEEDED 26 74.125.227.129:443'
STREAM_CLOSED_RESET = '650 STREAM 21 CLOSED 26 74.125.227.129:443 REASON=CONNRESET'
STREAM_CLOSED_DONE = '650 STREAM 25 CLOSED 26 199.7.52.72:80 REASON=DONE'
STREAM_DIR_FETCH = '650 STREAM 14 NEW 0 \
176.28.51.238.$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA.exit:443 \
SOURCE_ADDR=(Tor_internal):0 PURPOSE=DIR_FETCH'
STREAM_DNS_REQUEST = '650 STREAM 1113 NEW 0 www.google.com:0 \
SOURCE_ADDR=127.0.0.1:15297 \
PURPOSE=DNS_REQUEST'
STREAM_SENTCONNECT_BAD_1 = '650 STREAM 18 SENTCONNECT 26'
STREAM_SENTCONNECT_BAD_2 = '650 STREAM 18 SENTCONNECT 26 encrypted.google.com'
STREAM_SENTCONNECT_BAD_3 = '650 STREAM 18 SENTCONNECT 26 encrypted.google.com:https'
STREAM_DNS_REQUEST_BAD_1 = '650 STREAM 1113 NEW 0 www.google.com:0 \
SOURCE_ADDR=127.0.0.1 \
PURPOSE=DNS_REQUEST'
STREAM_DNS_REQUEST_BAD_2 = '650 STREAM 1113 NEW 0 www.google.com:0 \
SOURCE_ADDR=127.0.0.1:dns \
PURPOSE=DNS_REQUEST'
STREAM_NEWRESOLVE_IP6 = '650 STREAM 23 NEWRESOLVE 0 2001:db8::1:0 PURPOSE=DNS_REQUEST'
TRANSPORT_LAUNCHED = '650 TRANSPORT_LAUNCHED server obfs1 127.0.0.1 1111'
TRANSPORT_LAUNCHED_BAD_TYPE = '650 TRANSPORT_LAUNCHED unicorn obfs1 127.0.0.1 1111'
TRANSPORT_LAUNCHED_BAD_ADDRESS = '650 TRANSPORT_LAUNCHED server obfs1 127.0.x.y 1111'
TRANSPORT_LAUNCHED_BAD_PORT = '650 TRANSPORT_LAUNCHED server obfs1 127.0.0.1 my_port'
CONN_BW = '650 CONN_BW ID=11 TYPE=DIR READ=272 WRITTEN=817'
CONN_BW_BAD_WRITTEN_VALUE = '650 CONN_BW ID=11 TYPE=DIR READ=272 WRITTEN=817.7'
CONN_BW_BAD_MISSING_ID = '650 CONN_BW TYPE=DIR READ=272 WRITTEN=817'
CIRC_BW = '650 CIRC_BW ID=11 READ=272 WRITTEN=817'
CIRC_BW_BAD_WRITTEN_VALUE = '650 CIRC_BW ID=11 READ=272 WRITTEN=817.7'
CIRC_BW_BAD_MISSING_ID = '650 CIRC_BW READ=272 WRITTEN=817'
CELL_STATS_1 = '650 CELL_STATS ID=14 \
OutboundQueue=19403 OutboundConn=15 \
OutboundAdded=create_fast:1,relay_early:2 \
OutboundRemoved=create_fast:1,relay_early:2 \
OutboundTime=create_fast:0,relay_early:0'
CELL_STATS_2 = '650 CELL_STATS \
InboundQueue=19403 InboundConn=32 \
InboundAdded=relay:1,created_fast:1 \
InboundRemoved=relay:1,created_fast:1 \
InboundTime=relay:0,created_fast:0 \
OutboundQueue=6710 OutboundConn=18 \
OutboundAdded=create:1,relay_early:1 \
OutboundRemoved=create:1,relay_early:1 \
OutboundTime=create:0,relay_early:0'
CELL_STATS_BAD_1 = '650 CELL_STATS OutboundAdded=create_fast:-1,relay_early:2'
CELL_STATS_BAD_2 = '650 CELL_STATS OutboundAdded=create_fast:arg,relay_early:-2'
CELL_STATS_BAD_3 = '650 CELL_STATS OutboundAdded=create_fast!:1,relay_early:-2'
TB_EMPTY_1 = '650 TB_EMPTY ORCONN ID=16 READ=0 WRITTEN=0 LAST=100'
TB_EMPTY_2 = '650 TB_EMPTY GLOBAL READ=93 WRITTEN=93 LAST=100'
TB_EMPTY_3 = '650 TB_EMPTY RELAY READ=93 WRITTEN=93 LAST=100'
TB_EMPTY_BAD_1 = '650 TB_EMPTY GLOBAL READ=93 WRITTEN=blarg LAST=100'
TB_EMPTY_BAD_2 = '650 TB_EMPTY GLOBAL READ=93 WRITTEN=93 LAST=-100'
def _get_event(content):
  """Parse *content* as an asynchronous 650 EVENT reply from tor."""
  response = ControlMessage.from_str(content, 'EVENT', normalize = True)
  return response
class TestEvents(unittest.TestCase):
  def test_example(self):
    """
    Exercises the add_event_listener() pydoc example, but without the sleep().
    """
    import time
    from stem.control import Controller, EventType
    def print_bw(event):
      # listener from the example; checks how a BW event renders
      msg = 'sent: %i, received: %i' % (event.written, event.read)
      self.assertEqual('sent: 25, received: 15', msg)
    def event_sender():
      # stand-in for tor, feeding a few BW events to the listener
      for index in range(3):
        print_bw(_get_event('650 BW 15 25'))
        time.sleep(0.0005)
    controller = Mock(spec = Controller)
    controller.authenticate()
    controller.add_event_listener(print_bw, EventType.BW)
    # emit the events from a separate thread, as a real controller would
    events_thread = threading.Thread(target = event_sender)
    events_thread.start()
    time.sleep(0.002)
    events_thread.join()
def test_event(self):
# synthetic, contrived message construction to reach the blank event check
self.assertRaises(ProtocolError, stem.response.convert, 'EVENT', stem.response.ControlMessage([('', '', '')], ''))
# Event._parse_message() on an unknown event type
event = _get_event('650 NONE SOLID "NON SENSE" condition=MEH quoted="1 2 3"')
self.assertEqual('NONE', event.type)
self.assertEqual(['SOLID', '"NON', 'SENSE"'], event.positional_args)
self.assertEqual({'condition': 'MEH', 'quoted': '1 2 3'}, event.keyword_args)
def test_log_events(self):
event = _get_event('650 DEBUG connection_edge_process_relay_cell(): Got an extended cell! Yay.')
self.assertTrue(isinstance(event, stem.response.events.LogEvent))
self.assertEqual('DEBUG', event.runlevel)
self.assertEqual('connection_edge_process_relay_cell(): Got an extended cell! Yay.', event.message)
event = _get_event('650 INFO circuit_finish_handshake(): Finished building circuit hop:')
self.assertTrue(isinstance(event, stem.response.events.LogEvent))
self.assertEqual('INFO', event.runlevel)
self.assertEqual('circuit_finish_handshake(): Finished building circuit hop:', event.message)
event = _get_event('650+WARN\na multi-line\nwarning message\n.\n650 OK\n')
self.assertTrue(isinstance(event, stem.response.events.LogEvent))
self.assertEqual('WARN', event.runlevel)
self.assertEqual('a multi-line\nwarning message', event.message)
  def test_addrmap_event(self):
    """
    Parses ADDRMAP events: a standard mapping, one without an expiration, an
    error entry, the CACHED flag, and malformed variants.
    """
    event = _get_event(ADDRMAP)
    self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
    self.assertEqual(ADDRMAP.lstrip('650 '), str(event))
    self.assertEqual('www.atagar.com', event.hostname)
    self.assertEqual('75.119.206.243', event.destination)
    self.assertEqual(datetime.datetime(2012, 11, 19, 0, 50, 13), event.expiry)
    self.assertEqual(None, event.error)
    self.assertEqual(datetime.datetime(2012, 11, 19, 8, 50, 13), event.utc_expiry)
    # mapping without an EXPIRES attribute
    event = _get_event(ADDRMAP_NO_EXPIRATION)
    self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
    self.assertEqual(ADDRMAP_NO_EXPIRATION.lstrip('650 '), str(event))
    self.assertEqual('www.atagar.com', event.hostname)
    self.assertEqual('75.119.206.243', event.destination)
    self.assertEqual(None, event.expiry)
    self.assertEqual(None, event.error)
    self.assertEqual(None, event.utc_expiry)
    # failed resolution, providing an error rather than a destination
    event = _get_event(ADDRMAP_ERROR_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
    self.assertEqual(ADDRMAP_ERROR_EVENT.lstrip('650 '), str(event))
    self.assertEqual('www.atagar.com', event.hostname)
    self.assertEqual(None, event.destination)
    self.assertEqual(datetime.datetime(2012, 11, 19, 0, 50, 13), event.expiry)
    self.assertEqual('yes', event.error)
    self.assertEqual(datetime.datetime(2012, 11, 19, 8, 50, 13), event.utc_expiry)
    self.assertEqual(None, event.cached)
    # malformed content where quotes are missing
    self.assertRaises(ProtocolError, _get_event, ADDRMAP_BAD_1)
    self.assertRaises(ProtocolError, _get_event, ADDRMAP_BAD_2)
    # check the CACHED flag
    event = _get_event(ADDRMAP_CACHED)
    self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
    self.assertEqual('example.com', event.hostname)
    self.assertEqual(True, event.cached)
    event = _get_event(ADDRMAP_NOT_CACHED)
    self.assertTrue(isinstance(event, stem.response.events.AddrMapEvent))
    self.assertEqual('example.com', event.hostname)
    self.assertEqual(False, event.cached)
    # the CACHED argument should only allow YES or NO
    self.assertRaises(ProtocolError, _get_event, ADDRMAP_CACHED_MALFORMED)
def test_authdir_newdesc_event(self):
minimal_event = _get_event('650+AUTHDIR_NEWDESCS\nAction\nMessage\nDescriptor\n.\n650 OK\n')
self.assertTrue(isinstance(minimal_event, stem.response.events.AuthDirNewDescEvent))
self.assertEqual('Action', minimal_event.action)
self.assertEqual('Message', minimal_event.message)
self.assertEqual('Descriptor', minimal_event.descriptor)
event = _get_event(AUTHDIR_NEWDESC)
self.assertTrue(isinstance(event, stem.response.events.AuthDirNewDescEvent))
self.assertEqual('DROPPED', event.action)
self.assertEqual('Not replacing router descriptor; no information has changed since the last one with this identity.', event.message)
self.assertTrue('Descripto', event.descriptor.startswith('@uploaded-at 2017-05-25 04:46:21'))
def test_build_timeout_set_event(self):
event = _get_event(BUILD_TIMEOUT_EVENT)
self.assertTrue(isinstance(event, stem.response.events.BuildTimeoutSetEvent))
self.assertEqual(BUILD_TIMEOUT_EVENT.lstrip('650 '), str(event))
self.assertEqual(TimeoutSetType.COMPUTED, event.set_type)
self.assertEqual(124, event.total_times)
self.assertEqual(9019, event.timeout)
self.assertEqual(1375, event.xm)
self.assertEqual(0.855662, event.alpha)
self.assertEqual(0.8, event.quantile)
self.assertEqual(0.137097, event.timeout_rate)
self.assertEqual(21850, event.close_timeout)
self.assertEqual(0.072581, event.close_rate)
# malformed content where we get non-numeric values
self.assertRaises(ProtocolError, _get_event, BUILD_TIMEOUT_EVENT_BAD_1)
self.assertRaises(ProtocolError, _get_event, BUILD_TIMEOUT_EVENT_BAD_2)
def test_bw_event(self):
event = _get_event('650 BW 15 25')
self.assertTrue(isinstance(event, stem.response.events.BandwidthEvent))
self.assertEqual(15, event.read)
self.assertEqual(25, event.written)
event = _get_event('650 BW 0 0')
self.assertEqual(0, event.read)
self.assertEqual(0, event.written)
# BW events are documented as possibly having various keywords including
# DIR, OR, EXIT, and APP in the future. This is kinda a pointless note
# since tor doesn't actually do it yet (and likely never will), but might
# as well sanity test that it'll be ok.
event = _get_event('650 BW 10 20 OR=5 EXIT=500')
self.assertEqual(10, event.read)
self.assertEqual(20, event.written)
self.assertEqual({'OR': '5', 'EXIT': '500'}, event.keyword_args)
self.assertRaises(ProtocolError, _get_event, '650 BW')
self.assertRaises(ProtocolError, _get_event, '650 BW 15')
self.assertRaises(ProtocolError, _get_event, '650 BW -15 25')
self.assertRaises(ProtocolError, _get_event, '650 BW 15 -25')
self.assertRaises(ProtocolError, _get_event, '650 BW x 25')
  def test_circ_event(self):
    """
    Parses CIRC events in several states, with both the current attribute set
    and the older (pre-0.2.2.9) attribute-free format, plus malformed input.
    """
    # newly launched circuit with no hops yet
    event = _get_event(CIRC_LAUNCHED)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_LAUNCHED.lstrip('650 '), str(event))
    self.assertEqual('7', event.id)
    self.assertEqual(CircStatus.LAUNCHED, event.status)
    self.assertEqual((), event.path)
    self.assertEqual((CircBuildFlag.NEED_CAPACITY,), event.build_flags)
    self.assertEqual(CircPurpose.GENERAL, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(datetime.datetime(2012, 11, 8, 16, 48, 38, 417238), event.created)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.remote_reason)
    self.assertEqual(None, event.socks_username)
    self.assertEqual(None, event.socks_password)
    # circuit extended by one hop
    event = _get_event(CIRC_EXTENDED)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_EXTENDED.lstrip('650 '), str(event))
    self.assertEqual('7', event.id)
    self.assertEqual(CircStatus.EXTENDED, event.status)
    self.assertEqual((('999A226EBED397F331B612FE1E4CFAE5C1F201BA', 'piyaz'),), event.path)
    self.assertEqual((CircBuildFlag.NEED_CAPACITY,), event.build_flags)
    self.assertEqual(CircPurpose.GENERAL, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(datetime.datetime(2012, 11, 8, 16, 48, 38, 417238), event.created)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.remote_reason)
    # failed circuit, which includes local and remote closure reasons
    event = _get_event(CIRC_FAILED)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_FAILED.lstrip('650 '), str(event))
    self.assertEqual('5', event.id)
    self.assertEqual(CircStatus.FAILED, event.status)
    self.assertEqual((('E57A476CD4DFBD99B4EE52A100A58610AD6E80B9', 'ergebnisoffen'),), event.path)
    self.assertEqual((CircBuildFlag.NEED_CAPACITY,), event.build_flags)
    self.assertEqual(CircPurpose.GENERAL, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(datetime.datetime(2012, 11, 8, 16, 48, 36, 400959), event.created)
    self.assertEqual(CircClosureReason.DESTROYED, event.reason)
    self.assertEqual(CircClosureReason.OR_CONN_CLOSED, event.remote_reason)
    # circuit with SOCKS_USERNAME and SOCKS_PASSWORD credentials
    event = _get_event(CIRC_WITH_CREDENTIALS)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_WITH_CREDENTIALS.lstrip('650 '), str(event))
    self.assertEqual('7', event.id)
    self.assertEqual(CircStatus.LAUNCHED, event.status)
    self.assertEqual((), event.path)
    self.assertEqual(None, event.build_flags)
    self.assertEqual(None, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(None, event.created)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.remote_reason)
    self.assertEqual("It's a me, Mario!", event.socks_username)
    self.assertEqual('your princess is in another castle', event.socks_password)
    # older event format without the optional attributes
    event = _get_event(CIRC_LAUNCHED_OLD)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_LAUNCHED_OLD.lstrip('650 '), str(event))
    self.assertEqual('4', event.id)
    self.assertEqual(CircStatus.LAUNCHED, event.status)
    self.assertEqual((), event.path)
    self.assertEqual(None, event.build_flags)
    self.assertEqual(None, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(None, event.created)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.remote_reason)
    # older format where path entries may lack a fingerprint or nickname
    event = _get_event(CIRC_EXTENDED_OLD)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_EXTENDED_OLD.lstrip('650 '), str(event))
    self.assertEqual('1', event.id)
    self.assertEqual(CircStatus.EXTENDED, event.status)
    self.assertEqual((('E57A476CD4DFBD99B4EE52A100A58610AD6E80B9', None), (None, 'hamburgerphone')), event.path)
    self.assertEqual(None, event.build_flags)
    self.assertEqual(None, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(None, event.created)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.remote_reason)
    # older format for a fully built circuit
    event = _get_event(CIRC_BUILT_OLD)
    self.assertTrue(isinstance(event, stem.response.events.CircuitEvent))
    self.assertEqual(CIRC_BUILT_OLD.lstrip('650 '), str(event))
    self.assertEqual('1', event.id)
    self.assertEqual(CircStatus.BUILT, event.status)
    self.assertEqual((('E57A476CD4DFBD99B4EE52A100A58610AD6E80B9', None), (None, 'hamburgerphone'), (None, 'PrivacyRepublic14')), event.path)
    self.assertEqual(None, event.build_flags)
    self.assertEqual(None, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(None, event.created)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.remote_reason)
    # malformed TIME_CREATED timestamp
    self.assertRaises(ProtocolError, _get_event, CIRC_LAUNCHED_BAD_1)
    # invalid circuit id
    self.assertRaises(ProtocolError, _get_event, CIRC_LAUNCHED_BAD_2)
  def test_circ_minor_event(self):
    """
    Parses a CIRC_MINOR purpose-change event, then malformed variants.
    """
    event = _get_event(CIRC_MINOR_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.CircMinorEvent))
    self.assertEqual(CIRC_MINOR_EVENT.lstrip('650 '), str(event))
    self.assertEqual('7', event.id)
    self.assertEqual(CircEvent.PURPOSE_CHANGED, event.event)
    self.assertEqual((('67B2BDA4264D8A189D9270E28B1D30A262838243', 'europa1'),), event.path)
    self.assertEqual((CircBuildFlag.IS_INTERNAL, CircBuildFlag.NEED_CAPACITY), event.build_flags)
    self.assertEqual(CircPurpose.MEASURE_TIMEOUT, event.purpose)
    self.assertEqual(None, event.hs_state)
    self.assertEqual(None, event.rend_query)
    self.assertEqual(datetime.datetime(2012, 12, 3, 16, 45, 33, 409602), event.created)
    self.assertEqual(CircPurpose.TESTING, event.old_purpose)
    self.assertEqual(None, event.old_hs_state)
    # malformed TIME_CREATED timestamp
    self.assertRaises(ProtocolError, _get_event, CIRC_MINOR_EVENT_BAD_1)
    # invalid circuit id
    self.assertRaises(ProtocolError, _get_event, CIRC_MINOR_EVENT_BAD_2)
  def test_clients_seen_event(self):
    """
    Parses a CLIENTS_SEEN event, then checks that malformed CountrySummary
    and IPVersions attributes are rejected.
    """
    event = _get_event(CLIENTS_SEEN_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.ClientsSeenEvent))
    self.assertEqual(CLIENTS_SEEN_EVENT.lstrip('650 '), str(event))
    self.assertEqual(datetime.datetime(2008, 12, 25, 23, 50, 43), event.start_time)
    self.assertEqual({'us': 16, 'de': 8, 'uk': 8}, event.locales)
    self.assertEqual({'v4': 16, 'v6': 40}, event.ip_versions)
    # CountrySummary's 'key=value' mappings are replaced with 'key:value'
    self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_1)
    # CountrySummary's country codes aren't two letters
    self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_2)
    # CountrySummary's mapping contains a non-numeric value
    self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_3)
    # CountrySummary has duplicate country codes (multiple 'au=' mappings)
    self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_4)
    # IPVersions's 'key=value' mappings are replaced with 'key:value'
    self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_5)
    # IPVersions's mapping contains a non-numeric value
    self.assertRaises(ProtocolError, _get_event, CLIENTS_SEEN_EVENT_BAD_6)
def test_conf_changed(self):
event = _get_event(CONF_CHANGED_EVENT)
expected_config = {
'ExitNodes': 'caerSidi',
'MaxCircuitDirtiness': '20',
'ExitPolicy': None,
}
self.assertTrue(isinstance(event, stem.response.events.ConfChangedEvent))
self.assertEqual(expected_config, event.config)
def test_descchanged_event(self):
# all we can check for is that the event is properly parsed as a
# DescChangedEvent instance
event = _get_event('650 DESCCHANGED')
self.assertTrue(isinstance(event, stem.response.events.DescChangedEvent))
self.assertEqual('DESCCHANGED', str(event))
self.assertEqual([], event.positional_args)
self.assertEqual({}, event.keyword_args)
def test_guard_event(self):
event = _get_event(GUARD_NEW)
self.assertTrue(isinstance(event, stem.response.events.GuardEvent))
self.assertEqual(GUARD_NEW.lstrip('650 '), str(event))
self.assertEqual(GuardType.ENTRY, event.guard_type)
self.assertEqual('$36B5DBA788246E8369DBAF58577C6BC044A9A374', event.endpoint)
self.assertEqual('36B5DBA788246E8369DBAF58577C6BC044A9A374', event.endpoint_fingerprint)
self.assertEqual(None, event.endpoint_nickname)
self.assertEqual(GuardStatus.NEW, event.status)
event = _get_event(GUARD_GOOD)
self.assertEqual(GuardType.ENTRY, event.guard_type)
self.assertEqual('$5D0034A368E0ABAF663D21847E1C9B6CFA09752A', event.endpoint)
self.assertEqual('5D0034A368E0ABAF663D21847E1C9B6CFA09752A', event.endpoint_fingerprint)
self.assertEqual(None, event.endpoint_nickname)
self.assertEqual(GuardStatus.GOOD, event.status)
event = _get_event(GUARD_BAD)
self.assertEqual(GuardType.ENTRY, event.guard_type)
self.assertEqual('$5D0034A368E0ABAF663D21847E1C9B6CFA09752A=caerSidi', event.endpoint)
self.assertEqual('5D0034A368E0ABAF663D21847E1C9B6CFA09752A', event.endpoint_fingerprint)
self.assertEqual('caerSidi', event.endpoint_nickname)
self.assertEqual(GuardStatus.BAD, event.status)
  def test_hs_desc_event(self):
    """
    Parses HS_DESC events: a request, variants without a descriptor id or
    directory fingerprint, and a failed fetch with a reason.
    """
    event = _get_event(HS_DESC_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.HSDescEvent))
    self.assertEqual(HS_DESC_EVENT.lstrip('650 '), str(event))
    self.assertEqual(HSDescAction.REQUESTED, event.action)
    self.assertEqual('ajhb7kljbiru65qo', event.address)
    self.assertEqual(HSAuth.NO_AUTH, event.authentication)
    self.assertEqual('$67B2BDA4264D8A189D9270E28B1D30A262838243=europa1', event.directory)
    self.assertEqual('67B2BDA4264D8A189D9270E28B1D30A262838243', event.directory_fingerprint)
    self.assertEqual('europa1', event.directory_nickname)
    self.assertEqual('b3oeducbhjmbqmgw2i3jtz4fekkrinwj', event.descriptor_id)
    self.assertEqual(None, event.reason)
    # event without a descriptor id or directory nickname
    event = _get_event(HS_DESC_NO_DESC_ID)
    self.assertEqual('$67B2BDA4264D8A189D9270E28B1D30A262838243', event.directory)
    self.assertEqual('67B2BDA4264D8A189D9270E28B1D30A262838243', event.directory_fingerprint)
    self.assertEqual(None, event.directory_nickname)
    self.assertEqual(None, event.descriptor_id)
    self.assertEqual(None, event.reason)
    # event where the directory is the literal 'UNKNOWN'
    event = _get_event(HS_DESC_NOT_FOUND)
    self.assertEqual('UNKNOWN', event.directory)
    self.assertEqual(None, event.directory_fingerprint)
    self.assertEqual(None, event.directory_nickname)
    self.assertEqual(None, event.descriptor_id)
    self.assertEqual(None, event.reason)
    # failed fetch, which includes a REASON attribute
    event = _get_event(HS_DESC_FAILED)
    self.assertTrue(isinstance(event, stem.response.events.HSDescEvent))
    self.assertEqual(HS_DESC_FAILED.lstrip('650 '), str(event))
    self.assertEqual(HSDescAction.FAILED, event.action)
    self.assertEqual('ajhb7kljbiru65qo', event.address)
    self.assertEqual(HSAuth.NO_AUTH, event.authentication)
    self.assertEqual('$67B2BDA4264D8A189D9270E28B1D30A262838243', event.directory)
    self.assertEqual('67B2BDA4264D8A189D9270E28B1D30A262838243', event.directory_fingerprint)
    self.assertEqual(None, event.directory_nickname)
    self.assertEqual('b3oeducbhjmbqmgw2i3jtz4fekkrinwj', event.descriptor_id)
    self.assertEqual(HSDescReason.NOT_FOUND, event.reason)
  def test_hs_desc_content_event(self):
    """
    Parses HS_DESC_CONTENT events, both with a descriptor payload and with an
    empty body (descriptor unavailable).
    """
    event = _get_event(HS_DESC_CONTENT_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.HSDescContentEvent))
    self.assertEqual('facebookcorewwwi', event.address)
    self.assertEqual('riwvyw6njgvs4koel4heqs7w4bssnmlw', event.descriptor_id)
    self.assertEqual('$8A30C9E8F5954EE286D29BD65CADEA6991200804~YorkshireTOR', event.directory)
    self.assertEqual('8A30C9E8F5954EE286D29BD65CADEA6991200804', event.directory_fingerprint)
    self.assertEqual('YorkshireTOR', event.directory_nickname)
    # the embedded hidden service descriptor itself
    desc = event.descriptor
    self.assertEqual('riwvyw6njgvs4koel4heqs7w4bssnmlw', desc.descriptor_id)
    self.assertEqual(2, desc.version)
    self.assertTrue('MIGJAoGBALf' in desc.permanent_key)
    self.assertEqual('vnb2j6ftvkvghypd4yyypsl3qmpjyq3j', desc.secret_id_part)
    self.assertEqual(datetime.datetime(2015, 3, 13, 19, 0, 0), desc.published)
    self.assertEqual([2, 3], desc.protocol_versions)
    self.assertEqual(3, len(desc.introduction_points()))
    self.assertTrue('s9Z0zWHsoPu' in desc.signature)
    # event without a descriptor payload
    event = _get_event(HS_DESC_CONTENT_EMPTY_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.HSDescContentEvent))
    self.assertEqual('3g2upl4pq6kufc4n', event.address)
    self.assertEqual('255tjwttk3wi7r2df57nuprs72j2daa3', event.descriptor_id)
    self.assertEqual('$D7A0C3262724F2BC9646F6836E967A2777A3AF83~tsunaminitor', event.directory)
    self.assertEqual('D7A0C3262724F2BC9646F6836E967A2777A3AF83', event.directory_fingerprint)
    self.assertEqual('tsunaminitor', event.directory_nickname)
    self.assertEqual(None, event.descriptor)
def test_newdesc_event(self):
event = _get_event(NEWDESC_SINGLE)
expected_relays = (('B3FA3110CC6F42443F039220C134CBD2FC4F0493', 'Sakura'),)
self.assertTrue(isinstance(event, stem.response.events.NewDescEvent))
self.assertEqual(NEWDESC_SINGLE.lstrip('650 '), str(event))
self.assertEqual(expected_relays, event.relays)
event = _get_event(NEWDESC_MULTIPLE)
expected_relays = (('BE938957B2CA5F804B3AFC2C1EE6673170CDBBF8', 'Moonshine'),
('B4BE08B22D4D2923EDC3970FD1B93D0448C6D8FF', 'Unnamed'))
self.assertTrue(isinstance(event, stem.response.events.NewDescEvent))
self.assertEqual(NEWDESC_MULTIPLE.lstrip('650 '), str(event))
self.assertEqual(expected_relays, event.relays)
def test_network_liveness_event(self):
event = _get_event('650 NETWORK_LIVENESS UP')
self.assertTrue(isinstance(event, stem.response.events.NetworkLivenessEvent))
self.assertEqual('NETWORK_LIVENESS UP', str(event))
self.assertEqual('UP', event.status)
event = _get_event('650 NETWORK_LIVENESS DOWN')
self.assertEqual('DOWN', event.status)
event = _get_event('650 NETWORK_LIVENESS OTHER_STATUS key=value')
self.assertEqual('OTHER_STATUS', event.status)
  def test_new_consensus_event(self):
    """
    NEWCONSENSUS events should provide the router status entries of the new
    consensus, parsed as RouterStatusEntryV3 instances.
    """
    expected_desc = []
    expected_desc.append(RouterStatusEntryV3.create({
      'r': 'Beaver /96bKo4soysolMgKn5Hex2nyFSY pAJH9dSBp/CG6sPhhVY/5bLaVPM 2012-12-02 22:02:45 77.223.43.54 9001 0',
      's': 'Fast Named Running Stable Valid',
    }))
    expected_desc.append(RouterStatusEntryV3.create({
      'r': 'Unnamed /+fJRWjmIGNAL2C5rRZHq3R91tA 7AnpZjfdBpYzXnMNm+w1bTsFF6Y 2012-12-02 17:51:10 91.121.184.87 9001 0',
      's': 'Fast Guard Running Stable Valid',
    }))
    event = _get_event(NEWCONSENSUS_EVENT)
    self.assertTrue(isinstance(event, stem.response.events.NewConsensusEvent))
    self.assertEqual(expected_desc, event.desc)
def test_ns_event(self):
expected_desc = RouterStatusEntryV3.create({
'r': 'whnetz dbBxYcJriTTrcxsuy4PUZcMRwCA VStM7KAIH/mXXoGDUpoGB1OXufg 2012-12-02 21:03:56 141.70.120.13 9001 9030',
's': 'Fast HSDir Named Stable V2Dir Valid',
})
event = _get_event(NS_EVENT)
self.assertTrue(isinstance(event, stem.response.events.NetworkStatusEvent))
self.assertEqual([expected_desc], event.desc)
  def test_orconn_event(self):
    """
    Parses ORCONN events in the CLOSED, CONNECTED, and LAUNCHED states, then
    checks that malformed variants are rejected.
    """
    # closed connection, identified by fingerprint and nickname
    event = _get_event(ORCONN_CLOSED)
    self.assertTrue(isinstance(event, stem.response.events.ORConnEvent))
    self.assertEqual(ORCONN_CLOSED.lstrip('650 '), str(event))
    self.assertEqual('$A1130635A0CDA6F60C276FBF6994EFBD4ECADAB1~tama', event.endpoint)
    self.assertEqual('A1130635A0CDA6F60C276FBF6994EFBD4ECADAB1', event.endpoint_fingerprint)
    self.assertEqual('tama', event.endpoint_nickname)
    self.assertEqual(None, event.endpoint_address)
    self.assertEqual(None, event.endpoint_port)
    self.assertEqual(ORStatus.CLOSED, event.status)
    self.assertEqual(ORClosureReason.DONE, event.reason)
    self.assertEqual(None, event.circ_count)
    self.assertEqual(None, event.id)
    # established connection, identified by an address:port endpoint
    event = _get_event(ORCONN_CONNECTED)
    self.assertTrue(isinstance(event, stem.response.events.ORConnEvent))
    self.assertEqual(ORCONN_CONNECTED.lstrip('650 '), str(event))
    self.assertEqual('127.0.0.1:9000', event.endpoint)
    self.assertEqual(None, event.endpoint_fingerprint)
    self.assertEqual(None, event.endpoint_nickname)
    self.assertEqual('127.0.0.1', event.endpoint_address)
    self.assertEqual(9000, event.endpoint_port)
    self.assertEqual(ORStatus.CONNECTED, event.status)
    self.assertEqual(None, event.reason)
    self.assertEqual(20, event.circ_count)
    self.assertEqual('18', event.id)
    # launched connection, identified by fingerprint and nickname
    event = _get_event(ORCONN_LAUNCHED)
    self.assertTrue(isinstance(event, stem.response.events.ORConnEvent))
    self.assertEqual(ORCONN_LAUNCHED.lstrip('650 '), str(event))
    self.assertEqual('$7ED90E2833EE38A75795BA9237B0A4560E51E1A0=GreenDragon', event.endpoint)
    self.assertEqual('7ED90E2833EE38A75795BA9237B0A4560E51E1A0', event.endpoint_fingerprint)
    self.assertEqual('GreenDragon', event.endpoint_nickname)
    self.assertEqual(None, event.endpoint_address)
    self.assertEqual(None, event.endpoint_port)
    self.assertEqual(ORStatus.LAUNCHED, event.status)
    self.assertEqual(None, event.reason)
    self.assertEqual(None, event.circ_count)
    # malformed fingerprint
    self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_1)
    # invalid port number ('001')
    self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_2)
    # non-numeric NCIRCS
    self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_3)
    # invalid connection id
    self.assertRaises(ProtocolError, _get_event, ORCONN_BAD_4)
def test_signal_event(self):
event = _get_event('650 SIGNAL DEBUG')
self.assertTrue(isinstance(event, stem.response.events.SignalEvent))
self.assertEqual('SIGNAL DEBUG', str(event))
self.assertEqual(Signal.DEBUG, event.signal)
event = _get_event('650 SIGNAL DUMP')
self.assertEqual(Signal.DUMP, event.signal)
def test_status_event_consensus_arrived(self):
event = _get_event(STATUS_GENERAL_CONSENSUS_ARRIVED)
self.assertTrue(isinstance(event, stem.response.events.StatusEvent))
self.assertEqual(STATUS_GENERAL_CONSENSUS_ARRIVED.lstrip('650 '), str(event))
self.assertEqual(StatusType.GENERAL, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('CONSENSUS_ARRIVED', event.action)
def test_status_event_enough_dir_info(self):
event = _get_event(STATUS_CLIENT_ENOUGH_DIR_INFO)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('ENOUGH_DIR_INFO', event.action)
def test_status_event_circuit_established(self):
event = _get_event(STATUS_CLIENT_CIRC_ESTABLISHED)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('CIRCUIT_ESTABLISHED', event.action)
def test_status_event_bootstrap_descriptors(self):
event = _get_event(STATUS_CLIENT_BOOTSTRAP_DESCRIPTORS)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('BOOTSTRAP', event.action)
expected_attr = {
'PROGRESS': '53',
'TAG': 'loading_descriptors',
'SUMMARY': 'Loading relay descriptors',
}
self.assertEqual(expected_attr, event.arguments)
def test_status_event_bootstrap_stuck(self):
event = _get_event(STATUS_CLIENT_BOOTSTRAP_STUCK)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.WARN, event.runlevel)
self.assertEqual('BOOTSTRAP', event.action)
expected_attr = {
'PROGRESS': '80',
'TAG': 'conn_or',
'SUMMARY': 'Connecting to the Tor network',
'WARNING': 'Network is unreachable',
'REASON': 'NOROUTE',
'COUNT': '5',
'RECOMMENDATION': 'warn',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_bootstrap_connecting(self):
event = _get_event(STATUS_CLIENT_BOOTSTRAP_CONNECTING)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('BOOTSTRAP', event.action)
expected_attr = {
'PROGRESS': '80',
'TAG': 'conn_or',
'SUMMARY': 'Connecting to the Tor network',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_bootstrap_first_handshake(self):
event = _get_event(STATUS_CLIENT_BOOTSTRAP_FIRST_HANDSHAKE)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('BOOTSTRAP', event.action)
expected_attr = {
'PROGRESS': '85',
'TAG': 'handshake_or',
'SUMMARY': 'Finishing handshake with first hop',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_bootstrap_established(self):
event = _get_event(STATUS_CLIENT_BOOTSTRAP_ESTABLISHED)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('BOOTSTRAP', event.action)
expected_attr = {
'PROGRESS': '90',
'TAG': 'circuit_create',
'SUMMARY': 'Establishing a Tor circuit',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_bootstrap_done(self):
event = _get_event(STATUS_CLIENT_BOOTSTRAP_DONE)
self.assertEqual(StatusType.CLIENT, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('BOOTSTRAP', event.action)
expected_attr = {
'PROGRESS': '100',
'TAG': 'done',
'SUMMARY': 'Done',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_bootstrap_check_reachability(self):
event = _get_event(STATUS_SERVER_CHECK_REACHABILITY)
self.assertEqual(StatusType.SERVER, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('CHECKING_REACHABILITY', event.action)
expected_attr = {
'ORADDRESS': '71.35.143.230:9050',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_dns_timeout(self):
event = _get_event(STATUS_SERVER_DNS_TIMEOUT)
self.assertEqual(StatusType.SERVER, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('NAMESERVER_STATUS', event.action)
expected_attr = {
'NS': '205.171.3.25',
'STATUS': 'DOWN',
'ERR': 'request timed out.',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_dns_down(self):
event = _get_event(STATUS_SERVER_DNS_DOWN)
self.assertEqual(StatusType.SERVER, event.status_type)
self.assertEqual(Runlevel.WARN, event.runlevel)
self.assertEqual('NAMESERVER_ALL_DOWN', event.action)
def test_status_event_dns_up(self):
event = _get_event(STATUS_SERVER_DNS_UP)
self.assertEqual(StatusType.SERVER, event.status_type)
self.assertEqual(Runlevel.NOTICE, event.runlevel)
self.assertEqual('NAMESERVER_STATUS', event.action)
expected_attr = {
'NS': '205.171.3.25',
'STATUS': 'UP',
}
self.assertEqual(expected_attr, event.keyword_args)
def test_status_event_bug(self):
# briefly insert a fake value in EVENT_TYPE_TO_CLASS
stem.response.events.EVENT_TYPE_TO_CLASS['STATUS_SPECIFIC'] = stem.response.events.StatusEvent
self.assertRaises(ValueError, _get_event, STATUS_SPECIFIC_CONSENSUS_ARRIVED)
del stem.response.events.EVENT_TYPE_TO_CLASS['STATUS_SPECIFIC']
def test_stream_event(self):
  """Parses STREAM events across their lifecycle (NEW, SENTCONNECT, REMAP,
  SUCCEEDED, CLOSED), plus directory/DNS streams, malformed input, and an
  IPv6 target.
  """

  # Freshly created stream: source endpoint and purpose are present, no
  # circuit has been attached yet.
  event = _get_event(STREAM_NEW)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  # NOTE(review): lstrip('650 ') strips a *character set* ('6', '5', '0',
  # space) rather than the literal prefix; it works here only because the
  # event payload never starts with one of those characters.
  self.assertEqual(STREAM_NEW.lstrip('650 '), str(event))
  self.assertEqual('18', event.id)
  self.assertEqual(StreamStatus.NEW, event.status)
  self.assertEqual(None, event.circ_id)
  self.assertEqual('encrypted.google.com:443', event.target)
  self.assertEqual('encrypted.google.com', event.target_address)
  self.assertEqual(443, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual('127.0.0.1:47849', event.source_addr)
  self.assertEqual('127.0.0.1', event.source_address)
  self.assertEqual(47849, event.source_port)
  self.assertEqual(StreamPurpose.USER, event.purpose)

  # Stream attached to circuit 26 and attempting to connect: the
  # source/purpose attributes are only emitted for NEW/NEWRESOLVE.
  event = _get_event(STREAM_SENTCONNECT)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_SENTCONNECT.lstrip('650 '), str(event))
  self.assertEqual('18', event.id)
  self.assertEqual(StreamStatus.SENTCONNECT, event.status)
  self.assertEqual('26', event.circ_id)
  self.assertEqual('encrypted.google.com:443', event.target)
  self.assertEqual('encrypted.google.com', event.target_address)
  self.assertEqual(443, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual(None, event.source_addr)
  self.assertEqual(None, event.source_address)
  self.assertEqual(None, event.source_port)
  self.assertEqual(None, event.purpose)

  # Exit-initiated remap: hostname target replaced with the resolved IP.
  event = _get_event(STREAM_REMAP)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_REMAP.lstrip('650 '), str(event))
  self.assertEqual('18', event.id)
  self.assertEqual(StreamStatus.REMAP, event.status)
  self.assertEqual('26', event.circ_id)
  self.assertEqual('74.125.227.129:443', event.target)
  self.assertEqual('74.125.227.129', event.target_address)
  self.assertEqual(443, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(StreamSource.EXIT, event.source)
  self.assertEqual(None, event.source_addr)
  self.assertEqual(None, event.source_address)
  self.assertEqual(None, event.source_port)
  self.assertEqual(None, event.purpose)

  # Connection established successfully.
  event = _get_event(STREAM_SUCCEEDED)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_SUCCEEDED.lstrip('650 '), str(event))
  self.assertEqual('18', event.id)
  self.assertEqual(StreamStatus.SUCCEEDED, event.status)
  self.assertEqual('26', event.circ_id)
  self.assertEqual('74.125.227.129:443', event.target)
  self.assertEqual('74.125.227.129', event.target_address)
  self.assertEqual(443, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual(None, event.source_addr)
  self.assertEqual(None, event.source_address)
  self.assertEqual(None, event.source_port)
  self.assertEqual(None, event.purpose)

  # Stream closed due to a connection reset; the REASON argument is set.
  event = _get_event(STREAM_CLOSED_RESET)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_CLOSED_RESET.lstrip('650 '), str(event))
  self.assertEqual('21', event.id)
  self.assertEqual(StreamStatus.CLOSED, event.status)
  self.assertEqual('26', event.circ_id)
  self.assertEqual('74.125.227.129:443', event.target)
  self.assertEqual('74.125.227.129', event.target_address)
  self.assertEqual(443, event.target_port)
  self.assertEqual(StreamClosureReason.CONNRESET, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual(None, event.source_addr)
  self.assertEqual(None, event.source_address)
  self.assertEqual(None, event.source_port)
  self.assertEqual(None, event.purpose)

  # Stream closed normally.
  event = _get_event(STREAM_CLOSED_DONE)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_CLOSED_DONE.lstrip('650 '), str(event))
  self.assertEqual('25', event.id)
  self.assertEqual(StreamStatus.CLOSED, event.status)
  self.assertEqual('26', event.circ_id)
  self.assertEqual('199.7.52.72:80', event.target)
  self.assertEqual('199.7.52.72', event.target_address)
  self.assertEqual(80, event.target_port)
  self.assertEqual(StreamClosureReason.DONE, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual(None, event.source_addr)
  self.assertEqual(None, event.source_address)
  self.assertEqual(None, event.source_port)
  self.assertEqual(None, event.purpose)

  # Internal directory fetch: target uses the '.exit' notation and the
  # source is tor itself ('(Tor_internal):0').
  event = _get_event(STREAM_DIR_FETCH)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_DIR_FETCH.lstrip('650 '), str(event))
  self.assertEqual('14', event.id)
  self.assertEqual(StreamStatus.NEW, event.status)
  self.assertEqual(None, event.circ_id)
  self.assertEqual('176.28.51.238.$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA.exit:443', event.target)
  self.assertEqual('176.28.51.238.$649F2D0ACF418F7CFC6539AB2257EB2D5297BAFA.exit', event.target_address)
  self.assertEqual(443, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual('(Tor_internal):0', event.source_addr)
  self.assertEqual('(Tor_internal)', event.source_address)
  self.assertEqual(0, event.source_port)
  self.assertEqual(StreamPurpose.DIR_FETCH, event.purpose)

  # DNS resolution request: target port is zero.
  event = _get_event(STREAM_DNS_REQUEST)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_DNS_REQUEST.lstrip('650 '), str(event))
  self.assertEqual('1113', event.id)
  self.assertEqual(StreamStatus.NEW, event.status)
  self.assertEqual(None, event.circ_id)
  self.assertEqual('www.google.com:0', event.target)
  self.assertEqual('www.google.com', event.target_address)
  self.assertEqual(0, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual('127.0.0.1:15297', event.source_addr)
  self.assertEqual('127.0.0.1', event.source_address)
  self.assertEqual(15297, event.source_port)
  self.assertEqual(StreamPurpose.DNS_REQUEST, event.purpose)

  # missing target
  self.assertRaises(ProtocolError, _get_event, STREAM_SENTCONNECT_BAD_1)

  # target is missing a port
  self.assertRaises(ProtocolError, _get_event, STREAM_SENTCONNECT_BAD_2)

  # target's port is malformed
  self.assertRaises(ProtocolError, _get_event, STREAM_SENTCONNECT_BAD_3)

  # SOURCE_ADDR is missing a port
  self.assertRaises(ProtocolError, _get_event, STREAM_DNS_REQUEST_BAD_1)

  # SOURCE_ADDR's port is malformed
  self.assertRaises(ProtocolError, _get_event, STREAM_DNS_REQUEST_BAD_2)

  # IPv6 address
  event = _get_event(STREAM_NEWRESOLVE_IP6)

  self.assertTrue(isinstance(event, stem.response.events.StreamEvent))
  self.assertEqual(STREAM_NEWRESOLVE_IP6.lstrip('650 '), str(event))
  self.assertEqual('23', event.id)
  self.assertEqual(StreamStatus.NEWRESOLVE, event.status)
  self.assertEqual(None, event.circ_id)
  # The final ':0' is the port; everything before it is the IPv6 address.
  self.assertEqual('2001:db8::1:0', event.target)
  self.assertEqual('2001:db8::1', event.target_address)
  self.assertEqual(0, event.target_port)
  self.assertEqual(None, event.reason)
  self.assertEqual(None, event.remote_reason)
  self.assertEqual(None, event.source)
  self.assertEqual(None, event.source_addr)
  self.assertEqual(None, event.source_address)
  self.assertEqual(None, event.source_port)
  self.assertEqual(StreamPurpose.DNS_REQUEST, event.purpose)
def test_stream_bw_event(self):
  """Parses STREAM_BW events, including malformed ids and byte counts."""

  event = _get_event('650 STREAM_BW 2 15 25')

  self.assertTrue(isinstance(event, stem.response.events.StreamBwEvent))
  self.assertEqual('2', event.id)
  self.assertEqual(15, event.written)
  self.assertEqual(25, event.read)

  # Stream ids may be alphanumeric, and zero byte counts are valid.
  event = _get_event('650 STREAM_BW Stream02 0 0')
  self.assertEqual('Stream02', event.id)
  self.assertEqual(0, event.written)
  self.assertEqual(0, event.read)

  # Missing fields, invalid id characters, overlong ids, negative or
  # non-numeric byte counts should all be rejected.
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW 2')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW 2 15')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW - 15 25')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW 12345678901234567 15 25')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW 2 -15 25')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW 2 15 -25')
  self.assertRaises(ProtocolError, _get_event, '650 STREAM_BW 2 x 25')
def test_transport_launched_event(self):
  """Parses TRANSPORT_LAUNCHED events, both valid and malformed."""

  event = _get_event(TRANSPORT_LAUNCHED)

  self.assertTrue(isinstance(event, stem.response.events.TransportLaunchedEvent))
  self.assertEqual(TRANSPORT_LAUNCHED.lstrip('650 '), str(event))
  self.assertEqual('server', event.type)
  self.assertEqual('obfs1', event.name)
  self.assertEqual('127.0.0.1', event.address)
  self.assertEqual(1111, event.port)

  # Unknown transport type, malformed address, and malformed port.
  self.assertRaises(ProtocolError, _get_event, TRANSPORT_LAUNCHED_BAD_TYPE)
  self.assertRaises(ProtocolError, _get_event, TRANSPORT_LAUNCHED_BAD_ADDRESS)
  self.assertRaises(ProtocolError, _get_event, TRANSPORT_LAUNCHED_BAD_PORT)
def test_conn_bw_event(self):
  """Parses CONN_BW events, both valid and malformed."""

  event = _get_event(CONN_BW)

  self.assertTrue(isinstance(event, stem.response.events.ConnectionBandwidthEvent))
  self.assertEqual(CONN_BW.lstrip('650 '), str(event))
  self.assertEqual('11', event.id)
  self.assertEqual(stem.ConnectionType.DIR, event.conn_type)
  self.assertEqual(272, event.read)
  self.assertEqual(817, event.written)

  self.assertRaises(ProtocolError, _get_event, CONN_BW_BAD_WRITTEN_VALUE)
  self.assertRaises(ProtocolError, _get_event, CONN_BW_BAD_MISSING_ID)
def test_circ_bw_event(self):
  """Parses CIRC_BW events, both valid and malformed."""

  event = _get_event(CIRC_BW)

  self.assertTrue(isinstance(event, stem.response.events.CircuitBandwidthEvent))
  self.assertEqual(CIRC_BW.lstrip('650 '), str(event))
  self.assertEqual('11', event.id)
  self.assertEqual(272, event.read)
  self.assertEqual(817, event.written)

  self.assertRaises(ProtocolError, _get_event, CIRC_BW_BAD_WRITTEN_VALUE)
  self.assertRaises(ProtocolError, _get_event, CIRC_BW_BAD_MISSING_ID)
def test_cell_stats_event(self):
  """Parses CELL_STATS events with outbound-only and inbound+outbound data."""

  # Outbound-only statistics (inbound attributes stay None).
  event = _get_event(CELL_STATS_1)

  self.assertTrue(isinstance(event, stem.response.events.CellStatsEvent))
  self.assertEqual(CELL_STATS_1.lstrip('650 '), str(event))
  self.assertEqual('14', event.id)
  self.assertEqual(None, event.inbound_queue)
  self.assertEqual(None, event.inbound_connection)
  self.assertEqual(None, event.inbound_added)
  self.assertEqual(None, event.inbound_removed)
  self.assertEqual(None, event.inbound_time)
  self.assertEqual('19403', event.outbound_queue)
  self.assertEqual('15', event.outbound_connection)
  self.assertEqual({'create_fast': 1, 'relay_early': 2}, event.outbound_added)
  self.assertEqual({'create_fast': 1, 'relay_early': 2}, event.outbound_removed)
  self.assertEqual({'create_fast': 0, 'relay_early': 0}, event.outbound_time)

  # Both directions populated, but no circuit id.
  event = _get_event(CELL_STATS_2)

  self.assertTrue(isinstance(event, stem.response.events.CellStatsEvent))
  self.assertEqual(CELL_STATS_2.lstrip('650 '), str(event))
  self.assertEqual(None, event.id)
  self.assertEqual('19403', event.inbound_queue)
  self.assertEqual('32', event.inbound_connection)
  self.assertEqual({'relay': 1, 'created_fast': 1}, event.inbound_added)
  self.assertEqual({'relay': 1, 'created_fast': 1}, event.inbound_removed)
  self.assertEqual({'relay': 0, 'created_fast': 0}, event.inbound_time)
  self.assertEqual('6710', event.outbound_queue)
  self.assertEqual('18', event.outbound_connection)
  self.assertEqual({'create': 1, 'relay_early': 1}, event.outbound_added)
  self.assertEqual({'create': 1, 'relay_early': 1}, event.outbound_removed)
  self.assertEqual({'create': 0, 'relay_early': 0}, event.outbound_time)

  # check a few invalid mappings (bad key or value)

  self.assertRaises(ProtocolError, _get_event, CELL_STATS_BAD_1)
  self.assertRaises(ProtocolError, _get_event, CELL_STATS_BAD_2)
  self.assertRaises(ProtocolError, _get_event, CELL_STATS_BAD_3)
def test_token_bucket_empty_event(self):
  """Parses TB_EMPTY events for each bucket type, plus malformed input."""

  # Per-connection (ORCONN) bucket: includes a connection id.
  event = _get_event(TB_EMPTY_1)

  self.assertTrue(isinstance(event, stem.response.events.TokenBucketEmptyEvent))
  self.assertEqual(TB_EMPTY_1.lstrip('650 '), str(event))
  self.assertEqual(stem.TokenBucket.ORCONN, event.bucket)
  self.assertEqual('16', event.id)
  self.assertEqual(0, event.read)
  self.assertEqual(0, event.written)
  self.assertEqual(100, event.last_refill)

  # Global bucket: no id.
  event = _get_event(TB_EMPTY_2)

  self.assertTrue(isinstance(event, stem.response.events.TokenBucketEmptyEvent))
  self.assertEqual(TB_EMPTY_2.lstrip('650 '), str(event))
  self.assertEqual(stem.TokenBucket.GLOBAL, event.bucket)
  self.assertEqual(None, event.id)
  self.assertEqual(93, event.read)
  self.assertEqual(93, event.written)
  self.assertEqual(100, event.last_refill)

  # Relay bucket: no id.
  event = _get_event(TB_EMPTY_3)

  self.assertTrue(isinstance(event, stem.response.events.TokenBucketEmptyEvent))
  self.assertEqual(TB_EMPTY_3.lstrip('650 '), str(event))
  self.assertEqual(stem.TokenBucket.RELAY, event.bucket)
  self.assertEqual(None, event.id)
  self.assertEqual(93, event.read)
  self.assertEqual(93, event.written)
  self.assertEqual(100, event.last_refill)

  self.assertRaises(ProtocolError, _get_event, TB_EMPTY_BAD_1)
  self.assertRaises(ProtocolError, _get_event, TB_EMPTY_BAD_2)
def test_unrecognized_enum_logging(self):
  """
  Checks that when event parsing gets a value that isn't recognized by stem's
  enumeration of the attribute that we log a message.
  """

  stem_logger = stem.util.log.get_logger()
  logging_buffer = stem.util.log.LogBuffer(stem.util.log.INFO)
  stem_logger.addHandler(logging_buffer)

  # Detach the buffer in a finally block so a failed assertion can't leave
  # our handler attached to stem's logger for the rest of the test run.
  try:
    # Try parsing a valid event. We shouldn't log anything.

    _get_event(STATUS_GENERAL_CONSENSUS_ARRIVED)
    self.assertTrue(logging_buffer.is_empty())
    self.assertEqual([], list(logging_buffer))

    # Parse an invalid runlevel.

    _get_event(STATUS_GENERAL_CONSENSUS_ARRIVED.replace('NOTICE', 'OMEGA_CRITICAL!!!'))
    logged_events = list(logging_buffer)

    self.assertEqual(1, len(logged_events))
    self.assertTrue('STATUS_GENERAL event had an unrecognized runlevel' in logged_events[0])
  finally:
    stem_logger.removeHandler(logging_buffer)
| lgpl-3.0 |
junhuac/MQUIC | src/tools/symsrc/source_index.py | 59 | 19487 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Usage: <win-path-to-pdb.pdb>
This tool will take a PDB on the command line, extract the source files that
were used in building the PDB, query the source server for which repository
and revision these files are at, and then finally write this information back
into the PDB in a format that the debugging tools understand. This allows for
automatic source debugging, as all of the information is contained in the PDB,
and the debugger can go out and fetch the source files.
You most likely want to run these immediately after a build, since the source
input files need to match the generated PDB, and we want the correct
revision information for the exact files that were used for the build.
The following files from a windbg + source server installation are expected
to reside in the same directory as this python script:
dbghelp.dll
pdbstr.exe
srctool.exe
NOTE: Expected to run under a native win32 python, NOT cygwin. All paths are
dealt with as win32 paths, since we have to interact with the Microsoft tools.
"""
import os
import optparse
import sys
import tempfile
import time
import subprocess
import win32api
from collections import namedtuple
# This serves two purposes. First, it acts as a whitelist, and only files
# from repositories listed here will be source indexed. Second, it allows us
# to map from one URL to another, so we can map to external source servers. It
# also indicates if the source for this project will be retrieved in a base64
# encoded format.
# TODO(sebmarchand): Initialize this variable in the main function and pass it
# to the sub functions instead of having a global variable.
#
# Each value is either None (project listed but not indexable) or a dict with:
#   'url': a format string taking {file_path} and {revision};
#   'base64': whether the fetched source is base64 encoded.
REPO_MAP = {
  'http://src.chromium.org/svn': {
    'url': 'https://src.chromium.org/chrome/'
           '{file_path}?revision={revision}',
    'base64': False
  },
  'https://src.chromium.org/svn': {
    'url': 'https://src.chromium.org/chrome/'
           '{file_path}?revision={revision}',
    'base64': False
  }
}

# Groups of projects sharing the same URL schemes; expanded into REPO_MAP by
# FillRepositoriesMap() at import time.
PROJECT_GROUPS = [
  # Googlecode SVN projects
  {
    'projects': [
      'angleproject',
      'google-breakpad',
      'google-cache-invalidation-api',
      'google-url',
      'googletest',
      'leveldb',
      'libphonenumber',
      'libyuv',
      'open-vcdiff',
      'ots',
      'sawbuck',
      'sfntly',
      'smhasher',
      'v8',
      'v8-i18n',
      'webrtc',
    ],
    'public_url': 'https://%s.googlecode.com/svn-history/' \
                  'r{revision}/{file_path}',
    'svn_urls': [
      'svn://svn-mirror.golo.chromium.org/%s',
      'http://src.chromium.org/%s',
      'https://src.chromium.org/%s',
      'http://%s.googlecode.com/svn',
      'https://%s.googlecode.com/svn',
    ],
  },
  # Googlecode Git projects
  {
    'projects': [
      'syzygy',
    ],
    'public_url': 'https://%s.googlecode.com/git-history/' \
                  '{revision}/{file_path}',
    'svn_urls': [
      'https://code.google.com/p/%s/',
    ],
  },
  # Chrome projects
  {
    'projects': [
      'blink',
      'chrome',
      'multivm',
      'native_client',
    ],
    'public_url': 'https://src.chromium.org/%s/' \
                  '{file_path}?revision={revision}',
    'svn_urls': [
      'svn://chrome-svn/%s',
      'svn://chrome-svn.corp.google.com/%s',
      'svn://svn-mirror.golo.chromium.org/%s',
      'svn://svn.chromium.org/%s',
    ],
  },
]

# A named tuple used to store the information about a repository.
#
# It contains the following members:
# - repo: The URL of the repository;
# - rev: The revision (or hash) of the current checkout.
# - file_list: The list of files coming from this repository.
# - root_path: The root path of this checkout.
# - path_prefix: A prefix to apply to the filename of the files coming from
#   this repository.
RevisionInfo = namedtuple('RevisionInfo',
                          ['repo', 'rev', 'files', 'root_path', 'path_prefix'])
def GetCasedFilePath(filename):
  """Return |filename| with the casing the filesystem actually uses.

  Round-tripping through the short (8.3) form makes win32 canonicalize the
  casing of every path component.
  """
  short_form = win32api.GetShortPathName(unicode(filename))
  return win32api.GetLongPathName(short_form)
def FillRepositoriesMap():
  """Expand PROJECT_GROUPS into REPO_MAP for every whitelisted project.

  Each project's svn URLs map to its public source-server URL; the public
  URL itself is registered with a None value (listed, but not remapped).
  """
  for group in PROJECT_GROUPS:
    public_url_template = group['public_url']
    for project in group['projects']:
      project_public_url = public_url_template % project
      for svn_url_template in group['svn_urls']:
        REPO_MAP[svn_url_template % project] = {
          'url': project_public_url,
          'base64': False
        }
      REPO_MAP[project_public_url] = None

FillRepositoriesMap()
def FindFile(filename):
  """Return the absolute windows path of |filename| located next to this
  script."""
  script_dir = os.path.dirname(os.path.join(os.path.curdir, __file__))
  return os.path.abspath(os.path.join(script_dir, filename))
def RunCommand(*cmd, **kwargs):
  """Runs a command.

  Returns what have been printed to stdout by this command.

  kwargs:
    raise_on_failure: Indicates if an exception should be raised on failure, if
        set to false then the function will return None.
  """
  # Default to fully piped I/O with text-mode newlines; callers may override
  # any of these (e.g. to set |cwd|).
  kwargs.setdefault('stdin', subprocess.PIPE)
  kwargs.setdefault('stdout', subprocess.PIPE)
  kwargs.setdefault('stderr', subprocess.PIPE)
  kwargs.setdefault('universal_newlines', True)
  # Pop our private kwarg before handing the rest to Popen.
  raise_on_failure = kwargs.pop('raise_on_failure', True)

  proc = subprocess.Popen(cmd, **kwargs)
  ret, err = proc.communicate()
  if proc.returncode != 0:
    if raise_on_failure:
      print 'Error: %s' % err
      raise subprocess.CalledProcessError(proc.returncode, cmd)
    # Best-effort mode: failures are reported as None.
    return

  # Normalize away the trailing newline of the captured stdout.
  ret = (ret or '').rstrip('\n')
  return ret
def ExtractSourceFiles(pdb_filename):
  """Return the set of lower-cased source file paths referenced by a PDB.

  Raises an exception if srctool.exe reports an error or prints nothing.
  """
  output = RunCommand(FindFile('srctool.exe'), '-r', pdb_filename)
  if not output or output.startswith("srctool: "):
    raise Exception("srctool failed: " + output)
  nonempty_lines = (line for line in output.split('\n') if line)
  return set(line.lower() for line in nonempty_lines)
def ReadSourceStream(pdb_filename):
  """Read the contents of the source information stream from a PDB.

  Returns the raw 'srcsrv' stream text; empty output means the PDB has no
  source indexing information yet.
  """
  srctool = subprocess.Popen([FindFile('pdbstr.exe'),
                              '-r', '-s:srcsrv',
                              '-p:%s' % pdb_filename],
                             stdout=subprocess.PIPE, stderr=subprocess.PIPE)
  data, _ = srctool.communicate()

  # pdbstr exits with -1 when the stream is simply absent, which is not an
  # error for us; anything else (or an explicit "pdbstr: " message) is.
  if ((srctool.returncode != 0 and srctool.returncode != -1) or
      data.startswith("pdbstr: ")):
    raise Exception("pdbstr failed: " + data)
  return data
def WriteSourceStream(pdb_filename, data):
  """Write the contents of the source information stream to a PDB.

  Args:
    pdb_filename: The PDB to write the 'srcsrv' stream into.
    data: The stream contents, as a single string.

  Raises an exception if pdbstr.exe reports a failure.
  """
  # Write out the data to a temporary filename that we can pass to pdbstr.
  (f, fname) = tempfile.mkstemp()
  f = os.fdopen(f, "wb")
  f.write(data)
  f.close()

  # Delete the temp file in a finally block so it isn't leaked when pdbstr
  # fails (the original code only unlinked on the success path).
  try:
    srctool = subprocess.Popen([FindFile('pdbstr.exe'),
                                '-w', '-s:srcsrv',
                                '-i:%s' % fname,
                                '-p:%s' % pdb_filename],
                               stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    data, _ = srctool.communicate()

    if ((srctool.returncode != 0 and srctool.returncode != -1) or
        data.startswith("pdbstr: ")):
      raise Exception("pdbstr failed: " + data)
  finally:
    os.unlink(fname)
def GetSVNRepoInfo(local_path):
  """Calls svn info to extract the SVN information about a path.

  Returns a dict of the 'Key: value' fields printed by svn info, or None if
  the path isn't part of an SVN checkout.
  """
  # We call svn.bat to make sure and get the depot tools SVN and not cygwin.
  info = RunCommand('svn.bat', 'info', local_path, raise_on_failure=False)
  if not info:
    return

  # Hack up into a dictionary of the fields printed by svn info. maxsplit
  # must be 1 (it was 2): a value containing ': ' would otherwise yield a
  # 3-element item and make dict() raise ValueError.
  vals = dict((y.split(': ', 1) for y in info.split('\n') if y))
  return vals
def ExtractSVNInfo(local_filename):
  """Checks if a file is coming from a svn repository and if so returns some
  information about it.

  Args:
    local_filename: The name of the file that we want to check.

  Returns:
    None if the file doesn't come from a svn repository, otherwise it returns a
    RevisionInfo tuple.
  """
  # Try to get the svn information about this file.
  vals = GetSVNRepoInfo(local_filename)
  if not vals:
    return

  repo = vals['Repository Root']
  if not vals['URL'].startswith(repo):
    raise Exception("URL is not inside of the repository root?!?")
  rev = vals['Revision']

  svn_local_root = os.path.split(local_filename)[0]

  # We need to look at the SVN URL of the current path to handle the case when
  # we do a partial SVN checkout inside another checkout of the same repository.
  # This happens in Chromium where we do some checkout of
  # '/trunk/deps/third_party' in 'src/third_party'.
  svn_root_url = os.path.dirname(vals['URL'])

  # Don't try to list all the files from this repository as this seem to slow
  # down the indexing, instead index one file at a time.
  file_list = [local_filename.replace(svn_local_root, '').lstrip(os.path.sep)]

  # path_prefix is the repository-relative directory of this file's parent.
  return RevisionInfo(repo=repo, rev=rev, files=file_list,
      root_path=svn_local_root, path_prefix=svn_root_url.replace(repo, ''))
def ExtractGitInfo(local_filename):
  """Checks if a file is coming from a git repository and if so returns some
  information about it.

  Args:
    local_filename: The name of the file that we want to check.

  Returns:
    None if the file doesn't come from a git repository, otherwise it returns a
    RevisionInfo tuple.
  """
  # Starts by checking if this file is coming from a git repository. For that
  # we'll start by calling 'git info' on this file; for this to work we need to
  # make sure that the current working directory is correctly cased. It turns
  # out that even on Windows the casing of the path passed in the |cwd| argument
  # of subprocess.Popen matters and if it's not correctly cased then 'git info'
  # will return None even if the file is coming from a git repository. This
  # is not the case if we're just interested in checking if the path containing
  # |local_filename| is coming from a git repository, in this case the casing
  # doesn't matter.
  local_filename = GetCasedFilePath(local_filename)
  local_file_basename = os.path.basename(local_filename)
  local_file_dir = os.path.dirname(local_filename)
  file_info = RunCommand('git.bat', 'log', '-n', '1', local_file_basename,
      cwd=local_file_dir, raise_on_failure=False)

  if not file_info:
    return

  # Get the revision of the master branch.
  rev = RunCommand('git.bat', 'rev-parse', 'HEAD', cwd=local_file_dir)

  # Get the url of the remote repository.
  repo = RunCommand('git.bat', 'config', '--get', 'remote.origin.url',
      cwd=local_file_dir)
  # If the repository point to a local directory then we need to run this
  # command one more time from this directory to get the repository url.
  if os.path.isdir(repo):
    repo = RunCommand('git.bat', 'config', '--get', 'remote.origin.url',
        cwd=repo)

  # Don't use the authenticated path.
  repo = repo.replace('googlesource.com/a/', 'googlesource.com/')

  # Get the relative file path for this file in the git repository.
  # (converted to windows separators for later path comparisons)
  git_path = RunCommand('git.bat', 'ls-tree', '--full-name', '--name-only',
      'HEAD', local_file_basename, cwd=local_file_dir).replace('/','\\')

  if not git_path:
    return

  git_root_path = local_filename.replace(git_path, '')

  if repo not in REPO_MAP:
    # Automatically adds the project coming from a git GoogleCode repository to
    # the repository map. The files from these repositories are accessible via
    # gitiles in a base64 encoded format.
    if 'chromium.googlesource.com' in repo:
      REPO_MAP[repo] = {
        'url': '%s/+/{revision}/{file_path}?format=TEXT' % repo,
        'base64': True
      }

  # Get the list of files coming from this repository.
  git_file_list = RunCommand('git.bat', 'ls-tree', '--full-name', '--name-only',
      'HEAD', '-r', cwd=git_root_path)

  file_list = [x for x in git_file_list.splitlines() if len(x) != 0]

  return RevisionInfo(repo=repo, rev=rev, files=file_list,
      root_path=git_root_path, path_prefix=None)
def IndexFilesFromRepo(local_filename, file_list, output_lines):
"""Checks if a given file is a part of a revision control repository (svn or
git) and index all the files from this repository if it's the case.
Args:
local_filename: The filename of the current file.
file_list: The list of files that should be indexed.
output_lines: The source indexing lines that will be appended to the PDB.
Returns the number of indexed files.
"""
indexed_files = 0
# Try to extract the revision info for the current file.
info = ExtractGitInfo(local_filename)
if not info:
info = ExtractSVNInfo(local_filename)
repo = info.repo
rev = info.rev
files = info.files
root_path = info.root_path.lower()
# Checks if we should index this file and if the source that we'll retrieve
# will be base64 encoded.
should_index = False
base_64 = False
if repo in REPO_MAP:
should_index = True
base_64 = REPO_MAP[repo].get('base64')
else:
repo = None
# Iterates over the files from this repo and index them if needed.
for file_iter in files:
current_filename = file_iter.lower()
full_file_path = os.path.normpath(os.path.join(root_path, current_filename))
# Checks if the file is in the list of files to be indexed.
if full_file_path in file_list:
if should_index:
source_url = ''
current_file = file_iter
# Prefix the filename with the prefix for this repository if needed.
if info.path_prefix:
current_file = os.path.join(info.path_prefix, current_file)
source_url = REPO_MAP[repo].get('url').format(revision=rev,
file_path=os.path.normpath(current_file).replace('\\', '/'))
output_lines.append('%s*%s*%s*%s*%s' % (full_file_path, current_file,
rev, source_url, 'base64.b64decode' if base_64 else ''))
indexed_files += 1
file_list.remove(full_file_path)
# The input file should have been removed from the list of files to index.
if indexed_files and local_filename in file_list:
print '%s shouldn\'t be in the list of files to index anymore.' % \
local_filename
# TODO(sebmarchand): Turn this into an exception once I've confirmed that
# this doesn't happen on the official builder.
file_list.remove(local_filename)
return indexed_files
def DirectoryIsUnderPublicVersionControl(local_dir):
  """Return True if |local_dir| is part of a Git or SVN checkout."""
  # Git first: a remote URL in the config means we're in a checkout.
  if RunCommand('git.bat', 'config', '--get', 'remote.origin.url',
                cwd=local_dir, raise_on_failure=False):
    return True

  # Otherwise fall back to asking SVN.
  return bool(GetSVNRepoInfo(local_dir))
def UpdatePDB(pdb_filename, verbose=True, build_dir=None, toolchain_dir=None):
"""Update a pdb file with source information."""
dir_blacklist = { }
if build_dir:
# Blacklisting the build directory allows skipping the generated files, for
# Chromium this makes the indexing ~10x faster.
build_dir = (os.path.normpath(build_dir)).lower()
for directory, _, _ in os.walk(build_dir):
dir_blacklist[directory.lower()] = True
dir_blacklist[build_dir.lower()] = True
if toolchain_dir:
# Blacklisting the directories from the toolchain as we don't have revision
# info for them.
toolchain_dir = (os.path.normpath(toolchain_dir)).lower()
for directory, _, _ in os.walk(build_dir):
dir_blacklist[directory.lower()] = True
dir_blacklist[toolchain_dir.lower()] = True
# Writes the header of the source index stream.
#
# Here's the description of the variables used in the SRC_* macros (those
# variables have to be defined for every source file that we want to index):
# var1: The file path.
# var2: The name of the file without its path.
# var3: The revision or the hash of this file's repository.
# var4: The URL to this file.
# var5: (optional) The python method to call to decode this file, e.g. for
# a base64 encoded file this value should be 'base64.b64decode'.
lines = [
'SRCSRV: ini ------------------------------------------------',
'VERSION=1',
'INDEXVERSION=2',
'VERCTRL=Subversion',
'DATETIME=%s' % time.asctime(),
'SRCSRV: variables ------------------------------------------',
'SRC_EXTRACT_TARGET_DIR=%targ%\%fnbksl%(%var2%)\%var3%',
'SRC_EXTRACT_TARGET=%SRC_EXTRACT_TARGET_DIR%\%fnfile%(%var1%)',
'SRC_EXTRACT_CMD=cmd /c "mkdir "%SRC_EXTRACT_TARGET_DIR%" & python -c '
'"import urllib2, base64;'
'url = \\\"%var4%\\\";'
'u = urllib2.urlopen(url);'
'print %var5%(u.read());" > "%SRC_EXTRACT_TARGET%""',
'SRCSRVTRG=%SRC_EXTRACT_TARGET%',
'SRCSRVCMD=%SRC_EXTRACT_CMD%',
'SRCSRV: source files ---------------------------------------',
]
if ReadSourceStream(pdb_filename):
raise Exception("PDB already has source indexing information!")
filelist = ExtractSourceFiles(pdb_filename)
number_of_files = len(filelist)
indexed_files_total = 0
while filelist:
filename = next(iter(filelist))
filedir = os.path.dirname(filename)
if verbose:
print "[%d / %d] Processing: %s" % (number_of_files - len(filelist),
number_of_files, filename)
# This directory is blacklisted, either because it's not part of a
# repository, or from one we're not interested in indexing.
if dir_blacklist.get(filedir, False):
if verbose:
print " skipping, directory is blacklisted."
filelist.remove(filename)
continue
# Skip the files that don't exist on the current machine.
if not os.path.exists(filename):
filelist.remove(filename)
continue
# Try to index the current file and all the ones coming from the same
# repository.
indexed_files = IndexFilesFromRepo(filename, filelist, lines)
if not indexed_files:
if not DirectoryIsUnderPublicVersionControl(filedir):
dir_blacklist[filedir] = True
if verbose:
print "Adding %s to the blacklist." % filedir
filelist.remove(filename)
continue
indexed_files_total += indexed_files
if verbose:
print " %d files have been indexed." % indexed_files
lines.append('SRCSRV: end ------------------------------------------------')
WriteSourceStream(pdb_filename, '\r\n'.join(lines))
if verbose:
print "%d / %d files have been indexed." % (indexed_files_total,
number_of_files)
def main():
  """Parse the command line and source index every PDB given as argument."""
  parser = optparse.OptionParser()
  parser.add_option('-v', '--verbose', action='store_true', default=False)
  parser.add_option('--build-dir', help='The original build directory, if set '
      'all the files present in this directory (or one of its subdirectories) '
      'will be skipped.')
  parser.add_option('--toolchain-dir', help='The directory containing the '
      'toolchain that has been used for this build. If set all the files '
      'present in this directory (or one of its subdirectories) will be '
      'skipped.')
  options, args = parser.parse_args()

  if not args:
    parser.error('Specify a pdb')

  for pdb in args:
    # Fix: forward --toolchain-dir, which was parsed but previously never
    # passed on to UpdatePDB.
    UpdatePDB(pdb, options.verbose, options.build_dir, options.toolchain_dir)

  return 0
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
  sys.exit(main())
| mit |
apache/airflow | airflow/contrib/task_runner/cgroup_task_runner.py | 2 | 1134 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use :mod:`airflow.task.task_runner.cgroup_task_runner`."""
import warnings
from airflow.task.task_runner.cgroup_task_runner import CgroupTaskRunner # noqa
# Emit the deprecation notice at import time; stacklevel=2 attributes the
# warning to the module importing this shim rather than to this file.
warnings.warn(
    "This module is deprecated. Please use `airflow.task.task_runner.cgroup_task_runner`.",
    DeprecationWarning,
    stacklevel=2,
)
| apache-2.0 |
saisaizhang/Food | flask/lib/python2.7/site-packages/setuptools/command/test.py | 16 | 6469 | from distutils.errors import DistutilsOptionError
from unittest import TestLoader
import unittest
import sys
from pkg_resources import (resource_listdir, resource_exists, normalize_path,
working_set, _namespace_packages,
add_activation_listener, require, EntryPoint)
from setuptools import Command
from setuptools.compat import PY3
from setuptools.py31compat import unittest_main
class ScanningLoader(TestLoader):
    def loadTestsFromModule(self, module):
        """Collect every test case reachable from *module*.

        Packages are scanned recursively: each ``.py`` file and each
        subpackage contributes its tests.  A module-level
        ``additional_tests`` callable, when present, is invoked and its
        result is included as well.
        """
        collected = [TestLoader.loadTestsFromModule(self, module)]

        if hasattr(module, "additional_tests"):
            collected.append(module.additional_tests())

        # Only packages carry __path__; plain modules have nothing to scan.
        if hasattr(module, '__path__'):
            for entry in resource_listdir(module.__name__, ''):
                if entry.endswith('.py') and entry != '__init__.py':
                    submodule = module.__name__ + '.' + entry[:-3]
                elif resource_exists(module.__name__, entry + '/__init__.py'):
                    submodule = module.__name__ + '.' + entry
                else:
                    continue
                collected.append(self.loadTestsFromName(submodule))

        # A single result is returned bare instead of nested in a suite.
        if len(collected) == 1:
            return collected[0]
        return self.suiteClass(collected)
class test(Command):
    """Command to run unit tests after in-place build"""

    description = "run unit tests after in-place build"

    user_options = [
        ('test-module=', 'm', "Run 'test_suite' in specified module"),
        ('test-suite=', 's',
         "Test suite to run (e.g. 'some_module.test_suite')"),
        ('test-runner=', 'r', "Test runner to use"),
    ]

    def initialize_options(self):
        # All options start unset; finalize_options derives the effective
        # values, falling back to attributes of the distribution object.
        self.test_suite = None
        self.test_module = None
        self.test_loader = None
        self.test_runner = None

    def finalize_options(self):
        # --test-suite and --test-module are mutually exclusive; a bare
        # module name implies running "<module>.test_suite".
        if self.test_suite is None:
            if self.test_module is None:
                self.test_suite = self.distribution.test_suite
            else:
                self.test_suite = self.test_module + ".test_suite"
        elif self.test_module:
            raise DistutilsOptionError(
                "You may specify a module or a suite, but not both"
            )
        # test_args becomes the argv tail handed to unittest in run_tests.
        self.test_args = [self.test_suite]
        if self.verbose:
            self.test_args.insert(0, '--verbose')
        # Loader/runner resolution order: command option, then distribution
        # attribute, then (for the loader) this module's ScanningLoader.
        if self.test_loader is None:
            self.test_loader = getattr(self.distribution, 'test_loader', None)
        if self.test_loader is None:
            self.test_loader = "setuptools.command.test:ScanningLoader"
        if self.test_runner is None:
            self.test_runner = getattr(self.distribution, 'test_runner', None)

    def with_project_on_sys_path(self, func):
        """Build the project, put the result on sys.path, call *func*, then
        restore sys.path and sys.modules to their prior state.
        """
        with_2to3 = PY3 and getattr(self.distribution, 'use_2to3', False)
        if with_2to3:
            # If we run 2to3 we can not do this inplace:

            # Ensure metadata is up-to-date
            self.reinitialize_command('build_py', inplace=0)
            self.run_command('build_py')
            bpy_cmd = self.get_finalized_command("build_py")
            build_path = normalize_path(bpy_cmd.build_lib)

            # Build extensions against the 2to3-converted tree.
            self.reinitialize_command('egg_info', egg_base=build_path)
            self.run_command('egg_info')
            self.reinitialize_command('build_ext', inplace=0)
            self.run_command('build_ext')
        else:
            # Without 2to3 inplace works fine:
            self.run_command('egg_info')
            # Build extensions in-place
            self.reinitialize_command('build_ext', inplace=1)
            self.run_command('build_ext')
        ei_cmd = self.get_finalized_command("egg_info")
        # Snapshot interpreter state so the test run cannot leak imports or
        # path entries back into the build process.
        old_path = sys.path[:]
        old_modules = sys.modules.copy()
        try:
            sys.path.insert(0, normalize_path(ei_cmd.egg_base))
            working_set.__init__()
            add_activation_listener(lambda dist: dist.activate())
            require('%s==%s' % (ei_cmd.egg_name, ei_cmd.egg_version))
            func()
        finally:
            sys.path[:] = old_path
            sys.modules.clear()
            sys.modules.update(old_modules)
            working_set.__init__()

    def run(self):
        # Make both install-time and test-time dependencies importable
        # before running anything.
        if self.distribution.install_requires:
            self.distribution.fetch_build_eggs(
                self.distribution.install_requires)
        if self.distribution.tests_require:
            self.distribution.fetch_build_eggs(self.distribution.tests_require)
        if self.test_suite:
            cmd = ' '.join(self.test_args)
            if self.dry_run:
                self.announce('skipping "unittest %s" (dry run)' % cmd)
            else:
                self.announce('running "unittest %s"' % cmd)
                self.with_project_on_sys_path(self.run_tests)

    def run_tests(self):
        # Purge modules under test from sys.modules. The test loader will
        # re-import them from the build location. Required when 2to3 is used
        # with namespace packages.
        if PY3 and getattr(self.distribution, 'use_2to3', False):
            module = self.test_args[-1].split('.')[0]
            if module in _namespace_packages:
                del_modules = []
                if module in sys.modules:
                    del_modules.append(module)
                module += '.'
                for name in sys.modules:
                    if name.startswith(module):
                        del_modules.append(name)
                list(map(sys.modules.__delitem__, del_modules))
        unittest_main(
            None, None, [unittest.__file__] + self.test_args,
            testLoader=self._resolve_as_ep(self.test_loader),
            testRunner=self._resolve_as_ep(self.test_runner),
        )

    @staticmethod
    def _resolve_as_ep(val):
        """
        Load the indicated attribute value, called, as if it were
        specified as an entry point.
        """
        if val is None:
            return
        parsed = EntryPoint.parse("x=" + val)
        return parsed._load()()
| bsd-3-clause |
linktlh/Toontown-journey | toontown/fishing/DistributedFishingPondAI.py | 5 | 1691 | from direct.directnotify.DirectNotifyGlobal import *
from direct.distributed.DistributedObjectAI import DistributedObjectAI
from toontown.fishing import FishingTargetGlobals
from toontown.fishing.DistributedFishingTargetAI import DistributedFishingTargetAI
class DistributedFishingPondAI(DistributedObjectAI):
    """Server-side fishing pond: tracks its area, targets, and fishing spots."""

    notify = directNotify.newCategory("DistributedFishingPondAI")

    def __init__(self, air):
        DistributedObjectAI.__init__(self, air)
        # Populated after generation via setArea/addTarget/addSpot.
        self.area = None
        self.targets = {}
        self.spots = {}
        self.bingoMgr = None

    def start(self):
        # Spawn as many distributed targets as this pond's area calls for;
        # each target registers itself back through addTarget.
        for _ in xrange(FishingTargetGlobals.getNumTargets(self.area)):
            newTarget = DistributedFishingTargetAI(simbase.air)
            newTarget.setPondDoId(self.doId)
            newTarget.generateWithRequired(self.zoneId)

    def hitTarget(self, target):
        avId = self.air.getAvatarIdFromSender()
        if self.targets.get(target) is None:
            self.air.writeServerEvent('suspicious', avId, 'Toon tried to hit nonexistent fishing target!')
            return
        spot = self.hasToon(avId)
        if not spot:
            self.air.writeServerEvent('suspicious', avId, 'Toon tried to catch fish while not fishing!')
            return
        spot.rewardIfValid(target)

    def addTarget(self, target):
        self.targets[target.doId] = target

    def addSpot(self, spot):
        self.spots[spot.doId] = spot

    def setArea(self, area):
        self.area = area

    def getArea(self):
        return self.area

    def hasToon(self, avId):
        # Return the occupied spot whose avatar matches, or None.
        for spot in self.spots.values():
            if spot.avId == avId:
                return spot
| apache-2.0 |
0Chencc/CTFCrackTools | Lib/encodings/iso8859_14.py | 593 | 13908 | """ Python Character Mapping Codec iso8859_14 generated from 'MAPPINGS/ISO8859/8859-14.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless iso8859-14 codec backed by the module's charmap tables."""

    def encode(self, input, errors='strict'):
        # charmap_encode returns (encoded_bytes, length_consumed).
        return codecs.charmap_encode(input, errors, encoding_table)

    def decode(self, input, errors='strict'):
        # charmap_decode returns (unicode_text, length_consumed).
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*, discarding the consumed-length bookkeeping."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_table)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*, discarding the consumed-length bookkeeping."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream plumbing comes from codecs.StreamWriter; the actual
    # transcoding is inherited from Codec.encode.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream plumbing comes from codecs.StreamReader; the actual
    # transcoding is inherited from Codec.decode.
    pass
### encodings module API
def getregentry():
    """Return the codecs-registry entry for the 'iso8859-14' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='iso8859-14',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table

# 256-entry tuple: decoding_table[byte] is the unicode character for that
# byte value.  Machine-generated by gencodec.py from the ISO 8859-14
# mapping file -- regenerate rather than editing entries by hand.
decoding_table = (
    u'\x00' # 0x00 -> NULL
    u'\x01' # 0x01 -> START OF HEADING
    u'\x02' # 0x02 -> START OF TEXT
    u'\x03' # 0x03 -> END OF TEXT
    u'\x04' # 0x04 -> END OF TRANSMISSION
    u'\x05' # 0x05 -> ENQUIRY
    u'\x06' # 0x06 -> ACKNOWLEDGE
    u'\x07' # 0x07 -> BELL
    u'\x08' # 0x08 -> BACKSPACE
    u'\t' # 0x09 -> HORIZONTAL TABULATION
    u'\n' # 0x0A -> LINE FEED
    u'\x0b' # 0x0B -> VERTICAL TABULATION
    u'\x0c' # 0x0C -> FORM FEED
    u'\r' # 0x0D -> CARRIAGE RETURN
    u'\x0e' # 0x0E -> SHIFT OUT
    u'\x0f' # 0x0F -> SHIFT IN
    u'\x10' # 0x10 -> DATA LINK ESCAPE
    u'\x11' # 0x11 -> DEVICE CONTROL ONE
    u'\x12' # 0x12 -> DEVICE CONTROL TWO
    u'\x13' # 0x13 -> DEVICE CONTROL THREE
    u'\x14' # 0x14 -> DEVICE CONTROL FOUR
    u'\x15' # 0x15 -> NEGATIVE ACKNOWLEDGE
    u'\x16' # 0x16 -> SYNCHRONOUS IDLE
    u'\x17' # 0x17 -> END OF TRANSMISSION BLOCK
    u'\x18' # 0x18 -> CANCEL
    u'\x19' # 0x19 -> END OF MEDIUM
    u'\x1a' # 0x1A -> SUBSTITUTE
    u'\x1b' # 0x1B -> ESCAPE
    u'\x1c' # 0x1C -> FILE SEPARATOR
    u'\x1d' # 0x1D -> GROUP SEPARATOR
    u'\x1e' # 0x1E -> RECORD SEPARATOR
    u'\x1f' # 0x1F -> UNIT SEPARATOR
    u' ' # 0x20 -> SPACE
    u'!' # 0x21 -> EXCLAMATION MARK
    u'"' # 0x22 -> QUOTATION MARK
    u'#' # 0x23 -> NUMBER SIGN
    u'$' # 0x24 -> DOLLAR SIGN
    u'%' # 0x25 -> PERCENT SIGN
    u'&' # 0x26 -> AMPERSAND
    u"'" # 0x27 -> APOSTROPHE
    u'(' # 0x28 -> LEFT PARENTHESIS
    u')' # 0x29 -> RIGHT PARENTHESIS
    u'*' # 0x2A -> ASTERISK
    u'+' # 0x2B -> PLUS SIGN
    u',' # 0x2C -> COMMA
    u'-' # 0x2D -> HYPHEN-MINUS
    u'.' # 0x2E -> FULL STOP
    u'/' # 0x2F -> SOLIDUS
    u'0' # 0x30 -> DIGIT ZERO
    u'1' # 0x31 -> DIGIT ONE
    u'2' # 0x32 -> DIGIT TWO
    u'3' # 0x33 -> DIGIT THREE
    u'4' # 0x34 -> DIGIT FOUR
    u'5' # 0x35 -> DIGIT FIVE
    u'6' # 0x36 -> DIGIT SIX
    u'7' # 0x37 -> DIGIT SEVEN
    u'8' # 0x38 -> DIGIT EIGHT
    u'9' # 0x39 -> DIGIT NINE
    u':' # 0x3A -> COLON
    u';' # 0x3B -> SEMICOLON
    u'<' # 0x3C -> LESS-THAN SIGN
    u'=' # 0x3D -> EQUALS SIGN
    u'>' # 0x3E -> GREATER-THAN SIGN
    u'?' # 0x3F -> QUESTION MARK
    u'@' # 0x40 -> COMMERCIAL AT
    u'A' # 0x41 -> LATIN CAPITAL LETTER A
    u'B' # 0x42 -> LATIN CAPITAL LETTER B
    u'C' # 0x43 -> LATIN CAPITAL LETTER C
    u'D' # 0x44 -> LATIN CAPITAL LETTER D
    u'E' # 0x45 -> LATIN CAPITAL LETTER E
    u'F' # 0x46 -> LATIN CAPITAL LETTER F
    u'G' # 0x47 -> LATIN CAPITAL LETTER G
    u'H' # 0x48 -> LATIN CAPITAL LETTER H
    u'I' # 0x49 -> LATIN CAPITAL LETTER I
    u'J' # 0x4A -> LATIN CAPITAL LETTER J
    u'K' # 0x4B -> LATIN CAPITAL LETTER K
    u'L' # 0x4C -> LATIN CAPITAL LETTER L
    u'M' # 0x4D -> LATIN CAPITAL LETTER M
    u'N' # 0x4E -> LATIN CAPITAL LETTER N
    u'O' # 0x4F -> LATIN CAPITAL LETTER O
    u'P' # 0x50 -> LATIN CAPITAL LETTER P
    u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
    u'R' # 0x52 -> LATIN CAPITAL LETTER R
    u'S' # 0x53 -> LATIN CAPITAL LETTER S
    u'T' # 0x54 -> LATIN CAPITAL LETTER T
    u'U' # 0x55 -> LATIN CAPITAL LETTER U
    u'V' # 0x56 -> LATIN CAPITAL LETTER V
    u'W' # 0x57 -> LATIN CAPITAL LETTER W
    u'X' # 0x58 -> LATIN CAPITAL LETTER X
    u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
    u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
    u'[' # 0x5B -> LEFT SQUARE BRACKET
    u'\\' # 0x5C -> REVERSE SOLIDUS
    u']' # 0x5D -> RIGHT SQUARE BRACKET
    u'^' # 0x5E -> CIRCUMFLEX ACCENT
    u'_' # 0x5F -> LOW LINE
    u'`' # 0x60 -> GRAVE ACCENT
    u'a' # 0x61 -> LATIN SMALL LETTER A
    u'b' # 0x62 -> LATIN SMALL LETTER B
    u'c' # 0x63 -> LATIN SMALL LETTER C
    u'd' # 0x64 -> LATIN SMALL LETTER D
    u'e' # 0x65 -> LATIN SMALL LETTER E
    u'f' # 0x66 -> LATIN SMALL LETTER F
    u'g' # 0x67 -> LATIN SMALL LETTER G
    u'h' # 0x68 -> LATIN SMALL LETTER H
    u'i' # 0x69 -> LATIN SMALL LETTER I
    u'j' # 0x6A -> LATIN SMALL LETTER J
    u'k' # 0x6B -> LATIN SMALL LETTER K
    u'l' # 0x6C -> LATIN SMALL LETTER L
    u'm' # 0x6D -> LATIN SMALL LETTER M
    u'n' # 0x6E -> LATIN SMALL LETTER N
    u'o' # 0x6F -> LATIN SMALL LETTER O
    u'p' # 0x70 -> LATIN SMALL LETTER P
    u'q' # 0x71 -> LATIN SMALL LETTER Q
    u'r' # 0x72 -> LATIN SMALL LETTER R
    u's' # 0x73 -> LATIN SMALL LETTER S
    u't' # 0x74 -> LATIN SMALL LETTER T
    u'u' # 0x75 -> LATIN SMALL LETTER U
    u'v' # 0x76 -> LATIN SMALL LETTER V
    u'w' # 0x77 -> LATIN SMALL LETTER W
    u'x' # 0x78 -> LATIN SMALL LETTER X
    u'y' # 0x79 -> LATIN SMALL LETTER Y
    u'z' # 0x7A -> LATIN SMALL LETTER Z
    u'{' # 0x7B -> LEFT CURLY BRACKET
    u'|' # 0x7C -> VERTICAL LINE
    u'}' # 0x7D -> RIGHT CURLY BRACKET
    u'~' # 0x7E -> TILDE
    u'\x7f' # 0x7F -> DELETE
    u'\x80' # 0x80 -> <control>
    u'\x81' # 0x81 -> <control>
    u'\x82' # 0x82 -> <control>
    u'\x83' # 0x83 -> <control>
    u'\x84' # 0x84 -> <control>
    u'\x85' # 0x85 -> <control>
    u'\x86' # 0x86 -> <control>
    u'\x87' # 0x87 -> <control>
    u'\x88' # 0x88 -> <control>
    u'\x89' # 0x89 -> <control>
    u'\x8a' # 0x8A -> <control>
    u'\x8b' # 0x8B -> <control>
    u'\x8c' # 0x8C -> <control>
    u'\x8d' # 0x8D -> <control>
    u'\x8e' # 0x8E -> <control>
    u'\x8f' # 0x8F -> <control>
    u'\x90' # 0x90 -> <control>
    u'\x91' # 0x91 -> <control>
    u'\x92' # 0x92 -> <control>
    u'\x93' # 0x93 -> <control>
    u'\x94' # 0x94 -> <control>
    u'\x95' # 0x95 -> <control>
    u'\x96' # 0x96 -> <control>
    u'\x97' # 0x97 -> <control>
    u'\x98' # 0x98 -> <control>
    u'\x99' # 0x99 -> <control>
    u'\x9a' # 0x9A -> <control>
    u'\x9b' # 0x9B -> <control>
    u'\x9c' # 0x9C -> <control>
    u'\x9d' # 0x9D -> <control>
    u'\x9e' # 0x9E -> <control>
    u'\x9f' # 0x9F -> <control>
    u'\xa0' # 0xA0 -> NO-BREAK SPACE
    u'\u1e02' # 0xA1 -> LATIN CAPITAL LETTER B WITH DOT ABOVE
    u'\u1e03' # 0xA2 -> LATIN SMALL LETTER B WITH DOT ABOVE
    u'\xa3' # 0xA3 -> POUND SIGN
    u'\u010a' # 0xA4 -> LATIN CAPITAL LETTER C WITH DOT ABOVE
    u'\u010b' # 0xA5 -> LATIN SMALL LETTER C WITH DOT ABOVE
    u'\u1e0a' # 0xA6 -> LATIN CAPITAL LETTER D WITH DOT ABOVE
    u'\xa7' # 0xA7 -> SECTION SIGN
    u'\u1e80' # 0xA8 -> LATIN CAPITAL LETTER W WITH GRAVE
    u'\xa9' # 0xA9 -> COPYRIGHT SIGN
    u'\u1e82' # 0xAA -> LATIN CAPITAL LETTER W WITH ACUTE
    u'\u1e0b' # 0xAB -> LATIN SMALL LETTER D WITH DOT ABOVE
    u'\u1ef2' # 0xAC -> LATIN CAPITAL LETTER Y WITH GRAVE
    u'\xad' # 0xAD -> SOFT HYPHEN
    u'\xae' # 0xAE -> REGISTERED SIGN
    u'\u0178' # 0xAF -> LATIN CAPITAL LETTER Y WITH DIAERESIS
    u'\u1e1e' # 0xB0 -> LATIN CAPITAL LETTER F WITH DOT ABOVE
    u'\u1e1f' # 0xB1 -> LATIN SMALL LETTER F WITH DOT ABOVE
    u'\u0120' # 0xB2 -> LATIN CAPITAL LETTER G WITH DOT ABOVE
    u'\u0121' # 0xB3 -> LATIN SMALL LETTER G WITH DOT ABOVE
    u'\u1e40' # 0xB4 -> LATIN CAPITAL LETTER M WITH DOT ABOVE
    u'\u1e41' # 0xB5 -> LATIN SMALL LETTER M WITH DOT ABOVE
    u'\xb6' # 0xB6 -> PILCROW SIGN
    u'\u1e56' # 0xB7 -> LATIN CAPITAL LETTER P WITH DOT ABOVE
    u'\u1e81' # 0xB8 -> LATIN SMALL LETTER W WITH GRAVE
    u'\u1e57' # 0xB9 -> LATIN SMALL LETTER P WITH DOT ABOVE
    u'\u1e83' # 0xBA -> LATIN SMALL LETTER W WITH ACUTE
    u'\u1e60' # 0xBB -> LATIN CAPITAL LETTER S WITH DOT ABOVE
    u'\u1ef3' # 0xBC -> LATIN SMALL LETTER Y WITH GRAVE
    u'\u1e84' # 0xBD -> LATIN CAPITAL LETTER W WITH DIAERESIS
    u'\u1e85' # 0xBE -> LATIN SMALL LETTER W WITH DIAERESIS
    u'\u1e61' # 0xBF -> LATIN SMALL LETTER S WITH DOT ABOVE
    u'\xc0' # 0xC0 -> LATIN CAPITAL LETTER A WITH GRAVE
    u'\xc1' # 0xC1 -> LATIN CAPITAL LETTER A WITH ACUTE
    u'\xc2' # 0xC2 -> LATIN CAPITAL LETTER A WITH CIRCUMFLEX
    u'\xc3' # 0xC3 -> LATIN CAPITAL LETTER A WITH TILDE
    u'\xc4' # 0xC4 -> LATIN CAPITAL LETTER A WITH DIAERESIS
    u'\xc5' # 0xC5 -> LATIN CAPITAL LETTER A WITH RING ABOVE
    u'\xc6' # 0xC6 -> LATIN CAPITAL LETTER AE
    u'\xc7' # 0xC7 -> LATIN CAPITAL LETTER C WITH CEDILLA
    u'\xc8' # 0xC8 -> LATIN CAPITAL LETTER E WITH GRAVE
    u'\xc9' # 0xC9 -> LATIN CAPITAL LETTER E WITH ACUTE
    u'\xca' # 0xCA -> LATIN CAPITAL LETTER E WITH CIRCUMFLEX
    u'\xcb' # 0xCB -> LATIN CAPITAL LETTER E WITH DIAERESIS
    u'\xcc' # 0xCC -> LATIN CAPITAL LETTER I WITH GRAVE
    u'\xcd' # 0xCD -> LATIN CAPITAL LETTER I WITH ACUTE
    u'\xce' # 0xCE -> LATIN CAPITAL LETTER I WITH CIRCUMFLEX
    u'\xcf' # 0xCF -> LATIN CAPITAL LETTER I WITH DIAERESIS
    u'\u0174' # 0xD0 -> LATIN CAPITAL LETTER W WITH CIRCUMFLEX
    u'\xd1' # 0xD1 -> LATIN CAPITAL LETTER N WITH TILDE
    u'\xd2' # 0xD2 -> LATIN CAPITAL LETTER O WITH GRAVE
    u'\xd3' # 0xD3 -> LATIN CAPITAL LETTER O WITH ACUTE
    u'\xd4' # 0xD4 -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
    u'\xd5' # 0xD5 -> LATIN CAPITAL LETTER O WITH TILDE
    u'\xd6' # 0xD6 -> LATIN CAPITAL LETTER O WITH DIAERESIS
    u'\u1e6a' # 0xD7 -> LATIN CAPITAL LETTER T WITH DOT ABOVE
    u'\xd8' # 0xD8 -> LATIN CAPITAL LETTER O WITH STROKE
    u'\xd9' # 0xD9 -> LATIN CAPITAL LETTER U WITH GRAVE
    u'\xda' # 0xDA -> LATIN CAPITAL LETTER U WITH ACUTE
    u'\xdb' # 0xDB -> LATIN CAPITAL LETTER U WITH CIRCUMFLEX
    u'\xdc' # 0xDC -> LATIN CAPITAL LETTER U WITH DIAERESIS
    u'\xdd' # 0xDD -> LATIN CAPITAL LETTER Y WITH ACUTE
    u'\u0176' # 0xDE -> LATIN CAPITAL LETTER Y WITH CIRCUMFLEX
    u'\xdf' # 0xDF -> LATIN SMALL LETTER SHARP S
    u'\xe0' # 0xE0 -> LATIN SMALL LETTER A WITH GRAVE
    u'\xe1' # 0xE1 -> LATIN SMALL LETTER A WITH ACUTE
    u'\xe2' # 0xE2 -> LATIN SMALL LETTER A WITH CIRCUMFLEX
    u'\xe3' # 0xE3 -> LATIN SMALL LETTER A WITH TILDE
    u'\xe4' # 0xE4 -> LATIN SMALL LETTER A WITH DIAERESIS
    u'\xe5' # 0xE5 -> LATIN SMALL LETTER A WITH RING ABOVE
    u'\xe6' # 0xE6 -> LATIN SMALL LETTER AE
    u'\xe7' # 0xE7 -> LATIN SMALL LETTER C WITH CEDILLA
    u'\xe8' # 0xE8 -> LATIN SMALL LETTER E WITH GRAVE
    u'\xe9' # 0xE9 -> LATIN SMALL LETTER E WITH ACUTE
    u'\xea' # 0xEA -> LATIN SMALL LETTER E WITH CIRCUMFLEX
    u'\xeb' # 0xEB -> LATIN SMALL LETTER E WITH DIAERESIS
    u'\xec' # 0xEC -> LATIN SMALL LETTER I WITH GRAVE
    u'\xed' # 0xED -> LATIN SMALL LETTER I WITH ACUTE
    u'\xee' # 0xEE -> LATIN SMALL LETTER I WITH CIRCUMFLEX
    u'\xef' # 0xEF -> LATIN SMALL LETTER I WITH DIAERESIS
    u'\u0175' # 0xF0 -> LATIN SMALL LETTER W WITH CIRCUMFLEX
    u'\xf1' # 0xF1 -> LATIN SMALL LETTER N WITH TILDE
    u'\xf2' # 0xF2 -> LATIN SMALL LETTER O WITH GRAVE
    u'\xf3' # 0xF3 -> LATIN SMALL LETTER O WITH ACUTE
    u'\xf4' # 0xF4 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
    u'\xf5' # 0xF5 -> LATIN SMALL LETTER O WITH TILDE
    u'\xf6' # 0xF6 -> LATIN SMALL LETTER O WITH DIAERESIS
    u'\u1e6b' # 0xF7 -> LATIN SMALL LETTER T WITH DOT ABOVE
    u'\xf8' # 0xF8 -> LATIN SMALL LETTER O WITH STROKE
    u'\xf9' # 0xF9 -> LATIN SMALL LETTER U WITH GRAVE
    u'\xfa' # 0xFA -> LATIN SMALL LETTER U WITH ACUTE
    u'\xfb' # 0xFB -> LATIN SMALL LETTER U WITH CIRCUMFLEX
    u'\xfc' # 0xFC -> LATIN SMALL LETTER U WITH DIAERESIS
    u'\xfd' # 0xFD -> LATIN SMALL LETTER Y WITH ACUTE
    u'\u0177' # 0xFE -> LATIN SMALL LETTER Y WITH CIRCUMFLEX
    u'\xff' # 0xFF -> LATIN SMALL LETTER Y WITH DIAERESIS
)

### Encoding table

# Inverse mapping (unicode ordinal -> byte), derived from decoding_table.
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
michalliu/OpenWrt-Firefly-Libraries | staging_dir/host/lib/python3.4/encodings/cp775.py | 272 | 34476 | """ Python Character Mapping Codec cp775 generated from 'VENDORS/MICSFT/PC/CP775.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    """Stateless cp775 codec backed by the module's charmap tables."""

    def encode(self,input,errors='strict'):
        # charmap_encode returns (encoded_bytes, length_consumed).
        return codecs.charmap_encode(input, errors, encoding_map)

    def decode(self,input,errors='strict'):
        # charmap_decode returns (text, length_consumed).
        return codecs.charmap_decode(input, errors, decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    def encode(self, input, final=False):
        """Encode *input*, discarding the consumed-length bookkeeping."""
        encoded, _consumed = codecs.charmap_encode(input, self.errors, encoding_map)
        return encoded
class IncrementalDecoder(codecs.IncrementalDecoder):
    def decode(self, input, final=False):
        """Decode *input*, discarding the consumed-length bookkeeping."""
        decoded, _consumed = codecs.charmap_decode(input, self.errors, decoding_table)
        return decoded
class StreamWriter(Codec,codecs.StreamWriter):
    # Stream plumbing comes from codecs.StreamWriter; the actual
    # transcoding is inherited from Codec.encode.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # Stream plumbing comes from codecs.StreamReader; the actual
    # transcoding is inherited from Codec.decode.
    pass
### encodings module API
def getregentry():
    """Return the codecs-registry entry for the 'cp775' encoding."""
    codec = Codec()
    return codecs.CodecInfo(
        name='cp775',
        encode=codec.encode,
        decode=codec.decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Map

# Start from the identity mapping for all 256 byte values, then override the
# upper half (0x80-0xFF) with the CP775 (Baltic) repertoire.  Machine-
# generated by gencodec.py; regenerate rather than hand-editing entries.
decoding_map = codecs.make_identity_dict(range(256))
decoding_map.update({
    0x0080: 0x0106, # LATIN CAPITAL LETTER C WITH ACUTE
    0x0081: 0x00fc, # LATIN SMALL LETTER U WITH DIAERESIS
    0x0082: 0x00e9, # LATIN SMALL LETTER E WITH ACUTE
    0x0083: 0x0101, # LATIN SMALL LETTER A WITH MACRON
    0x0084: 0x00e4, # LATIN SMALL LETTER A WITH DIAERESIS
    0x0085: 0x0123, # LATIN SMALL LETTER G WITH CEDILLA
    0x0086: 0x00e5, # LATIN SMALL LETTER A WITH RING ABOVE
    0x0087: 0x0107, # LATIN SMALL LETTER C WITH ACUTE
    0x0088: 0x0142, # LATIN SMALL LETTER L WITH STROKE
    0x0089: 0x0113, # LATIN SMALL LETTER E WITH MACRON
    0x008a: 0x0156, # LATIN CAPITAL LETTER R WITH CEDILLA
    0x008b: 0x0157, # LATIN SMALL LETTER R WITH CEDILLA
    0x008c: 0x012b, # LATIN SMALL LETTER I WITH MACRON
    0x008d: 0x0179, # LATIN CAPITAL LETTER Z WITH ACUTE
    0x008e: 0x00c4, # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x008f: 0x00c5, # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x0090: 0x00c9, # LATIN CAPITAL LETTER E WITH ACUTE
    0x0091: 0x00e6, # LATIN SMALL LIGATURE AE
    0x0092: 0x00c6, # LATIN CAPITAL LIGATURE AE
    0x0093: 0x014d, # LATIN SMALL LETTER O WITH MACRON
    0x0094: 0x00f6, # LATIN SMALL LETTER O WITH DIAERESIS
    0x0095: 0x0122, # LATIN CAPITAL LETTER G WITH CEDILLA
    0x0096: 0x00a2, # CENT SIGN
    0x0097: 0x015a, # LATIN CAPITAL LETTER S WITH ACUTE
    0x0098: 0x015b, # LATIN SMALL LETTER S WITH ACUTE
    0x0099: 0x00d6, # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x009a: 0x00dc, # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x009b: 0x00f8, # LATIN SMALL LETTER O WITH STROKE
    0x009c: 0x00a3, # POUND SIGN
    0x009d: 0x00d8, # LATIN CAPITAL LETTER O WITH STROKE
    0x009e: 0x00d7, # MULTIPLICATION SIGN
    0x009f: 0x00a4, # CURRENCY SIGN
    0x00a0: 0x0100, # LATIN CAPITAL LETTER A WITH MACRON
    0x00a1: 0x012a, # LATIN CAPITAL LETTER I WITH MACRON
    0x00a2: 0x00f3, # LATIN SMALL LETTER O WITH ACUTE
    0x00a3: 0x017b, # LATIN CAPITAL LETTER Z WITH DOT ABOVE
    0x00a4: 0x017c, # LATIN SMALL LETTER Z WITH DOT ABOVE
    0x00a5: 0x017a, # LATIN SMALL LETTER Z WITH ACUTE
    0x00a6: 0x201d, # RIGHT DOUBLE QUOTATION MARK
    0x00a7: 0x00a6, # BROKEN BAR
    0x00a8: 0x00a9, # COPYRIGHT SIGN
    0x00a9: 0x00ae, # REGISTERED SIGN
    0x00aa: 0x00ac, # NOT SIGN
    0x00ab: 0x00bd, # VULGAR FRACTION ONE HALF
    0x00ac: 0x00bc, # VULGAR FRACTION ONE QUARTER
    0x00ad: 0x0141, # LATIN CAPITAL LETTER L WITH STROKE
    0x00ae: 0x00ab, # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00af: 0x00bb, # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00b0: 0x2591, # LIGHT SHADE
    0x00b1: 0x2592, # MEDIUM SHADE
    0x00b2: 0x2593, # DARK SHADE
    0x00b3: 0x2502, # BOX DRAWINGS LIGHT VERTICAL
    0x00b4: 0x2524, # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x00b5: 0x0104, # LATIN CAPITAL LETTER A WITH OGONEK
    0x00b6: 0x010c, # LATIN CAPITAL LETTER C WITH CARON
    0x00b7: 0x0118, # LATIN CAPITAL LETTER E WITH OGONEK
    0x00b8: 0x0116, # LATIN CAPITAL LETTER E WITH DOT ABOVE
    0x00b9: 0x2563, # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x00ba: 0x2551, # BOX DRAWINGS DOUBLE VERTICAL
    0x00bb: 0x2557, # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x00bc: 0x255d, # BOX DRAWINGS DOUBLE UP AND LEFT
    0x00bd: 0x012e, # LATIN CAPITAL LETTER I WITH OGONEK
    0x00be: 0x0160, # LATIN CAPITAL LETTER S WITH CARON
    0x00bf: 0x2510, # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x00c0: 0x2514, # BOX DRAWINGS LIGHT UP AND RIGHT
    0x00c1: 0x2534, # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x00c2: 0x252c, # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x00c3: 0x251c, # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x00c4: 0x2500, # BOX DRAWINGS LIGHT HORIZONTAL
    0x00c5: 0x253c, # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x00c6: 0x0172, # LATIN CAPITAL LETTER U WITH OGONEK
    0x00c7: 0x016a, # LATIN CAPITAL LETTER U WITH MACRON
    0x00c8: 0x255a, # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x00c9: 0x2554, # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x00ca: 0x2569, # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x00cb: 0x2566, # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x00cc: 0x2560, # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x00cd: 0x2550, # BOX DRAWINGS DOUBLE HORIZONTAL
    0x00ce: 0x256c, # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x00cf: 0x017d, # LATIN CAPITAL LETTER Z WITH CARON
    0x00d0: 0x0105, # LATIN SMALL LETTER A WITH OGONEK
    0x00d1: 0x010d, # LATIN SMALL LETTER C WITH CARON
    0x00d2: 0x0119, # LATIN SMALL LETTER E WITH OGONEK
    0x00d3: 0x0117, # LATIN SMALL LETTER E WITH DOT ABOVE
    0x00d4: 0x012f, # LATIN SMALL LETTER I WITH OGONEK
    0x00d5: 0x0161, # LATIN SMALL LETTER S WITH CARON
    0x00d6: 0x0173, # LATIN SMALL LETTER U WITH OGONEK
    0x00d7: 0x016b, # LATIN SMALL LETTER U WITH MACRON
    0x00d8: 0x017e, # LATIN SMALL LETTER Z WITH CARON
    0x00d9: 0x2518, # BOX DRAWINGS LIGHT UP AND LEFT
    0x00da: 0x250c, # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x00db: 0x2588, # FULL BLOCK
    0x00dc: 0x2584, # LOWER HALF BLOCK
    0x00dd: 0x258c, # LEFT HALF BLOCK
    0x00de: 0x2590, # RIGHT HALF BLOCK
    0x00df: 0x2580, # UPPER HALF BLOCK
    0x00e0: 0x00d3, # LATIN CAPITAL LETTER O WITH ACUTE
    0x00e1: 0x00df, # LATIN SMALL LETTER SHARP S (GERMAN)
    0x00e2: 0x014c, # LATIN CAPITAL LETTER O WITH MACRON
    0x00e3: 0x0143, # LATIN CAPITAL LETTER N WITH ACUTE
    0x00e4: 0x00f5, # LATIN SMALL LETTER O WITH TILDE
    0x00e5: 0x00d5, # LATIN CAPITAL LETTER O WITH TILDE
    0x00e6: 0x00b5, # MICRO SIGN
    0x00e7: 0x0144, # LATIN SMALL LETTER N WITH ACUTE
    0x00e8: 0x0136, # LATIN CAPITAL LETTER K WITH CEDILLA
    0x00e9: 0x0137, # LATIN SMALL LETTER K WITH CEDILLA
    0x00ea: 0x013b, # LATIN CAPITAL LETTER L WITH CEDILLA
    0x00eb: 0x013c, # LATIN SMALL LETTER L WITH CEDILLA
    0x00ec: 0x0146, # LATIN SMALL LETTER N WITH CEDILLA
    0x00ed: 0x0112, # LATIN CAPITAL LETTER E WITH MACRON
    0x00ee: 0x0145, # LATIN CAPITAL LETTER N WITH CEDILLA
    0x00ef: 0x2019, # RIGHT SINGLE QUOTATION MARK
    0x00f0: 0x00ad, # SOFT HYPHEN
    0x00f1: 0x00b1, # PLUS-MINUS SIGN
    0x00f2: 0x201c, # LEFT DOUBLE QUOTATION MARK
    0x00f3: 0x00be, # VULGAR FRACTION THREE QUARTERS
    0x00f4: 0x00b6, # PILCROW SIGN
    0x00f5: 0x00a7, # SECTION SIGN
    0x00f6: 0x00f7, # DIVISION SIGN
    0x00f7: 0x201e, # DOUBLE LOW-9 QUOTATION MARK
    0x00f8: 0x00b0, # DEGREE SIGN
    0x00f9: 0x2219, # BULLET OPERATOR
    0x00fa: 0x00b7, # MIDDLE DOT
    0x00fb: 0x00b9, # SUPERSCRIPT ONE
    0x00fc: 0x00b3, # SUPERSCRIPT THREE
    0x00fd: 0x00b2, # SUPERSCRIPT TWO
    0x00fe: 0x25a0, # BLACK SQUARE
    0x00ff: 0x00a0, # NO-BREAK SPACE
})
### Decoding Table
decoding_table = (
'\x00' # 0x0000 -> NULL
'\x01' # 0x0001 -> START OF HEADING
'\x02' # 0x0002 -> START OF TEXT
'\x03' # 0x0003 -> END OF TEXT
'\x04' # 0x0004 -> END OF TRANSMISSION
'\x05' # 0x0005 -> ENQUIRY
'\x06' # 0x0006 -> ACKNOWLEDGE
'\x07' # 0x0007 -> BELL
'\x08' # 0x0008 -> BACKSPACE
'\t' # 0x0009 -> HORIZONTAL TABULATION
'\n' # 0x000a -> LINE FEED
'\x0b' # 0x000b -> VERTICAL TABULATION
'\x0c' # 0x000c -> FORM FEED
'\r' # 0x000d -> CARRIAGE RETURN
'\x0e' # 0x000e -> SHIFT OUT
'\x0f' # 0x000f -> SHIFT IN
'\x10' # 0x0010 -> DATA LINK ESCAPE
'\x11' # 0x0011 -> DEVICE CONTROL ONE
'\x12' # 0x0012 -> DEVICE CONTROL TWO
'\x13' # 0x0013 -> DEVICE CONTROL THREE
'\x14' # 0x0014 -> DEVICE CONTROL FOUR
'\x15' # 0x0015 -> NEGATIVE ACKNOWLEDGE
'\x16' # 0x0016 -> SYNCHRONOUS IDLE
'\x17' # 0x0017 -> END OF TRANSMISSION BLOCK
'\x18' # 0x0018 -> CANCEL
'\x19' # 0x0019 -> END OF MEDIUM
'\x1a' # 0x001a -> SUBSTITUTE
'\x1b' # 0x001b -> ESCAPE
'\x1c' # 0x001c -> FILE SEPARATOR
'\x1d' # 0x001d -> GROUP SEPARATOR
'\x1e' # 0x001e -> RECORD SEPARATOR
'\x1f' # 0x001f -> UNIT SEPARATOR
' ' # 0x0020 -> SPACE
'!' # 0x0021 -> EXCLAMATION MARK
'"' # 0x0022 -> QUOTATION MARK
'#' # 0x0023 -> NUMBER SIGN
'$' # 0x0024 -> DOLLAR SIGN
'%' # 0x0025 -> PERCENT SIGN
'&' # 0x0026 -> AMPERSAND
"'" # 0x0027 -> APOSTROPHE
'(' # 0x0028 -> LEFT PARENTHESIS
')' # 0x0029 -> RIGHT PARENTHESIS
'*' # 0x002a -> ASTERISK
'+' # 0x002b -> PLUS SIGN
',' # 0x002c -> COMMA
'-' # 0x002d -> HYPHEN-MINUS
'.' # 0x002e -> FULL STOP
'/' # 0x002f -> SOLIDUS
'0' # 0x0030 -> DIGIT ZERO
'1' # 0x0031 -> DIGIT ONE
'2' # 0x0032 -> DIGIT TWO
'3' # 0x0033 -> DIGIT THREE
'4' # 0x0034 -> DIGIT FOUR
'5' # 0x0035 -> DIGIT FIVE
'6' # 0x0036 -> DIGIT SIX
'7' # 0x0037 -> DIGIT SEVEN
'8' # 0x0038 -> DIGIT EIGHT
'9' # 0x0039 -> DIGIT NINE
':' # 0x003a -> COLON
';' # 0x003b -> SEMICOLON
'<' # 0x003c -> LESS-THAN SIGN
'=' # 0x003d -> EQUALS SIGN
'>' # 0x003e -> GREATER-THAN SIGN
'?' # 0x003f -> QUESTION MARK
'@' # 0x0040 -> COMMERCIAL AT
'A' # 0x0041 -> LATIN CAPITAL LETTER A
'B' # 0x0042 -> LATIN CAPITAL LETTER B
'C' # 0x0043 -> LATIN CAPITAL LETTER C
'D' # 0x0044 -> LATIN CAPITAL LETTER D
'E' # 0x0045 -> LATIN CAPITAL LETTER E
'F' # 0x0046 -> LATIN CAPITAL LETTER F
'G' # 0x0047 -> LATIN CAPITAL LETTER G
'H' # 0x0048 -> LATIN CAPITAL LETTER H
'I' # 0x0049 -> LATIN CAPITAL LETTER I
'J' # 0x004a -> LATIN CAPITAL LETTER J
'K' # 0x004b -> LATIN CAPITAL LETTER K
'L' # 0x004c -> LATIN CAPITAL LETTER L
'M' # 0x004d -> LATIN CAPITAL LETTER M
'N' # 0x004e -> LATIN CAPITAL LETTER N
'O' # 0x004f -> LATIN CAPITAL LETTER O
'P' # 0x0050 -> LATIN CAPITAL LETTER P
'Q' # 0x0051 -> LATIN CAPITAL LETTER Q
'R' # 0x0052 -> LATIN CAPITAL LETTER R
'S' # 0x0053 -> LATIN CAPITAL LETTER S
'T' # 0x0054 -> LATIN CAPITAL LETTER T
'U' # 0x0055 -> LATIN CAPITAL LETTER U
'V' # 0x0056 -> LATIN CAPITAL LETTER V
'W' # 0x0057 -> LATIN CAPITAL LETTER W
'X' # 0x0058 -> LATIN CAPITAL LETTER X
'Y' # 0x0059 -> LATIN CAPITAL LETTER Y
'Z' # 0x005a -> LATIN CAPITAL LETTER Z
'[' # 0x005b -> LEFT SQUARE BRACKET
'\\' # 0x005c -> REVERSE SOLIDUS
']' # 0x005d -> RIGHT SQUARE BRACKET
'^' # 0x005e -> CIRCUMFLEX ACCENT
'_' # 0x005f -> LOW LINE
'`' # 0x0060 -> GRAVE ACCENT
'a' # 0x0061 -> LATIN SMALL LETTER A
'b' # 0x0062 -> LATIN SMALL LETTER B
'c' # 0x0063 -> LATIN SMALL LETTER C
'd' # 0x0064 -> LATIN SMALL LETTER D
'e' # 0x0065 -> LATIN SMALL LETTER E
'f' # 0x0066 -> LATIN SMALL LETTER F
'g' # 0x0067 -> LATIN SMALL LETTER G
'h' # 0x0068 -> LATIN SMALL LETTER H
'i' # 0x0069 -> LATIN SMALL LETTER I
'j' # 0x006a -> LATIN SMALL LETTER J
'k' # 0x006b -> LATIN SMALL LETTER K
'l' # 0x006c -> LATIN SMALL LETTER L
'm' # 0x006d -> LATIN SMALL LETTER M
'n' # 0x006e -> LATIN SMALL LETTER N
'o' # 0x006f -> LATIN SMALL LETTER O
'p' # 0x0070 -> LATIN SMALL LETTER P
'q' # 0x0071 -> LATIN SMALL LETTER Q
'r' # 0x0072 -> LATIN SMALL LETTER R
's' # 0x0073 -> LATIN SMALL LETTER S
't' # 0x0074 -> LATIN SMALL LETTER T
'u' # 0x0075 -> LATIN SMALL LETTER U
'v' # 0x0076 -> LATIN SMALL LETTER V
'w' # 0x0077 -> LATIN SMALL LETTER W
'x' # 0x0078 -> LATIN SMALL LETTER X
'y' # 0x0079 -> LATIN SMALL LETTER Y
'z' # 0x007a -> LATIN SMALL LETTER Z
'{' # 0x007b -> LEFT CURLY BRACKET
'|' # 0x007c -> VERTICAL LINE
'}' # 0x007d -> RIGHT CURLY BRACKET
'~' # 0x007e -> TILDE
'\x7f' # 0x007f -> DELETE
'\u0106' # 0x0080 -> LATIN CAPITAL LETTER C WITH ACUTE
'\xfc' # 0x0081 -> LATIN SMALL LETTER U WITH DIAERESIS
'\xe9' # 0x0082 -> LATIN SMALL LETTER E WITH ACUTE
'\u0101' # 0x0083 -> LATIN SMALL LETTER A WITH MACRON
'\xe4' # 0x0084 -> LATIN SMALL LETTER A WITH DIAERESIS
'\u0123' # 0x0085 -> LATIN SMALL LETTER G WITH CEDILLA
'\xe5' # 0x0086 -> LATIN SMALL LETTER A WITH RING ABOVE
'\u0107' # 0x0087 -> LATIN SMALL LETTER C WITH ACUTE
'\u0142' # 0x0088 -> LATIN SMALL LETTER L WITH STROKE
'\u0113' # 0x0089 -> LATIN SMALL LETTER E WITH MACRON
'\u0156' # 0x008a -> LATIN CAPITAL LETTER R WITH CEDILLA
'\u0157' # 0x008b -> LATIN SMALL LETTER R WITH CEDILLA
'\u012b' # 0x008c -> LATIN SMALL LETTER I WITH MACRON
'\u0179' # 0x008d -> LATIN CAPITAL LETTER Z WITH ACUTE
'\xc4' # 0x008e -> LATIN CAPITAL LETTER A WITH DIAERESIS
'\xc5' # 0x008f -> LATIN CAPITAL LETTER A WITH RING ABOVE
'\xc9' # 0x0090 -> LATIN CAPITAL LETTER E WITH ACUTE
'\xe6' # 0x0091 -> LATIN SMALL LIGATURE AE
'\xc6' # 0x0092 -> LATIN CAPITAL LIGATURE AE
'\u014d' # 0x0093 -> LATIN SMALL LETTER O WITH MACRON
'\xf6' # 0x0094 -> LATIN SMALL LETTER O WITH DIAERESIS
'\u0122' # 0x0095 -> LATIN CAPITAL LETTER G WITH CEDILLA
'\xa2' # 0x0096 -> CENT SIGN
'\u015a' # 0x0097 -> LATIN CAPITAL LETTER S WITH ACUTE
'\u015b' # 0x0098 -> LATIN SMALL LETTER S WITH ACUTE
'\xd6' # 0x0099 -> LATIN CAPITAL LETTER O WITH DIAERESIS
'\xdc' # 0x009a -> LATIN CAPITAL LETTER U WITH DIAERESIS
'\xf8' # 0x009b -> LATIN SMALL LETTER O WITH STROKE
'\xa3' # 0x009c -> POUND SIGN
'\xd8' # 0x009d -> LATIN CAPITAL LETTER O WITH STROKE
'\xd7' # 0x009e -> MULTIPLICATION SIGN
'\xa4' # 0x009f -> CURRENCY SIGN
'\u0100' # 0x00a0 -> LATIN CAPITAL LETTER A WITH MACRON
'\u012a' # 0x00a1 -> LATIN CAPITAL LETTER I WITH MACRON
'\xf3' # 0x00a2 -> LATIN SMALL LETTER O WITH ACUTE
'\u017b' # 0x00a3 -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
'\u017c' # 0x00a4 -> LATIN SMALL LETTER Z WITH DOT ABOVE
'\u017a' # 0x00a5 -> LATIN SMALL LETTER Z WITH ACUTE
'\u201d' # 0x00a6 -> RIGHT DOUBLE QUOTATION MARK
'\xa6' # 0x00a7 -> BROKEN BAR
'\xa9' # 0x00a8 -> COPYRIGHT SIGN
'\xae' # 0x00a9 -> REGISTERED SIGN
'\xac' # 0x00aa -> NOT SIGN
'\xbd' # 0x00ab -> VULGAR FRACTION ONE HALF
'\xbc' # 0x00ac -> VULGAR FRACTION ONE QUARTER
'\u0141' # 0x00ad -> LATIN CAPITAL LETTER L WITH STROKE
'\xab' # 0x00ae -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
'\xbb' # 0x00af -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
'\u2591' # 0x00b0 -> LIGHT SHADE
'\u2592' # 0x00b1 -> MEDIUM SHADE
'\u2593' # 0x00b2 -> DARK SHADE
'\u2502' # 0x00b3 -> BOX DRAWINGS LIGHT VERTICAL
'\u2524' # 0x00b4 -> BOX DRAWINGS LIGHT VERTICAL AND LEFT
'\u0104' # 0x00b5 -> LATIN CAPITAL LETTER A WITH OGONEK
'\u010c' # 0x00b6 -> LATIN CAPITAL LETTER C WITH CARON
'\u0118' # 0x00b7 -> LATIN CAPITAL LETTER E WITH OGONEK
'\u0116' # 0x00b8 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
'\u2563' # 0x00b9 -> BOX DRAWINGS DOUBLE VERTICAL AND LEFT
'\u2551' # 0x00ba -> BOX DRAWINGS DOUBLE VERTICAL
'\u2557' # 0x00bb -> BOX DRAWINGS DOUBLE DOWN AND LEFT
'\u255d' # 0x00bc -> BOX DRAWINGS DOUBLE UP AND LEFT
'\u012e' # 0x00bd -> LATIN CAPITAL LETTER I WITH OGONEK
'\u0160' # 0x00be -> LATIN CAPITAL LETTER S WITH CARON
'\u2510' # 0x00bf -> BOX DRAWINGS LIGHT DOWN AND LEFT
'\u2514' # 0x00c0 -> BOX DRAWINGS LIGHT UP AND RIGHT
'\u2534' # 0x00c1 -> BOX DRAWINGS LIGHT UP AND HORIZONTAL
'\u252c' # 0x00c2 -> BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
'\u251c' # 0x00c3 -> BOX DRAWINGS LIGHT VERTICAL AND RIGHT
'\u2500' # 0x00c4 -> BOX DRAWINGS LIGHT HORIZONTAL
'\u253c' # 0x00c5 -> BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
'\u0172' # 0x00c6 -> LATIN CAPITAL LETTER U WITH OGONEK
'\u016a' # 0x00c7 -> LATIN CAPITAL LETTER U WITH MACRON
'\u255a' # 0x00c8 -> BOX DRAWINGS DOUBLE UP AND RIGHT
'\u2554' # 0x00c9 -> BOX DRAWINGS DOUBLE DOWN AND RIGHT
'\u2569' # 0x00ca -> BOX DRAWINGS DOUBLE UP AND HORIZONTAL
'\u2566' # 0x00cb -> BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
'\u2560' # 0x00cc -> BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
'\u2550' # 0x00cd -> BOX DRAWINGS DOUBLE HORIZONTAL
'\u256c' # 0x00ce -> BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
'\u017d' # 0x00cf -> LATIN CAPITAL LETTER Z WITH CARON
'\u0105' # 0x00d0 -> LATIN SMALL LETTER A WITH OGONEK
'\u010d' # 0x00d1 -> LATIN SMALL LETTER C WITH CARON
'\u0119' # 0x00d2 -> LATIN SMALL LETTER E WITH OGONEK
'\u0117' # 0x00d3 -> LATIN SMALL LETTER E WITH DOT ABOVE
'\u012f' # 0x00d4 -> LATIN SMALL LETTER I WITH OGONEK
'\u0161' # 0x00d5 -> LATIN SMALL LETTER S WITH CARON
'\u0173' # 0x00d6 -> LATIN SMALL LETTER U WITH OGONEK
'\u016b' # 0x00d7 -> LATIN SMALL LETTER U WITH MACRON
'\u017e' # 0x00d8 -> LATIN SMALL LETTER Z WITH CARON
'\u2518' # 0x00d9 -> BOX DRAWINGS LIGHT UP AND LEFT
'\u250c' # 0x00da -> BOX DRAWINGS LIGHT DOWN AND RIGHT
'\u2588' # 0x00db -> FULL BLOCK
'\u2584' # 0x00dc -> LOWER HALF BLOCK
'\u258c' # 0x00dd -> LEFT HALF BLOCK
'\u2590' # 0x00de -> RIGHT HALF BLOCK
'\u2580' # 0x00df -> UPPER HALF BLOCK
'\xd3' # 0x00e0 -> LATIN CAPITAL LETTER O WITH ACUTE
'\xdf' # 0x00e1 -> LATIN SMALL LETTER SHARP S (GERMAN)
'\u014c' # 0x00e2 -> LATIN CAPITAL LETTER O WITH MACRON
'\u0143' # 0x00e3 -> LATIN CAPITAL LETTER N WITH ACUTE
'\xf5' # 0x00e4 -> LATIN SMALL LETTER O WITH TILDE
'\xd5' # 0x00e5 -> LATIN CAPITAL LETTER O WITH TILDE
'\xb5' # 0x00e6 -> MICRO SIGN
'\u0144' # 0x00e7 -> LATIN SMALL LETTER N WITH ACUTE
'\u0136' # 0x00e8 -> LATIN CAPITAL LETTER K WITH CEDILLA
'\u0137' # 0x00e9 -> LATIN SMALL LETTER K WITH CEDILLA
'\u013b' # 0x00ea -> LATIN CAPITAL LETTER L WITH CEDILLA
'\u013c' # 0x00eb -> LATIN SMALL LETTER L WITH CEDILLA
'\u0146' # 0x00ec -> LATIN SMALL LETTER N WITH CEDILLA
'\u0112' # 0x00ed -> LATIN CAPITAL LETTER E WITH MACRON
'\u0145' # 0x00ee -> LATIN CAPITAL LETTER N WITH CEDILLA
'\u2019' # 0x00ef -> RIGHT SINGLE QUOTATION MARK
'\xad' # 0x00f0 -> SOFT HYPHEN
'\xb1' # 0x00f1 -> PLUS-MINUS SIGN
'\u201c' # 0x00f2 -> LEFT DOUBLE QUOTATION MARK
'\xbe' # 0x00f3 -> VULGAR FRACTION THREE QUARTERS
'\xb6' # 0x00f4 -> PILCROW SIGN
'\xa7' # 0x00f5 -> SECTION SIGN
'\xf7' # 0x00f6 -> DIVISION SIGN
'\u201e' # 0x00f7 -> DOUBLE LOW-9 QUOTATION MARK
'\xb0' # 0x00f8 -> DEGREE SIGN
'\u2219' # 0x00f9 -> BULLET OPERATOR
'\xb7' # 0x00fa -> MIDDLE DOT
'\xb9' # 0x00fb -> SUPERSCRIPT ONE
'\xb3' # 0x00fc -> SUPERSCRIPT THREE
'\xb2' # 0x00fd -> SUPERSCRIPT TWO
'\u25a0' # 0x00fe -> BLACK SQUARE
'\xa0' # 0x00ff -> NO-BREAK SPACE
)
### Encoding Map
# Bytes 0x00-0x7f decode to the identical ASCII code points, so the
# encoding direction is the identity there; only the characters that
# decode from bytes 0x80-0xff need explicit
# (unicode ordinal -> codepage byte) entries below.
encoding_map = {code: code for code in range(0x80)}
encoding_map.update({
    0x00a0: 0x00ff,  # NO-BREAK SPACE
    0x00a2: 0x0096,  # CENT SIGN
    0x00a3: 0x009c,  # POUND SIGN
    0x00a4: 0x009f,  # CURRENCY SIGN
    0x00a6: 0x00a7,  # BROKEN BAR
    0x00a7: 0x00f5,  # SECTION SIGN
    0x00a9: 0x00a8,  # COPYRIGHT SIGN
    0x00ab: 0x00ae,  # LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00ac: 0x00aa,  # NOT SIGN
    0x00ad: 0x00f0,  # SOFT HYPHEN
    0x00ae: 0x00a9,  # REGISTERED SIGN
    0x00b0: 0x00f8,  # DEGREE SIGN
    0x00b1: 0x00f1,  # PLUS-MINUS SIGN
    0x00b2: 0x00fd,  # SUPERSCRIPT TWO
    0x00b3: 0x00fc,  # SUPERSCRIPT THREE
    0x00b5: 0x00e6,  # MICRO SIGN
    0x00b6: 0x00f4,  # PILCROW SIGN
    0x00b7: 0x00fa,  # MIDDLE DOT
    0x00b9: 0x00fb,  # SUPERSCRIPT ONE
    0x00bb: 0x00af,  # RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
    0x00bc: 0x00ac,  # VULGAR FRACTION ONE QUARTER
    0x00bd: 0x00ab,  # VULGAR FRACTION ONE HALF
    0x00be: 0x00f3,  # VULGAR FRACTION THREE QUARTERS
    0x00c4: 0x008e,  # LATIN CAPITAL LETTER A WITH DIAERESIS
    0x00c5: 0x008f,  # LATIN CAPITAL LETTER A WITH RING ABOVE
    0x00c6: 0x0092,  # LATIN CAPITAL LIGATURE AE
    0x00c9: 0x0090,  # LATIN CAPITAL LETTER E WITH ACUTE
    0x00d3: 0x00e0,  # LATIN CAPITAL LETTER O WITH ACUTE
    0x00d5: 0x00e5,  # LATIN CAPITAL LETTER O WITH TILDE
    0x00d6: 0x0099,  # LATIN CAPITAL LETTER O WITH DIAERESIS
    0x00d7: 0x009e,  # MULTIPLICATION SIGN
    0x00d8: 0x009d,  # LATIN CAPITAL LETTER O WITH STROKE
    0x00dc: 0x009a,  # LATIN CAPITAL LETTER U WITH DIAERESIS
    0x00df: 0x00e1,  # LATIN SMALL LETTER SHARP S (GERMAN)
    0x00e4: 0x0084,  # LATIN SMALL LETTER A WITH DIAERESIS
    0x00e5: 0x0086,  # LATIN SMALL LETTER A WITH RING ABOVE
    0x00e6: 0x0091,  # LATIN SMALL LIGATURE AE
    0x00e9: 0x0082,  # LATIN SMALL LETTER E WITH ACUTE
    0x00f3: 0x00a2,  # LATIN SMALL LETTER O WITH ACUTE
    0x00f5: 0x00e4,  # LATIN SMALL LETTER O WITH TILDE
    0x00f6: 0x0094,  # LATIN SMALL LETTER O WITH DIAERESIS
    0x00f7: 0x00f6,  # DIVISION SIGN
    0x00f8: 0x009b,  # LATIN SMALL LETTER O WITH STROKE
    0x00fc: 0x0081,  # LATIN SMALL LETTER U WITH DIAERESIS
    0x0100: 0x00a0,  # LATIN CAPITAL LETTER A WITH MACRON
    0x0101: 0x0083,  # LATIN SMALL LETTER A WITH MACRON
    0x0104: 0x00b5,  # LATIN CAPITAL LETTER A WITH OGONEK
    0x0105: 0x00d0,  # LATIN SMALL LETTER A WITH OGONEK
    0x0106: 0x0080,  # LATIN CAPITAL LETTER C WITH ACUTE
    0x0107: 0x0087,  # LATIN SMALL LETTER C WITH ACUTE
    0x010c: 0x00b6,  # LATIN CAPITAL LETTER C WITH CARON
    0x010d: 0x00d1,  # LATIN SMALL LETTER C WITH CARON
    0x0112: 0x00ed,  # LATIN CAPITAL LETTER E WITH MACRON
    0x0113: 0x0089,  # LATIN SMALL LETTER E WITH MACRON
    0x0116: 0x00b8,  # LATIN CAPITAL LETTER E WITH DOT ABOVE
    0x0117: 0x00d3,  # LATIN SMALL LETTER E WITH DOT ABOVE
    0x0118: 0x00b7,  # LATIN CAPITAL LETTER E WITH OGONEK
    0x0119: 0x00d2,  # LATIN SMALL LETTER E WITH OGONEK
    0x0122: 0x0095,  # LATIN CAPITAL LETTER G WITH CEDILLA
    0x0123: 0x0085,  # LATIN SMALL LETTER G WITH CEDILLA
    0x012a: 0x00a1,  # LATIN CAPITAL LETTER I WITH MACRON
    0x012b: 0x008c,  # LATIN SMALL LETTER I WITH MACRON
    0x012e: 0x00bd,  # LATIN CAPITAL LETTER I WITH OGONEK
    0x012f: 0x00d4,  # LATIN SMALL LETTER I WITH OGONEK
    0x0136: 0x00e8,  # LATIN CAPITAL LETTER K WITH CEDILLA
    0x0137: 0x00e9,  # LATIN SMALL LETTER K WITH CEDILLA
    0x013b: 0x00ea,  # LATIN CAPITAL LETTER L WITH CEDILLA
    0x013c: 0x00eb,  # LATIN SMALL LETTER L WITH CEDILLA
    0x0141: 0x00ad,  # LATIN CAPITAL LETTER L WITH STROKE
    0x0142: 0x0088,  # LATIN SMALL LETTER L WITH STROKE
    0x0143: 0x00e3,  # LATIN CAPITAL LETTER N WITH ACUTE
    0x0144: 0x00e7,  # LATIN SMALL LETTER N WITH ACUTE
    0x0145: 0x00ee,  # LATIN CAPITAL LETTER N WITH CEDILLA
    0x0146: 0x00ec,  # LATIN SMALL LETTER N WITH CEDILLA
    0x014c: 0x00e2,  # LATIN CAPITAL LETTER O WITH MACRON
    0x014d: 0x0093,  # LATIN SMALL LETTER O WITH MACRON
    0x0156: 0x008a,  # LATIN CAPITAL LETTER R WITH CEDILLA
    0x0157: 0x008b,  # LATIN SMALL LETTER R WITH CEDILLA
    0x015a: 0x0097,  # LATIN CAPITAL LETTER S WITH ACUTE
    0x015b: 0x0098,  # LATIN SMALL LETTER S WITH ACUTE
    0x0160: 0x00be,  # LATIN CAPITAL LETTER S WITH CARON
    0x0161: 0x00d5,  # LATIN SMALL LETTER S WITH CARON
    0x016a: 0x00c7,  # LATIN CAPITAL LETTER U WITH MACRON
    0x016b: 0x00d7,  # LATIN SMALL LETTER U WITH MACRON
    0x0172: 0x00c6,  # LATIN CAPITAL LETTER U WITH OGONEK
    0x0173: 0x00d6,  # LATIN SMALL LETTER U WITH OGONEK
    0x0179: 0x008d,  # LATIN CAPITAL LETTER Z WITH ACUTE
    0x017a: 0x00a5,  # LATIN SMALL LETTER Z WITH ACUTE
    0x017b: 0x00a3,  # LATIN CAPITAL LETTER Z WITH DOT ABOVE
    0x017c: 0x00a4,  # LATIN SMALL LETTER Z WITH DOT ABOVE
    0x017d: 0x00cf,  # LATIN CAPITAL LETTER Z WITH CARON
    0x017e: 0x00d8,  # LATIN SMALL LETTER Z WITH CARON
    0x2019: 0x00ef,  # RIGHT SINGLE QUOTATION MARK
    0x201c: 0x00f2,  # LEFT DOUBLE QUOTATION MARK
    0x201d: 0x00a6,  # RIGHT DOUBLE QUOTATION MARK
    0x201e: 0x00f7,  # DOUBLE LOW-9 QUOTATION MARK
    0x2219: 0x00f9,  # BULLET OPERATOR
    0x2500: 0x00c4,  # BOX DRAWINGS LIGHT HORIZONTAL
    0x2502: 0x00b3,  # BOX DRAWINGS LIGHT VERTICAL
    0x250c: 0x00da,  # BOX DRAWINGS LIGHT DOWN AND RIGHT
    0x2510: 0x00bf,  # BOX DRAWINGS LIGHT DOWN AND LEFT
    0x2514: 0x00c0,  # BOX DRAWINGS LIGHT UP AND RIGHT
    0x2518: 0x00d9,  # BOX DRAWINGS LIGHT UP AND LEFT
    0x251c: 0x00c3,  # BOX DRAWINGS LIGHT VERTICAL AND RIGHT
    0x2524: 0x00b4,  # BOX DRAWINGS LIGHT VERTICAL AND LEFT
    0x252c: 0x00c2,  # BOX DRAWINGS LIGHT DOWN AND HORIZONTAL
    0x2534: 0x00c1,  # BOX DRAWINGS LIGHT UP AND HORIZONTAL
    0x253c: 0x00c5,  # BOX DRAWINGS LIGHT VERTICAL AND HORIZONTAL
    0x2550: 0x00cd,  # BOX DRAWINGS DOUBLE HORIZONTAL
    0x2551: 0x00ba,  # BOX DRAWINGS DOUBLE VERTICAL
    0x2554: 0x00c9,  # BOX DRAWINGS DOUBLE DOWN AND RIGHT
    0x2557: 0x00bb,  # BOX DRAWINGS DOUBLE DOWN AND LEFT
    0x255a: 0x00c8,  # BOX DRAWINGS DOUBLE UP AND RIGHT
    0x255d: 0x00bc,  # BOX DRAWINGS DOUBLE UP AND LEFT
    0x2560: 0x00cc,  # BOX DRAWINGS DOUBLE VERTICAL AND RIGHT
    0x2563: 0x00b9,  # BOX DRAWINGS DOUBLE VERTICAL AND LEFT
    0x2566: 0x00cb,  # BOX DRAWINGS DOUBLE DOWN AND HORIZONTAL
    0x2569: 0x00ca,  # BOX DRAWINGS DOUBLE UP AND HORIZONTAL
    0x256c: 0x00ce,  # BOX DRAWINGS DOUBLE VERTICAL AND HORIZONTAL
    0x2580: 0x00df,  # UPPER HALF BLOCK
    0x2584: 0x00dc,  # LOWER HALF BLOCK
    0x2588: 0x00db,  # FULL BLOCK
    0x258c: 0x00dd,  # LEFT HALF BLOCK
    0x2590: 0x00de,  # RIGHT HALF BLOCK
    0x2591: 0x00b0,  # LIGHT SHADE
    0x2592: 0x00b1,  # MEDIUM SHADE
    0x2593: 0x00b2,  # DARK SHADE
    0x25a0: 0x00fe,  # BLACK SQUARE
})
| gpl-2.0 |
neilpelow/wmap-django | venv/lib/python3.5/site-packages/django/utils/jslex.py | 335 | 7778 | """JsLex: a lexer for Javascript"""
# Originally from https://bitbucket.org/ned/jslex
from __future__ import unicode_literals
import re
class Tok(object):
    """
    A specification for a single token class.

    Records the token's display name, the regex source that matches it,
    and (optionally) the lexer state to switch to after a match.
    """

    # Class-wide counter: every Tok ever created receives the next id,
    # which the Lexer uses as a unique regex group name.
    num = 0

    def __init__(self, name, regex, next=None):
        self.name = name
        self.regex = regex
        self.next = next
        # Claim a unique id and advance the shared counter.
        self.id = Tok.num
        Tok.num += 1
def literals(choices, prefix="", suffix=""):
    """
    Create a regex source string from a space-separated list of literal
    `choices`.

    Each choice is regex-escaped and, if provided, wrapped in `prefix`
    and `suffix` individually; the results are joined into a single
    alternation.
    """
    wrapped = [prefix + re.escape(choice) + suffix
               for choice in choices.split()]
    return "|".join(wrapped)
class Lexer(object):
    """
    A generic multi-state regex-based lexer.
    """
    def __init__(self, states, first):
        # state name -> compiled alternation of that state's token regexes.
        self.regexes = {}
        # regex group id ("t<n>") -> the Tok it was built from.
        self.toks = {}
        for state, rules in states.items():
            parts = []
            for tok in rules:
                # Each Tok becomes a named group, so the winning
                # alternative can be recovered via match.lastgroup.
                groupid = "t%d" % tok.id
                self.toks[groupid] = tok
                parts.append("(?P<%s>%s)" % (groupid, tok.regex))
            self.regexes[state] = re.compile("|".join(parts), re.MULTILINE | re.VERBOSE)
        self.state = first
    def lex(self, text):
        """
        Lexically analyze `text`.
        Yields pairs (`name`, `tokentext`).
        """
        end = len(text)
        state = self.state
        regexes = self.regexes
        toks = self.toks
        start = 0
        while start < end:
            # Consume successive matches in the current state.
            for match in regexes[state].finditer(text, start):
                name = match.lastgroup
                tok = toks[name]
                toktext = match.group(name)
                start += len(toktext)
                yield (tok.name, toktext)
                if tok.next:
                    # Token triggers a state change: abandon this finditer
                    # and rescan from `start` with the new state's regex.
                    state = tok.next
                    break
        # Remember the final state so a later lex() call resumes there.
        self.state = state
class JsLexer(Lexer):
    """
    A Javascript lexer
    >>> lexer = JsLexer()
    >>> list(lexer.lex("a = 1"))
    [('id', 'a'), ('ws', ' '), ('punct', '='), ('ws', ' '), ('dnum', '1')]
    This doesn't properly handle non-ASCII characters in the Javascript source.
    """
    # Because these tokens are matched as alternatives in a regex, longer
    # possibilities must appear in the list before shorter ones, for example,
    # '>>' before '>'.
    #
    # Note that we don't have to detect malformed Javascript, only properly
    # lex correct Javascript, so much of this is simplified.
    # Details of Javascript lexical structure are taken from
    # http://www.ecma-international.org/publications/files/ECMA-ST/ECMA-262.pdf
    # A useful explanation of automatic semicolon insertion is at
    # http://inimino.org/~inimino/blog/javascript_semicolons
    # Token alternatives shared by both lexer states, tried before the
    # state-specific ones.
    both_before = [
        Tok("comment", r"/\*(.|\n)*?\*/"),
        Tok("linecomment", r"//.*?$"),
        Tok("ws", r"\s+"),
        Tok("keyword", literals("""
                           break case catch class const continue debugger
                           default delete do else enum export extends
                           finally for function if import in instanceof
                           new return super switch this throw try typeof
                           var void while with
                           """, suffix=r"\b"), next='reg'),
        Tok("reserved", literals("null true false", suffix=r"\b"), next='div'),
        # NOTE(review): the space inside "[a-zA-Z_$ ]" and the "a-fA-Z"
        # range in the first \u escape below look like upstream typos
        # (whitespace is significant inside a character class even under
        # re.VERBOSE) -- confirm against ECMA-262 before changing.
        Tok("id", r"""
                  ([a-zA-Z_$ ]|\\u[0-9a-fA-Z]{4}) # first char
                  ([a-zA-Z_$0-9]|\\u[0-9a-fA-F]{4})* # rest chars
                  """, next='div'),
        Tok("hnum", r"0[xX][0-9a-fA-F]+", next='div'),
        Tok("onum", r"0[0-7]+"),
        Tok("dnum", r"""
                    ( (0|[1-9][0-9]*) # DecimalIntegerLiteral
                      \. # dot
                      [0-9]* # DecimalDigits-opt
                      ([eE][-+]?[0-9]+)? # ExponentPart-opt
                    |
                      \. # dot
                      [0-9]+ # DecimalDigits
                      ([eE][-+]?[0-9]+)? # ExponentPart-opt
                    |
                      (0|[1-9][0-9]*) # DecimalIntegerLiteral
                      ([eE][-+]?[0-9]+)? # ExponentPart-opt
                    )
                    """, next='div'),
        Tok("punct", literals("""
            >>>= === !== >>> <<= >>= <= >= == != << >> &&
            || += -= *= %= &= |= ^=
            """), next="reg"),
        Tok("punct", literals("++ -- ) ]"), next='div'),
        Tok("punct", literals("{ } ( [ . ; , < > + - * % & | ^ ! ~ ? : ="), next='reg'),
        Tok("string", r'"([^"\\]|(\\(.|\n)))*?"', next='div'),
        Tok("string", r"'([^'\\]|(\\(.|\n)))*?'", next='div'),
    ]
    # Catch-all: any single character not matched by anything above.
    both_after = [
        Tok("other", r"."),
    ]
    states = {
        # slash will mean division
        'div': both_before + [
            Tok("punct", literals("/= /"), next='reg'),
        ] + both_after,
        # slash will mean regex
        'reg': both_before + [
            Tok("regex",
                r"""
                    / # opening slash
                    # First character is..
                    ( [^*\\/[] # anything but * \ / or [
                    | \\. # or an escape sequence
                    | \[ # or a class, which has
                        ( [^\]\\] # anything but \ or ]
                        | \\. # or an escape sequence
                        )* # many times
                        \]
                    )
                    # Following characters are same, except for excluding a star
                    ( [^\\/[] # anything but \ / or [
                    | \\. # or an escape sequence
                    | \[ # or a class, which has
                        ( [^\]\\] # anything but \ or ]
                        | \\. # or an escape sequence
                        )* # many times
                        \]
                    )* # many times
                    / # closing slash
                    [a-zA-Z0-9]* # trailing flags
                """, next='div'),
        ] + both_after,
    }
    def __init__(self):
        # Begin in the 'reg' state: at the start of a script a slash
        # introduces a regex literal, not division.
        super(JsLexer, self).__init__(self.states, 'reg')
def prepare_js_for_gettext(js):
    """
    Convert the Javascript source `js` into something resembling C for
    xgettext.

    Regex literals are collapsed to the placeholder string "REGEX",
    single-quoted strings become double-quoted ones, and Unicode escapes
    in identifiers are neutralized.
    """
    def escape_quotes(m):
        """Used in a regex to properly escape double quotes."""
        matched = m.group(0)
        return r'\"' if matched == '"' else matched

    chunks = []
    for name, tok in JsLexer().lex(js):
        if name == 'regex':
            # C doesn't grok regexes, and they aren't needed for gettext,
            # so just output a placeholder string instead.
            tok = '"REGEX"'
        elif name == 'string' and tok.startswith("'"):
            # C doesn't have single-quoted strings, so rewrite them as
            # double-quoted, escaping any embedded double quotes.
            guts = re.sub(r"\\.|.", escape_quotes, tok[1:-1])
            tok = '"' + guts + '"'
        elif name == 'id':
            # C can't deal with Unicode escapes in identifiers; they are
            # not needed for gettext anyway, so replace them with
            # something innocuous.
            tok = tok.replace("\\", "U")
        chunks.append(tok)
    return ''.join(chunks)
| gpl-3.0 |
stefanseefeld/numba | numba/tests/test_python_int.py | 8 | 1843 | from __future__ import print_function
import numba.unittest_support as unittest
from numba.compiler import compile_isolated, Flags
from numba import types
# Compilation-flag presets used by the tests below: one forcing object
# (pyobject) mode, one leaving nopython mode in effect.
force_pyobj_flags = Flags()
force_pyobj_flags.set("force_pyobject")
no_pyobj_flags = Flags()
def return_int(a, b):
    """Add two values; compiled via compile_isolated in the tests below."""
    total = a + b
    return total
class TestPythonInt(unittest.TestCase):
    # Issue #474: ints should be returned rather than longs under Python 2,
    # as much as possible.
    def test_int_return_type(self, flags=force_pyobj_flags,
                             int_type=types.int64, operands=(3, 4)):
        # Compile `return_int` for the given integer type, then check the
        # compiled function returns both the same value *and* the same
        # Python type as the pure-Python original.
        pyfunc = return_int
        cr = compile_isolated(pyfunc, (int_type, int_type), flags=flags)
        cfunc = cr.entry_point
        expected = pyfunc(*operands)
        got = cfunc(*operands)
        self.assertIs(type(got), type(expected))
        self.assertEqual(got, expected)
    def test_int_return_type_npm(self):
        # Same check in nopython mode.
        self.test_int_return_type(flags=no_pyobj_flags)
    def test_unsigned_int_return_type(self, flags=force_pyobj_flags):
        self.test_int_return_type(int_type=types.uint64, flags=flags)
    def test_unsigned_int_return_type_npm(self):
        self.test_unsigned_int_return_type(flags=no_pyobj_flags)
    def test_long_int_return_type(self, flags=force_pyobj_flags):
        # Same but returning a 64-bit integer. The return type should be
        # `int` on 64-bit builds, `long` on 32-bit ones (or Windows).
        self.test_int_return_type(flags=flags, operands=(2**33, 2**40))
    def test_long_int_return_type_npm(self):
        self.test_long_int_return_type(flags=no_pyobj_flags)
    def test_longer_int_return_type(self, flags=force_pyobj_flags):
        # Operands exceed 64 bits. This won't be supported in nopython
        # mode, hence no *_npm companion test.
        self.test_int_return_type(flags=flags, operands=(2**70, 2**75))
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main()
| bsd-2-clause |
bolkedebruin/airflow | airflow/contrib/operators/sql_to_gcs.py | 1 | 1678 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""This module is deprecated. Please use `airflow.providers.google.cloud.operators.sql_to_gcs`."""
import warnings
from airflow.providers.google.cloud.operators.sql_to_gcs import BaseSQLToGCSOperator
# Emit the deprecation notice once, at import time, pointing users at the
# new provider location of this module.
warnings.warn(
    "This module is deprecated. Please use `airflow.providers.google.cloud.operators.sql_to_gcs`.",
    DeprecationWarning, stacklevel=2
)
class BaseSQLToGoogleCloudStorageOperator(BaseSQLToGCSOperator):
    """
    This class is deprecated.
    Please use `airflow.providers.google.cloud.operators.sql_to_gcs.BaseSQLToGCSOperator`.
    """
    def __init__(self, *args, **kwargs):
        # Warn at instantiation time; otherwise this shim behaves exactly
        # like the provider implementation it subclasses.
        message = """This class is deprecated.
            Please use `airflow.providers.google.cloud.operators.sql_to_gcs.BaseSQLToGCSOperator`."""
        warnings.warn(message, DeprecationWarning, stacklevel=2)
        super().__init__(*args, **kwargs)
| apache-2.0 |
wangyikai/grpc | src/python/grpcio_test/grpc_test/beta/_utilities_test.py | 8 | 4650 | # Copyright 2015, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Tests of grpc.beta.utilities."""
import threading
import time
import unittest
from grpc._adapter import _low
from grpc._adapter import _types
from grpc.beta import implementations
from grpc.beta import utilities
from grpc.framework.foundation import future
from grpc_test.framework.common import test_constants
def _drive_completion_queue(completion_queue):
    """Poll `completion_queue` until it reports that it has shut down."""
    one_day = 24 * 60 * 60
    while True:
        # Use a far-future deadline; we only stop on queue shutdown.
        event = completion_queue.next(time.time() + one_day)
        if event.type == _types.EventType.QUEUE_SHUTDOWN:
            break
class _Callback(object):
def __init__(self):
self._condition = threading.Condition()
self._value = None
def accept_value(self, value):
with self._condition:
self._value = value
self._condition.notify_all()
def block_until_called(self):
with self._condition:
while self._value is None:
self._condition.wait()
return self._value
class ChannelConnectivityTest(unittest.TestCase):
    def test_lonely_channel_connectivity(self):
        # Channel pointed at a port nobody listens on: the readiness
        # future must time out, still be "running", then be cancellable,
        # and cancellation must fire the done callback with the future.
        channel = implementations.insecure_channel('localhost', 12345)
        callback = _Callback()
        ready_future = utilities.channel_ready_future(channel)
        ready_future.add_done_callback(callback.accept_value)
        with self.assertRaises(future.TimeoutError):
            ready_future.result(test_constants.SHORT_TIMEOUT)
        self.assertFalse(ready_future.cancelled())
        self.assertFalse(ready_future.done())
        self.assertTrue(ready_future.running())
        ready_future.cancel()
        value_passed_to_callback = callback.block_until_called()
        self.assertIs(ready_future, value_passed_to_callback)
        self.assertTrue(ready_future.cancelled())
        self.assertTrue(ready_future.done())
        self.assertFalse(ready_future.running())
    def test_immediately_connectable_channel_connectivity(self):
        # Start a real in-process server (with a thread draining its
        # completion queue) so the channel can actually connect.
        server_completion_queue = _low.CompletionQueue()
        server = _low.Server(server_completion_queue, [])
        port = server.add_http2_port('[::]:0')
        server.start()
        server_completion_queue_thread = threading.Thread(
            target=_drive_completion_queue, args=(server_completion_queue,))
        server_completion_queue_thread.start()
        channel = implementations.insecure_channel('localhost', port)
        callback = _Callback()
        try:
            ready_future = utilities.channel_ready_future(channel)
            ready_future.add_done_callback(callback.accept_value)
            self.assertIsNone(
                ready_future.result(test_constants.SHORT_TIMEOUT))
            value_passed_to_callback = callback.block_until_called()
            self.assertIs(ready_future, value_passed_to_callback)
            self.assertFalse(ready_future.cancelled())
            self.assertTrue(ready_future.done())
            self.assertFalse(ready_future.running())
            # Cancellation after maturity has no effect.
            ready_future.cancel()
            self.assertFalse(ready_future.cancelled())
            self.assertTrue(ready_future.done())
            self.assertFalse(ready_future.running())
        finally:
            # Tear everything down even on assertion failure so the
            # queue-draining thread can exit.
            ready_future.cancel()
            server.shutdown()
            server_completion_queue.shutdown()
            server_completion_queue_thread.join()
# Allow running this test module directly.
if __name__ == '__main__':
    unittest.main(verbosity=2)
| bsd-3-clause |
ThinkingBridge/platform_external_chromium_org | third_party/jinja2/sandbox.py | 122 | 13114 | # -*- coding: utf-8 -*-
"""
jinja2.sandbox
~~~~~~~~~~~~~~
Adds a sandbox layer to Jinja as it was the default behavior in the old
Jinja 1 releases. This sandbox is slightly different from Jinja 1 as the
default behavior is easier to use.
The behavior can be changed by subclassing the environment.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
import operator
from jinja2.environment import Environment
from jinja2.exceptions import SecurityError
from jinja2.utils import FunctionType, MethodType, TracebackType, CodeType, \
FrameType, GeneratorType
#: maximum number of items a range may produce
MAX_RANGE = 100000
#: attributes of function objects that are considered unsafe.
UNSAFE_FUNCTION_ATTRIBUTES = set(['func_closure', 'func_code', 'func_dict',
'func_defaults', 'func_globals'])
#: unsafe method attributes. function attributes are unsafe for methods too
UNSAFE_METHOD_ATTRIBUTES = set(['im_class', 'im_func', 'im_self'])
import warnings
# make sure we don't warn in python 2.6 about stuff we don't care about
warnings.filterwarnings('ignore', 'the sets module', DeprecationWarning,
module='jinja2.sandbox')
from collections import deque
_mutable_set_types = (set,)
_mutable_mapping_types = (dict,)
_mutable_sequence_types = (list,)
# on python 2.x we can register the user collection types
try:
from UserDict import UserDict, DictMixin
from UserList import UserList
_mutable_mapping_types += (UserDict, DictMixin)
_mutable_set_types += (UserList,)
except ImportError:
pass
# if sets is still available, register the mutable set from there as well
try:
from sets import Set
_mutable_set_types += (Set,)
except ImportError:
pass
#: register Python 2.6 abstract base classes
try:
from collections import MutableSet, MutableMapping, MutableSequence
_mutable_set_types += (MutableSet,)
_mutable_mapping_types += (MutableMapping,)
_mutable_sequence_types += (MutableSequence,)
except ImportError:
pass
# (type-group, mutating-method-names) pairs consulted by
# `modifies_known_mutable` below: for the first group an object matches,
# an attribute is considered mutating iff it is in the frozenset.
_mutable_spec = (
    (_mutable_set_types, frozenset([
        'add', 'clear', 'difference_update', 'discard', 'pop', 'remove',
        'symmetric_difference_update', 'update'
    ])),
    (_mutable_mapping_types, frozenset([
        'clear', 'pop', 'popitem', 'setdefault', 'update'
    ])),
    (_mutable_sequence_types, frozenset([
        'append', 'reverse', 'insert', 'sort', 'extend', 'remove'
    ])),
    (deque, frozenset([
        'append', 'appendleft', 'clear', 'extend', 'extendleft', 'pop',
        'popleft', 'remove', 'rotate'
    ]))
)
def safe_range(*args):
    """A range that can't generate ranges with a length of more than
    MAX_RANGE items.
    """
    rng = xrange(*args)
    if len(rng) <= MAX_RANGE:
        return rng
    raise OverflowError('range too big, maximum size for range is %d' %
                        MAX_RANGE)
def unsafe(f):
    """Decorator that marks a function or method as unsafe::

        @unsafe
        def delete(self):
            pass
    """
    # Attach the marker attribute and hand the callable back unchanged.
    setattr(f, 'unsafe_callable', True)
    return f
def is_internal_attribute(obj, attr):
    """Test if the attribute given is an internal python attribute. For
    example this function returns `True` for the `func_code` attribute of
    python objects. This is useful if the environment method
    :meth:`~SandboxedEnvironment.is_safe_attribute` is overriden.

    >>> from jinja2.sandbox import is_internal_attribute
    >>> is_internal_attribute(lambda: None, "func_code")
    True
    >>> is_internal_attribute(str, "upper")
    False
    """
    # The type checks below are mutually exclusive, so combining each
    # isinstance test with its attribute condition preserves the original
    # first-match-then-fall-through behavior.
    if isinstance(obj, FunctionType) and attr in UNSAFE_FUNCTION_ATTRIBUTES:
        return True
    if isinstance(obj, MethodType) and (attr in UNSAFE_FUNCTION_ATTRIBUTES or
                                        attr in UNSAFE_METHOD_ATTRIBUTES):
        return True
    if isinstance(obj, type) and attr == 'mro':
        return True
    if isinstance(obj, (CodeType, TracebackType, FrameType)):
        # Every attribute of code/traceback/frame objects is off limits.
        return True
    if isinstance(obj, GeneratorType) and attr == 'gi_frame':
        return True
    # Anything double-underscored is considered internal as well.
    return attr.startswith('__')
def modifies_known_mutable(obj, attr):
    """This function checks if an attribute on a builtin mutable object
    (list, dict, set or deque) would modify it if called. It also supports
    the "user"-versions of the objects (`sets.Set`, `UserDict.*` etc.) and
    with Python 2.6 onwards the abstract base classes `MutableSet`,
    `MutableMapping`, and `MutableSequence`.
    >>> modifies_known_mutable({}, "clear")
    True
    >>> modifies_known_mutable({}, "keys")
    False
    >>> modifies_known_mutable([], "append")
    True
    >>> modifies_known_mutable([], "index")
    False
    If called with an unsupported object (such as unicode) `False` is
    returned.
    >>> modifies_known_mutable("foo", "upper")
    False
    """
    # Pick the mutating-method set for the first type group that matches,
    # or None when obj is not a known mutable type.
    methods = next(
        (mutators for type_group, mutators in _mutable_spec
         if isinstance(obj, type_group)),
        None)
    return methods is not None and attr in methods
class SandboxedEnvironment(Environment):
    """The sandboxed environment. It works like the regular environment but
    tells the compiler to generate sandboxed code. Additionally subclasses of
    this environment may override the methods that tell the runtime what
    attributes or functions are safe to access.
    If the template tries to access insecure code a :exc:`SecurityError` is
    raised. However other exceptions may also occur during the rendering, so
    the caller has to ensure that all exceptions are caught.
    """
    # Flag read by the compiler to emit sandboxed attribute/call lookups.
    sandboxed = True
    #: default callback table for the binary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`binop_table`
    default_binop_table = {
        '+': operator.add,
        '-': operator.sub,
        '*': operator.mul,
        '/': operator.truediv,
        '//': operator.floordiv,
        '**': operator.pow,
        '%': operator.mod
    }
    #: default callback table for the unary operators. A copy of this is
    #: available on each instance of a sandboxed environment as
    #: :attr:`unop_table`
    default_unop_table = {
        '+': operator.pos,
        '-': operator.neg
    }
    #: a set of binary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_binop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`binop_table`.
    #:
    #: The following binary operators are interceptable:
    #: ``//``, ``%``, ``+``, ``*``, ``-``, ``/``, and ``**``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_binops = frozenset()
    #: a set of unary operators that should be intercepted. Each operator
    #: that is added to this set (empty by default) is delegated to the
    #: :meth:`call_unop` method that will perform the operator. The default
    #: operator callback is specified by :attr:`unop_table`.
    #:
    #: The following unary operators are interceptable: ``+``, ``-``
    #:
    #: The default operation form the operator table corresponds to the
    #: builtin function. Intercepted calls are always slower than the native
    #: operator call, so make sure only to intercept the ones you are
    #: interested in.
    #:
    #: .. versionadded:: 2.6
    intercepted_unops = frozenset()
    def intercept_unop(self, operator):
        """Called during template compilation with the name of a unary
        operator to check if it should be intercepted at runtime. If this
        method returns `True`, :meth:`call_unop` is executed for this unary
        operator. The default implementation of :meth:`call_unop` will use
        the :attr:`unop_table` dictionary to perform the operator with the
        same logic as the builtin one.
        The following unary operators are interceptable: ``+`` and ``-``
        Intercepted calls are always slower than the native operator call,
        so make sure only to intercept the ones you are interested in.
        .. versionadded:: 2.6
        """
        return False
    def __init__(self, *args, **kwargs):
        Environment.__init__(self, *args, **kwargs)
        # Expose the length-limited range so templates cannot build
        # arbitrarily large ranges.
        self.globals['range'] = safe_range
        # Per-instance copies so subclasses/instances can patch operator
        # behavior without mutating the shared class-level defaults.
        self.binop_table = self.default_binop_table.copy()
        self.unop_table = self.default_unop_table.copy()
    def is_safe_attribute(self, obj, attr, value):
        """The sandboxed environment will call this method to check if the
        attribute of an object is safe to access. Per default all attributes
        starting with an underscore are considered private as well as the
        special attributes of internal python objects as returned by the
        :func:`is_internal_attribute` function.
        """
        return not (attr.startswith('_') or is_internal_attribute(obj, attr))
    def is_safe_callable(self, obj):
        """Check if an object is safely callable. Per default a function is
        considered safe unless the `unsafe_callable` attribute exists and is
        True. Override this method to alter the behavior, but this won't
        affect the `unsafe` decorator from this module.
        """
        return not (getattr(obj, 'unsafe_callable', False) or
                    getattr(obj, 'alters_data', False))
    def call_binop(self, context, operator, left, right):
        """For intercepted binary operator calls (:meth:`intercepted_binops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.
        .. versionadded:: 2.6
        """
        return self.binop_table[operator](left, right)
    def call_unop(self, context, operator, arg):
        """For intercepted unary operator calls (:meth:`intercepted_unops`)
        this function is executed instead of the builtin operator. This can
        be used to fine tune the behavior of certain operators.
        .. versionadded:: 2.6
        """
        return self.unop_table[operator](arg)
    def getitem(self, obj, argument):
        """Subscribe an object from sandboxed code."""
        try:
            return obj[argument]
        except (TypeError, LookupError):
            # Item lookup failed; for string subscripts fall back to
            # attribute access so foo['bar'] and foo.bar are interchangeable
            # in templates.
            if isinstance(argument, basestring):
                try:
                    attr = str(argument)
                except Exception:
                    pass
                else:
                    try:
                        value = getattr(obj, attr)
                    except AttributeError:
                        pass
                    else:
                        if self.is_safe_attribute(obj, argument, value):
                            return value
                        # Attribute exists but is disallowed: return a
                        # special undefined that raises SecurityError on use.
                        return self.unsafe_undefined(obj, argument)
        # Neither item nor (safe) attribute lookup succeeded.
        return self.undefined(obj=obj, name=argument)
    def getattr(self, obj, attribute):
        """Subscribe an object from sandboxed code and prefer the
        attribute. The attribute passed *must* be a bytestring.
        """
        try:
            value = getattr(obj, attribute)
        except AttributeError:
            # Attribute missing; fall back to item lookup (mirror of
            # getitem above, with the preference reversed).
            try:
                return obj[attribute]
            except (TypeError, LookupError):
                pass
        else:
            if self.is_safe_attribute(obj, attribute, value):
                return value
            return self.unsafe_undefined(obj, attribute)
        return self.undefined(obj=obj, name=attribute)
    def unsafe_undefined(self, obj, attribute):
        """Return an undefined object for unsafe attributes."""
        return self.undefined('access to attribute %r of %r '
                              'object is unsafe.' % (
            attribute,
            obj.__class__.__name__
        ), name=attribute, obj=obj, exc=SecurityError)
    def call(__self, __context, __obj, *args, **kwargs):
        """Call an object from sandboxed code."""
        # the double prefixes are to avoid double keyword argument
        # errors when proxying the call.
        if not __self.is_safe_callable(__obj):
            raise SecurityError('%r is not safely callable' % (__obj,))
        return __context.call(__obj, *args, **kwargs)
class ImmutableSandboxedEnvironment(SandboxedEnvironment):
    """Works exactly like the regular `SandboxedEnvironment` but does not
    permit modifications on the builtin mutable objects `list`, `set`, and
    `dict` by using the :func:`modifies_known_mutable` function.
    """
    def is_safe_attribute(self, obj, attr, value):
        # An attribute is safe only if the base sandbox allows it AND it is
        # not a known in-place mutator of a builtin mutable object.
        base_safe = SandboxedEnvironment.is_safe_attribute(
            self, obj, attr, value)
        return base_safe and not modifies_known_mutable(obj, attr)
| bsd-3-clause |
mikewiebe-ansible/ansible | lib/ansible/modules/network/aci/aci_interface_policy_leaf_profile.py | 8 | 6901 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2017, Bruno Calogero <brunocalogero@hotmail.com>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
# Module maturity and support markers consumed by ansible-doc and CI tooling.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'certified'}
DOCUMENTATION = r'''
---
module: aci_interface_policy_leaf_profile
short_description: Manage fabric interface policy leaf profiles (infra:AccPortP)
description:
- Manage fabric interface policy leaf profiles on Cisco ACI fabrics.
version_added: '2.5'
options:
leaf_interface_profile:
description:
- The name of the Fabric access policy leaf interface profile.
type: str
required: yes
aliases: [ name, leaf_interface_profile_name ]
description:
description:
- Description for the Fabric access policy leaf interface profile.
type: str
aliases: [ descr ]
state:
description:
- Use C(present) or C(absent) for adding or removing.
- Use C(query) for listing an object or multiple objects.
type: str
choices: [ absent, present, query ]
default: present
extends_documentation_fragment: aci
seealso:
- name: APIC Management Information Model reference
description: More information about the internal APIC class B(infra:AccPortP).
link: https://developer.cisco.com/docs/apic-mim-ref/
author:
- Bruno Calogero (@brunocalogero)
'''
EXAMPLES = r'''
- name: Add a new leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
description: leafintprfname description
state: present
delegate_to: localhost
- name: Remove a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: absent
delegate_to: localhost
- name: Remove all leaf_interface_profiles
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
state: absent
delegate_to: localhost
- name: Query a leaf_interface_profile
aci_interface_policy_leaf_profile:
host: apic
username: admin
password: SomeSecretPassword
leaf_interface_profile: leafintprfname
state: query
delegate_to: localhost
register: query_result
'''
RETURN = r'''
current:
description: The existing configuration from the APIC after the module has finished
returned: success
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
error:
description: The error information as returned from the APIC
returned: failure
type: dict
sample:
{
"code": "122",
"text": "unknown managed object class foo"
}
raw:
description: The raw output returned by the APIC REST API (xml or json)
returned: parse error
type: str
sample: '<?xml version="1.0" encoding="UTF-8"?><imdata totalCount="1"><error code="122" text="unknown managed object class foo"/></imdata>'
sent:
description: The actual/minimal configuration pushed to the APIC
returned: info
type: list
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment"
}
}
}
previous:
description: The original configuration from the APIC before the module has started
returned: info
type: list
sample:
[
{
"fvTenant": {
"attributes": {
"descr": "Production",
"dn": "uni/tn-production",
"name": "production",
"nameAlias": "",
"ownerKey": "",
"ownerTag": ""
}
}
}
]
proposed:
description: The assembled configuration from the user-provided parameters
returned: info
type: dict
sample:
{
"fvTenant": {
"attributes": {
"descr": "Production environment",
"name": "production"
}
}
}
filter_string:
description: The filter string used for the request
returned: failure or debug
type: str
sample: ?rsp-prop-include=config-only
method:
description: The HTTP method used for the request to the APIC
returned: failure or debug
type: str
sample: POST
response:
description: The HTTP response from the APIC
returned: failure or debug
type: str
sample: OK (30 bytes)
status:
description: The HTTP status from the APIC
returned: failure or debug
type: int
sample: 200
url:
description: The HTTP url used for the request to the APIC
returned: failure or debug
type: str
sample: https://10.11.12.13/api/mo/uni/tn-production.json
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.network.aci.aci import ACIModule, aci_argument_spec
def main():
    """Entry point: create/delete/query an ACI leaf interface profile
    (infra:AccPortP) according to the module parameters."""
    spec = aci_argument_spec()
    spec.update(
        leaf_interface_profile=dict(type='str', aliases=['name', 'leaf_interface_profile_name']),  # Not required for querying all objects
        description=dict(type='str', aliases=['descr']),
        state=dict(type='str', default='present', choices=['absent', 'present', 'query']),
    )
    module = AnsibleModule(
        argument_spec=spec,
        supports_check_mode=True,
        # The profile name may only be omitted when querying all objects.
        required_if=[
            ['state', 'absent', ['leaf_interface_profile']],
            ['state', 'present', ['leaf_interface_profile']],
        ],
    )
    params = module.params
    profile_name = params.get('leaf_interface_profile')
    state = params.get('state')
    aci = ACIModule(module)
    # Build the REST URL for the infraAccPortP object addressed by name.
    aci.construct_url(
        root_class=dict(
            aci_class='infraAccPortP',
            aci_rn='infra/accportprof-{0}'.format(profile_name),
            module_object=profile_name,
            target_filter={'name': profile_name},
        ),
    )
    aci.get_existing()
    if state == 'present':
        # Push only the delta between the desired and the existing config.
        aci.payload(
            aci_class='infraAccPortP',
            class_config=dict(
                name=profile_name,
                descr=params.get('description'),
            ),
        )
        aci.get_diff(aci_class='infraAccPortP')
        aci.post_config()
    elif state == 'absent':
        aci.delete_config()
    aci.exit_json()
if __name__ == "__main__":
    main()
| gpl-3.0 |
aerickson/ansible | lib/ansible/plugins/lookup/nested.py | 157 | 2100 | # (c) 2012, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from jinja2.exceptions import UndefinedError
from ansible.errors import AnsibleError, AnsibleUndefinedVariable
from ansible.plugins.lookup import LookupBase
from ansible.utils.listify import listify_lookup_plugin_terms
class LookupModule(LookupBase):
    """Lookup producing the cartesian product of several lists
    (``with_nested``)."""

    def _lookup_variables(self, terms, variables):
        """Template each term into a concrete list.

        Raises AnsibleUndefinedVariable when a term references an
        undefined variable.
        """
        results = []
        for term in terms:
            try:
                intermediate = listify_lookup_plugin_terms(term, templar=self._templar, loader=self._loader, fail_on_undefined=True)
            except UndefinedError as e:
                raise AnsibleUndefinedVariable("One of the nested variables was undefined. The error was: %s" % e)
            results.append(intermediate)
        return results

    def run(self, terms, variables=None, **kwargs):
        terms = self._lookup_variables(terms, variables)

        # Combine right-to-left: pop lists off the reversed copy so the
        # first term ends up as the outermost dimension of the product.
        my_list = terms[:]
        my_list.reverse()
        if not my_list:
            raise AnsibleError("with_nested requires at least one element in the nested list")
        result = my_list.pop()
        while my_list:
            result = self._combine(result, my_list.pop())
        # Each product entry is a nested tuple/list; flatten it to one level.
        return [self._flatten(x) for x in result]
| gpl-3.0 |
wmbutler/courtlistener | alert/userHandling/urls.py | 2 | 2321 | from alert.AuthenticationBackend import ConfirmedEmailAuthenticationForm
from alert.userHandling.forms import (
CustomPasswordResetForm, CustomSetPasswordForm,
)
from alert.userHandling.views import (
confirmEmail, deleteProfile, deleteProfileDone, emailConfirmSuccess,
password_change, register, register_success,
request_email_confirmation, view_favorites, view_alerts, view_settings
)
from django.conf.urls import patterns, url
from django.contrib.auth.views import (
login, logout, password_reset, password_reset_done, password_reset_confirm
)
from django.views.generic import RedirectView
# URL routes for the user-handling app (legacy Django patterns() style;
# entries are matched top to bottom).
urlpatterns = patterns('',
    # Sign in/out and password pages
    url(r'^sign-in/$', login, {
        'authentication_form': ConfirmedEmailAuthenticationForm,
        'extra_context': {'private': False}},
        name="sign-in"),
    (r'^sign-out/$', logout, {'extra_context': {'private': False}}),
    (r'^reset-password/$', password_reset,
     {'extra_context': {'private': False},
      'password_reset_form': CustomPasswordResetForm}),
    (r'^reset-password/instructions-sent/$', password_reset_done,
     {'extra_context': {'private': False}}),
    (r'^confirm-password/(?P<uidb36>.*)/(?P<token>.*)/$',
     password_reset_confirm,
     {'post_reset_redirect': '/reset-password/complete/',
      'set_password_form': CustomSetPasswordForm,
      'extra_context': {'private': False}}),
    # "Complete" page doubles as a login form.
    (r'^reset-password/complete/$', login, {
        'template_name': 'registration/password_reset_complete.html',
        'extra_context': {'private': False}}),
    # Settings pages
    url(r'^profile/settings/$', view_settings, name='view_settings'),
    (r'^profile/$', RedirectView.as_view(
        url='/profile/settings/',
        permanent=True)
    ),
    (r'^profile/favorites/$', view_favorites),
    (r'^profile/alerts/$', view_alerts),
    (r'^profile/password/change/$', password_change),
    (r'^profile/delete/$', deleteProfile),
    (r'^profile/delete/done/$', deleteProfileDone),
    # Registration pages
    url(r'^register/$', register, name="register"),
    (r'^register/success/$', register_success),
    # Email confirmation pages
    (r'^email/confirm/([0-9a-f]{40})/$', confirmEmail),
    (r'^email-confirmation/request/$', request_email_confirmation),
    (r'^email-confirmation/success/$', emailConfirmSuccess),
)
| agpl-3.0 |
dol-sen/portage | pym/_emerge/depgraph.py | 2 | 344824 | # Copyright 1999-2017 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
from __future__ import division, print_function, unicode_literals
import collections
import errno
import functools
import io
import logging
import stat
import sys
import textwrap
import warnings
from collections import deque
from itertools import chain
import portage
from portage import os, OrderedDict
from portage import _unicode_decode, _unicode_encode, _encodings
from portage.const import PORTAGE_PACKAGE_ATOM, USER_CONFIG_PATH, VCS_DIRS
from portage.dbapi import dbapi
from portage.dbapi.dep_expand import dep_expand
from portage.dbapi.DummyTree import DummyTree
from portage.dbapi.IndexedPortdb import IndexedPortdb
from portage.dbapi._similar_name_search import similar_name_search
from portage.dep import Atom, best_match_to_list, extract_affecting_use, \
check_required_use, human_readable_required_use, match_from_list, \
_repo_separator
from portage.dep._slot_operator import (ignore_built_slot_operator_deps,
strip_slots)
from portage.eapi import eapi_has_strong_blocks, eapi_has_required_use, \
_get_eapi_attrs
from portage.exception import (InvalidAtom, InvalidData, InvalidDependString,
PackageNotFound, PortageException)
from portage.localization import _
from portage.output import colorize, create_color_func, \
darkgreen, green
bad = create_color_func("BAD")
from portage.package.ebuild.config import _get_feature_flags
from portage.package.ebuild.getmaskingstatus import \
_getmaskingstatus, _MaskReason
from portage._sets import SETPREFIX
from portage._sets.base import InternalPackageSet
from portage.util import ConfigProtect, shlex_split, new_protect_filename
from portage.util import cmp_sort_key, writemsg, writemsg_stdout
from portage.util import ensure_dirs
from portage.util import writemsg_level, write_atomic
from portage.util.digraph import digraph
from portage.util._async.TaskScheduler import TaskScheduler
from portage.util._eventloop.EventLoop import EventLoop
from portage.util._eventloop.global_event_loop import global_event_loop
from portage.versions import catpkgsplit
from _emerge.AtomArg import AtomArg
from _emerge.Blocker import Blocker
from _emerge.BlockerCache import BlockerCache
from _emerge.BlockerDepPriority import BlockerDepPriority
from .chk_updated_cfg_files import chk_updated_cfg_files
from _emerge.countdown import countdown
from _emerge.create_world_atom import create_world_atom
from _emerge.Dependency import Dependency
from _emerge.DependencyArg import DependencyArg
from _emerge.DepPriority import DepPriority
from _emerge.DepPriorityNormalRange import DepPriorityNormalRange
from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
from _emerge.EbuildMetadataPhase import EbuildMetadataPhase
from _emerge.FakeVartree import FakeVartree
from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
from _emerge.is_valid_package_atom import insert_category_into_atom, \
is_valid_package_atom
from _emerge.Package import Package
from _emerge.PackageArg import PackageArg
from _emerge.PackageVirtualDbapi import PackageVirtualDbapi
from _emerge.RootConfig import RootConfig
from _emerge.search import search
from _emerge.SetArg import SetArg
from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
from _emerge.UnmergeDepPriority import UnmergeDepPriority
from _emerge.UseFlagDisplay import pkg_use_display
from _emerge.UserQuery import UserQuery
from _emerge.resolver.backtracking import Backtracker, BacktrackParameter
from _emerge.resolver.DbapiProvidesIndex import DbapiProvidesIndex
from _emerge.resolver.package_tracker import PackageTracker, PackageTrackerDbapiWrapper
from _emerge.resolver.slot_collision import slot_conflict_handler
from _emerge.resolver.circular_dependency import circular_dependency_handler
from _emerge.resolver.output import Display, format_unmatched_atom
# Python 2/3 compatibility aliases: under Python 3 the removed Python 2
# builtin names used throughout this module are bound to their closest
# Python 3 equivalents.
if sys.hexversion >= 0x3000000:
	basestring = str
	long = int
	_unicode = str
else:
	_unicode = unicode
class _scheduler_graph_config(object):
def __init__(self, trees, pkg_cache, graph, mergelist):
self.trees = trees
self.pkg_cache = pkg_cache
self.graph = graph
self.mergelist = mergelist
def _wildcard_set(atoms):
	"""Build an InternalPackageSet of wildcard atoms from raw tokens.

	Tokens that do not parse as atoms on their own (e.g. a bare package
	name without a category) are retried with a '*/' category wildcard.
	"""
	pkgs = InternalPackageSet(allow_wildcard=True)
	for token in atoms:
		try:
			atom = Atom(token, allow_wildcard=True, allow_repo=False)
		except portage.exception.InvalidAtom:
			# No valid category given: match the name in any category.
			atom = Atom("*/" + token, allow_wildcard=True, allow_repo=False)
		pkgs.add(atom)
	return pkgs
class _frozen_depgraph_config(object):
	"""Configuration shared by depgraph instances and not mutated during
	a calculation: parsed options, per-root trees wrapped with
	FakeVartree, and the wildcard atom sets derived from command-line
	options."""
	def __init__(self, settings, trees, myopts, params, spinner):
		self.settings = settings
		self.target_root = settings["EROOT"]
		self.myopts = myopts
		self.edebug = 0
		if settings.get("PORTAGE_DEBUG", "") == "1":
			self.edebug = 1
		self.spinner = spinner
		self.requested_depth = params.get("deep", 0)
		self._running_root = trees[trees._running_eroot]["root_config"]
		self.pkgsettings = {}
		self.trees = {}
		self._trees_orig = trees
		self.roots = {}
		# All Package instances
		self._pkg_cache = {}
		self._highest_license_masked = {}
		# We can't know that an soname dep is unsatisfied if there are
		# any unbuilt ebuilds in the graph, since unbuilt ebuilds have
		# no soname data. Therefore, only enable soname dependency
		# resolution if --usepkgonly is enabled, or for removal actions.
		self.soname_deps_enabled = (
			("--usepkgonly" in myopts or "remove" in params) and
			params.get("ignore_soname_deps") != "y")
		dynamic_deps = myopts.get("--dynamic-deps", "y") != "n"
		ignore_built_slot_operator_deps = myopts.get(
			"--ignore-built-slot-operator-deps", "n") == "y"
		# Wrap each root's trees so the graph operates on a FakeVartree
		# snapshot instead of the live vardb.
		for myroot in trees:
			self.trees[myroot] = {}
			# Create a RootConfig instance that references
			# the FakeVartree instead of the real one.
			self.roots[myroot] = RootConfig(
				trees[myroot]["vartree"].settings,
				self.trees[myroot],
				trees[myroot]["root_config"].setconfig)
			for tree in ("porttree", "bintree"):
				self.trees[myroot][tree] = trees[myroot][tree]
			self.trees[myroot]["vartree"] = \
				FakeVartree(trees[myroot]["root_config"],
					pkg_cache=self._pkg_cache,
					pkg_root_config=self.roots[myroot],
					dynamic_deps=dynamic_deps,
					ignore_built_slot_operator_deps=ignore_built_slot_operator_deps,
					soname_deps=self.soname_deps_enabled)
			self.pkgsettings[myroot] = portage.config(
				clone=self.trees[myroot]["vartree"].settings)
			if self.soname_deps_enabled and "remove" not in params:
				self.trees[myroot]["bintree"] = DummyTree(
					DbapiProvidesIndex(trees[myroot]["bintree"].dbapi))
		self._required_set_names = set(["world"])
		# Turn the space-separated option values into wildcard atom sets.
		atoms = ' '.join(myopts.get("--exclude", [])).split()
		self.excluded_pkgs = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--reinstall-atoms", [])).split()
		self.reinstall_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--usepkg-exclude", [])).split()
		self.usepkg_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--useoldpkg-atoms", [])).split()
		self.useoldpkg_atoms = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-exclude", [])).split()
		self.rebuild_exclude = _wildcard_set(atoms)
		atoms = ' '.join(myopts.get("--rebuild-ignore", [])).split()
		self.rebuild_ignore = _wildcard_set(atoms)
		self.rebuild_if_new_rev = "--rebuild-if-new-rev" in myopts
		self.rebuild_if_new_ver = "--rebuild-if-new-ver" in myopts
		self.rebuild_if_unbuilt = "--rebuild-if-unbuilt" in myopts
class _depgraph_sets(object):
	"""Tracks the package sets and argument atoms that seed a depgraph."""
	def __init__(self):
		# All sets added to the graph; plain command-line atoms that are
		# not part of any set live under the '__non_set_args__' key.
		self.sets = {'__non_set_args__': InternalPackageSet(allow_repo=True)}
		# Every atom from every set above, including argument atoms.
		self.atoms = InternalPackageSet(allow_repo=True)
		self.atom_arg_map = {}
class _rebuild_config(object):
	"""Tracks buildtime dependency edges and decides which installed
	packages must be rebuilt or reinstalled under the
	--rebuild-if-new-rev / --rebuild-if-new-ver / --rebuild-if-unbuilt
	options."""
	def __init__(self, frozen_config, backtrack_parameters):
		self._graph = digraph()
		self._frozen_config = frozen_config
		self.rebuild_list = backtrack_parameters.rebuild_list.copy()
		self.orig_rebuild_list = self.rebuild_list.copy()
		self.reinstall_list = backtrack_parameters.reinstall_list.copy()
		self.rebuild_if_new_rev = frozen_config.rebuild_if_new_rev
		self.rebuild_if_new_ver = frozen_config.rebuild_if_new_ver
		self.rebuild_if_unbuilt = frozen_config.rebuild_if_unbuilt
		# True when any of the rebuild triggers is active at all.
		self.rebuild = (self.rebuild_if_new_rev or self.rebuild_if_new_ver or
			self.rebuild_if_unbuilt)
	def add(self, dep_pkg, dep):
		"""Record a buildtime edge dep_pkg -> parent in the rebuild graph,
		unless excluded/ignored or rebuild handling is disabled."""
		parent = dep.collapsed_parent
		priority = dep.collapsed_priority
		rebuild_exclude = self._frozen_config.rebuild_exclude
		rebuild_ignore = self._frozen_config.rebuild_ignore
		# Only built parents with a buildtime dependency on a Package
		# instance are candidates for rebuild tracking.
		if (self.rebuild and isinstance(parent, Package) and
			parent.built and priority.buildtime and
			isinstance(dep_pkg, Package) and
			not rebuild_exclude.findAtomForPackage(parent) and
			not rebuild_ignore.findAtomForPackage(dep_pkg)):
			self._graph.add(dep_pkg, parent, priority)
	def _needs_rebuild(self, dep_pkg):
		"""Check whether packages that depend on dep_pkg need to be rebuilt."""
		dep_root_slot = (dep_pkg.root, dep_pkg.slot_atom)
		if dep_pkg.built or dep_root_slot in self.orig_rebuild_list:
			return False
		if self.rebuild_if_unbuilt:
			# dep_pkg is being installed from source, so binary
			# packages for parents are invalid. Force rebuild
			return True
		trees = self._frozen_config.trees
		vardb = trees[dep_pkg.root]["vartree"].dbapi
		if self.rebuild_if_new_rev:
			# Parent packages are valid if a package with the same
			# cpv is already installed.
			return dep_pkg.cpv not in vardb.match(dep_pkg.slot_atom)
		# Otherwise, parent packages are valid if a package with the same
		# version (excluding revision) is already installed.
		assert self.rebuild_if_new_ver
		cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
		for inst_cpv in vardb.match(dep_pkg.slot_atom):
			inst_cpv_norev = catpkgsplit(inst_cpv)[:-1]
			if inst_cpv_norev == cpv_norev:
				return False
		return True
	def _trigger_rebuild(self, parent, build_deps):
		"""Decide whether parent must be rebuilt (returns True) or
		reinstalled from a newer binary, given its buildtime deps as a
		{slot_atom: dep_pkg} mapping. Updates rebuild_list /
		reinstall_list as a side effect."""
		root_slot = (parent.root, parent.slot_atom)
		if root_slot in self.rebuild_list:
			return False
		trees = self._frozen_config.trees
		reinstall = False
		for slot_atom, dep_pkg in build_deps.items():
			dep_root_slot = (dep_pkg.root, slot_atom)
			if self._needs_rebuild(dep_pkg):
				self.rebuild_list.add(root_slot)
				return True
			elif ("--usepkg" in self._frozen_config.myopts and
				(dep_root_slot in self.reinstall_list or
				dep_root_slot in self.rebuild_list or
				not dep_pkg.installed)):
				# A direct rebuild dependency is being installed. We
				# should update the parent as well to the latest binary,
				# if that binary is valid.
				#
				# To validate the binary, we check whether all of the
				# rebuild dependencies are present on the same binhost.
				#
				# 1) If parent is present on the binhost, but one of its
				#    rebuild dependencies is not, then the parent should
				#    be rebuilt from source.
				# 2) Otherwise, the parent binary is assumed to be valid,
				#    because all of its rebuild dependencies are
				#    consistent.
				bintree = trees[parent.root]["bintree"]
				uri = bintree.get_pkgindex_uri(parent.cpv)
				dep_uri = bintree.get_pkgindex_uri(dep_pkg.cpv)
				bindb = bintree.dbapi
				if self.rebuild_if_new_ver and uri and uri != dep_uri:
					# Accept any same-version (ignoring revision) binary
					# of the dep that shares the parent's binhost.
					cpv_norev = catpkgsplit(dep_pkg.cpv)[:-1]
					for cpv in bindb.match(dep_pkg.slot_atom):
						if cpv_norev == catpkgsplit(cpv)[:-1]:
							dep_uri = bintree.get_pkgindex_uri(cpv)
							if uri == dep_uri:
								break
				if uri and uri != dep_uri:
					# 1) Remote binary package is invalid because it was
					#    built without dep_pkg. Force rebuild.
					self.rebuild_list.add(root_slot)
					return True
				elif (parent.installed and
					root_slot not in self.reinstall_list):
					try:
						bin_build_time, = bindb.aux_get(parent.cpv,
							["BUILD_TIME"])
					except KeyError:
						continue
					if bin_build_time != _unicode(parent.build_time):
						# 2) Remote binary package is valid, and local package
						#    is not up to date. Force reinstall.
						reinstall = True
		if reinstall:
			self.reinstall_list.add(root_slot)
		return reinstall
	def trigger_rebuilds(self):
		"""
		Trigger rebuilds where necessary. If pkgA has been updated, and pkgB
		depends on pkgA at both build-time and run-time, pkgB needs to be
		rebuilt.
		"""
		need_restart = False
		graph = self._graph
		# Maps each parent package to {slot_atom: child_pkg} for the
		# buildtime children processed so far.
		build_deps = {}
		leaf_nodes = deque(graph.leaf_nodes())
		# Trigger rebuilds bottom-up (starting with the leaves) so that parents
		# will always know which children are being rebuilt.
		while graph:
			if not leaf_nodes:
				# We'll have to drop an edge. This should be quite rare.
				leaf_nodes.append(graph.order[-1])
			node = leaf_nodes.popleft()
			if node not in graph:
				# This can be triggered by circular dependencies.
				continue
			slot_atom = node.slot_atom
			# Remove our leaf node from the graph, keeping track of deps.
			parents = graph.parent_nodes(node)
			graph.remove(node)
			node_build_deps = build_deps.get(node, {})
			for parent in parents:
				if parent == node:
					# Ignore a direct cycle.
					continue
				parent_bdeps = build_deps.setdefault(parent, {})
				parent_bdeps[slot_atom] = node
				if not graph.child_nodes(parent):
					leaf_nodes.append(parent)
			# Trigger rebuilds for our leaf node. Because all of our children
			# have been processed, the build_deps will be completely filled in,
			# and self.rebuild_list / self.reinstall_list will tell us whether
			# any of our children need to be rebuilt or reinstalled.
			if self._trigger_rebuild(node, node_build_deps):
				need_restart = True
		return need_restart
class _dynamic_depgraph_config(object):
    """
    Mutable per-run depgraph state.

    A fresh instance is created for every depgraph, while the companion
    ``_frozen_depgraph_config`` may be shared across backtracking runs
    (see the comment in depgraph._load_vdb about backtracking depgraphs
    sharing the same frozen_config). Most attributes are caches, work
    queues, and bookkeeping structures that the depgraph methods fill in
    incrementally while dependencies are traversed.
    """

    def __init__(self, depgraph, myparams, allow_backtracking, backtrack_parameters):
        # Copy so that later parameter mutations don't leak to the caller.
        self.myparams = myparams.copy()
        # Set by depgraph._load_vdb once installed-package metadata is loaded.
        self._vdb_loaded = False
        self._allow_backtracking = allow_backtracking
        # Maps nodes to the reasons they were selected for reinstallation.
        self._reinstall_nodes = {}
        # Contains a filtered view of preferred packages that are selected
        # from available repositories.
        self._filtered_trees = {}
        # Contains installed packages and new packages that have been added
        # to the graph.
        self._graph_trees = {}
        # Caches visible packages returned from _select_package, for use in
        # depgraph._iter_atoms_for_pkg() SLOT logic.
        self._visible_pkgs = {}
        # contains the args created by select_files
        self._initial_arg_list = []
        self.digraph = portage.digraph()
        # manages sets added to the graph
        self.sets = {}
        # contains all nodes pulled in by self.sets
        self._set_nodes = set()
        # Contains only Blocker -> Uninstall edges
        self._blocker_uninstalls = digraph()
        # Contains only Package -> Blocker edges
        self._blocker_parents = digraph()
        # Contains only irrelevant Package -> Blocker edges
        self._irrelevant_blockers = digraph()
        # Contains only unsolvable Package -> Blocker edges
        self._unsolvable_blockers = digraph()
        # Contains all Blocker -> Blocked Package edges
        # Do not initialize this until the depgraph _validate_blockers
        # method is called, so that the _in_blocker_conflict method can
        # assert that _validate_blockers has been called first.
        self._blocked_pkgs = None
        # Contains world packages that have been protected from
        # uninstallation but may not have been added to the graph
        # if the graph is not complete yet.
        self._blocked_world_pkgs = {}
        # Contains packages whose dependencies have been traversed.
        # This use used to check if we have accounted for blockers
        # relevant to a package.
        self._traversed_pkg_deps = set()
        self._parent_atoms = {}
        self._slot_conflict_handler = None
        self._circular_dependency_handler = None
        self._serialized_tasks_cache = None
        self._scheduler_graph = None
        self._displayed_list = None
        self._pprovided_args = []
        self._missing_args = []
        self._masked_installed = set()
        self._masked_license_updates = set()
        self._unsatisfied_deps_for_display = []
        self._unsatisfied_blockers_for_display = None
        self._circular_deps_for_display = None
        self._dep_stack = []
        self._dep_disjunctive_stack = []
        self._unsatisfied_deps = []
        self._initially_unsatisfied_deps = []
        self._ignored_deps = []
        self._highest_pkg_cache = {}
        self._highest_pkg_cache_cp_map = {}
        self._flatten_atoms_cache = {}
        # Binary packages that have been rejected because their USE
        # didn't match the user's config. It maps packages to a set
        # of flags causing the rejection.
        self.ignored_binaries = {}

        # State carried over from previous backtracking runs via the
        # backtrack_parameters object.
        self._needed_unstable_keywords = backtrack_parameters.needed_unstable_keywords
        self._needed_p_mask_changes = backtrack_parameters.needed_p_mask_changes
        self._needed_license_changes = backtrack_parameters.needed_license_changes
        self._needed_use_config_changes = backtrack_parameters.needed_use_config_changes
        self._runtime_pkg_mask = backtrack_parameters.runtime_pkg_mask
        self._slot_operator_replace_installed = backtrack_parameters.slot_operator_replace_installed
        self._prune_rebuilds = backtrack_parameters.prune_rebuilds
        self._need_restart = False
        self._need_config_reload = False
        # For conditions that always require user intervention, such as
        # unsatisfied REQUIRED_USE (currently has no autounmask support).
        self._skip_restart = False
        self._backtrack_infos = {}
        self._buildpkgonly_deps_unsatisfied = False
        self._autounmask = depgraph._frozen_config.myopts.get('--autounmask') != 'n'
        self._displayed_autounmask = False
        self._success_without_autounmask = False
        self._autounmask_backtrack_disabled = False
        self._required_use_unsatisfied = False
        self._traverse_ignored_deps = False
        self._complete_mode = False
        self._slot_operator_deps = {}
        self._installed_sonames = collections.defaultdict(list)
        self._package_tracker = PackageTracker(
            soname_deps=depgraph._frozen_config.soname_deps_enabled)
        # Track missed updates caused by solved conflicts.
        self._conflict_missed_update = collections.defaultdict(dict)

        # Build per-root tree mappings used by dep_check() and atom
        # selection below.
        for myroot in depgraph._frozen_config.trees:
            self.sets[myroot] = _depgraph_sets()
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            # This dbapi instance will model the state that the vdb will
            # have after new packages have been installed.
            fakedb = PackageTrackerDbapiWrapper(myroot, self._package_tracker)

            # Minimal tree-like stand-in object; only its dbapi attribute
            # is consumed by dep_check().
            def graph_tree():
                pass
            graph_tree.dbapi = fakedb
            self._graph_trees[myroot] = {}
            self._filtered_trees[myroot] = {}
            # Substitute the graph tree for the vartree in dep_check() since we
            # want atom selections to be consistent with package selections
            # have already been made.
            self._graph_trees[myroot]["porttree"] = graph_tree
            self._graph_trees[myroot]["vartree"] = graph_tree
            self._graph_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._graph_trees[myroot]["graph"] = self.digraph
            self._graph_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
            self._graph_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

            def filtered_tree():
                pass
            filtered_tree.dbapi = _dep_check_composite_db(depgraph, myroot)
            self._filtered_trees[myroot]["porttree"] = filtered_tree
            self._visible_pkgs[myroot] = PackageVirtualDbapi(vardb.settings)

            # Passing in graph_tree as the vartree here could lead to better
            # atom selections in some cases by causing atoms for packages that
            # have been added to the graph to be preferred over other choices.
            # However, it can trigger atom selections that result in
            # unresolvable direct circular dependencies. For example, this
            # happens with gwydion-dylan which depends on either itself or
            # gwydion-dylan-bin. In case gwydion-dylan is not yet installed,
            # gwydion-dylan-bin needs to be selected in order to avoid a
            # an unresolvable direct circular dependency.
            #
            # To solve the problem described above, pass in "graph_db" so that
            # packages that have been added to the graph are distinguishable
            # from other available packages and installed packages. Also, pass
            # the parent package into self._select_atoms() calls so that
            # unresolvable direct circular dependencies can be detected and
            # avoided when possible.
            self._filtered_trees[myroot]["graph_db"] = graph_tree.dbapi
            self._filtered_trees[myroot]["graph"] = self.digraph
            self._filtered_trees[myroot]["vartree"] = \
                depgraph._frozen_config.trees[myroot]["vartree"]
            self._filtered_trees[myroot]["want_update_pkg"] = depgraph._want_update_pkg
            self._filtered_trees[myroot]["downgrade_probe"] = depgraph._downgrade_probe

            dbs = []
            #               (db, pkg_type, built, installed, db_keys)
            if "remove" in self.myparams:
                # For removal operations, use _dep_check_composite_db
                # for availability and visibility checks. This provides
                # consistency with install operations, so we don't
                # get install/uninstall cycles like in bug #332719.
                self._graph_trees[myroot]["porttree"] = filtered_tree
            else:
                if "--usepkgonly" not in depgraph._frozen_config.myopts:
                    portdb = depgraph._frozen_config.trees[myroot]["porttree"].dbapi
                    db_keys = list(portdb._aux_cache_keys)
                    dbs.append((portdb, "ebuild", False, False, db_keys))

                if "--usepkg" in depgraph._frozen_config.myopts:
                    bindb = depgraph._frozen_config.trees[myroot]["bintree"].dbapi
                    db_keys = list(bindb._aux_cache_keys)
                    dbs.append((bindb, "binary", True, False, db_keys))

            # The installed-package db is always consulted last.
            vardb = depgraph._frozen_config.trees[myroot]["vartree"].dbapi
            db_keys = list(depgraph._frozen_config._trees_orig[myroot
                ]["vartree"].dbapi._aux_cache_keys)
            dbs.append((vardb, "installed", True, True, db_keys))
            self._filtered_trees[myroot]["dbs"] = dbs
class depgraph(object):
# Represents the depth of a node that is unreachable from explicit
# user arguments (or their deep dependencies). Such nodes are pulled
# in by the _complete_graph method.
_UNREACHABLE_DEPTH = object()
pkg_tree_map = RootConfig.pkg_tree_map
def __init__(self, settings, trees, myopts, myparams, spinner,
    frozen_config=None, backtrack_parameters=BacktrackParameter(), allow_backtracking=False):
    """
    Build a dependency graph calculator.

    @param settings: global portage config
    @param trees: per-root tree mapping
    @param myopts: parsed emerge command-line options
    @param myparams: depgraph parameters
    @param spinner: progress spinner (may be None)
    @param frozen_config: optional _frozen_depgraph_config to reuse
        across backtracking runs; built here when not supplied
    @param backtrack_parameters: state accumulated from previous
        backtracking runs. NOTE(review): the default is a shared
        instance — assumed immutable; verify BacktrackParameter
        semantics before relying on this.
    @param allow_backtracking: whether this run may record backtrack info
    """
    if frozen_config is None:
        # First (non-backtracking) run: build the shareable frozen state.
        frozen_config = _frozen_depgraph_config(settings, trees,
            myopts, myparams, spinner)
    self._frozen_config = frozen_config
    self._dynamic_config = _dynamic_depgraph_config(self, myparams,
        allow_backtracking, backtrack_parameters)
    self._rebuild = _rebuild_config(frozen_config, backtrack_parameters)

    # Aliases for the concrete selection strategies used by this class.
    self._select_atoms = self._select_atoms_highest_available
    self._select_package = self._select_pkg_highest_available

    # Use the global loop only for internal callers; external API users
    # get a private, non-main event loop.
    self._event_loop = (portage._internal_caller and
        global_event_loop() or EventLoop(main=False))

    # Parent package passed to _select_atoms calls, for circular-dep
    # detection (see the comment block in _dynamic_depgraph_config).
    self._select_atoms_parent = None

    self.query = UserQuery(myopts).query
def _index_binpkgs(self):
    """
    Populate each root's binary-package provides index by injecting
    every binary package into its bindb. Skips roots whose index is
    already populated, so repeated calls (backtracking) are cheap.
    """
    for root in self._frozen_config.trees:
        bindb = self._frozen_config.trees[root]["bintree"].dbapi
        if bindb._provides_index:
            # don't repeat this when backtracking
            continue
        root_config = self._frozen_config.roots[root]
        for cpv in self._frozen_config._trees_orig[
            root]["bintree"].dbapi.cpv_all():
            bindb._provides_inject(
                self._pkg(cpv, "binary", root_config))
def _load_vdb(self):
    """
    Load installed package metadata if appropriate. This used to be called
    from the constructor, but that wasn't very nice since this procedure
    is slow and it generates spinner output. So, now it's called on-demand
    by various methods when necessary.

    Idempotent: guarded by _dynamic_config._vdb_loaded.
    """

    if self._dynamic_config._vdb_loaded:
        return

    for myroot in self._frozen_config.trees:
        # dynamic_deps defaults to enabled unless explicitly set to "n".
        dynamic_deps = self._dynamic_config.myparams.get(
            "dynamic_deps", "y") != "n"
        preload_installed_pkgs = \
            "--nodeps" not in self._frozen_config.myopts

        fake_vartree = self._frozen_config.trees[myroot]["vartree"]
        if not fake_vartree.dbapi:
            # This needs to be called for the first depgraph, but not for
            # backtracking depgraphs that share the same frozen_config.
            fake_vartree.sync()

            # FakeVartree.sync() populates virtuals, and we want
            # self.pkgsettings to have them populated too.
            self._frozen_config.pkgsettings[myroot] = \
                portage.config(clone=fake_vartree.settings)

        if preload_installed_pkgs:
            vardb = fake_vartree.dbapi

            if not dynamic_deps:
                # Static deps: register each installed package directly.
                for pkg in vardb:
                    self._dynamic_config._package_tracker.add_installed_pkg(pkg)
                    self._add_installed_sonames(pkg)
            else:
                # Dynamic deps: regenerate metadata concurrently via the
                # generator of EbuildMetadataPhase tasks.
                max_jobs = self._frozen_config.myopts.get("--jobs")
                max_load = self._frozen_config.myopts.get("--load-average")
                scheduler = TaskScheduler(
                    self._dynamic_deps_preload(fake_vartree),
                    max_jobs=max_jobs,
                    max_load=max_load,
                    event_loop=fake_vartree._portdb._event_loop)
                scheduler.start()
                scheduler.wait()

    self._dynamic_config._vdb_loaded = True
def _dynamic_deps_preload(self, fake_vartree):
    """
    Generator consumed by TaskScheduler in _load_vdb: registers each
    installed package, then either preloads its dynamic deps from valid
    cache or yields an EbuildMetadataPhase task to regenerate metadata.
    """
    portdb = fake_vartree._portdb
    for pkg in fake_vartree.dbapi:
        self._spinner_update()
        self._dynamic_config._package_tracker.add_installed_pkg(pkg)
        self._add_installed_sonames(pkg)
        ebuild_path, repo_path = \
            portdb.findname2(pkg.cpv, myrepo=pkg.repo)
        if ebuild_path is None:
            # No corresponding ebuild; fall back to installed metadata.
            fake_vartree.dynamic_deps_preload(pkg, None)
            continue
        metadata, ebuild_hash = portdb._pull_valid_cache(
            pkg.cpv, ebuild_path, repo_path)
        if metadata is not None:
            fake_vartree.dynamic_deps_preload(pkg, metadata)
        else:
            # Cache miss/stale: schedule metadata regeneration; the exit
            # listener feeds the result back into fake_vartree.
            proc = EbuildMetadataPhase(cpv=pkg.cpv,
                ebuild_hash=ebuild_hash,
                portdb=portdb, repo_path=repo_path,
                settings=portdb.doebuild_settings)
            proc.addExitListener(
                self._dynamic_deps_proc_exit(pkg, fake_vartree))
            yield proc
class _dynamic_deps_proc_exit(object):
__slots__ = ('_pkg', '_fake_vartree')
def __init__(self, pkg, fake_vartree):
self._pkg = pkg
self._fake_vartree = fake_vartree
def __call__(self, proc):
metadata = None
if proc.returncode == os.EX_OK:
metadata = proc.metadata
self._fake_vartree.dynamic_deps_preload(self._pkg, metadata)
def _spinner_update(self):
if self._frozen_config.spinner:
self._frozen_config.spinner.update()
def _compute_abi_rebuild_info(self):
    """
    Fill self._forced_rebuilds with packages that cause rebuilds.

    The result maps root -> child package -> set of parent packages,
    where each child (the updated dependency) forced the listed parents
    to be rebuilt/reinstalled.
    """

    debug = "--debug" in self._frozen_config.myopts
    installed_sonames = self._dynamic_config._installed_sonames
    package_tracker = self._dynamic_config._package_tracker

    # Get all atoms that might have caused a forced rebuild.
    atoms = {}
    for s in self._dynamic_config._initial_arg_list:
        if s.force_reinstall:
            root = s.root_config.root
            atoms.setdefault(root, set()).update(s.pset)

    if debug:
        writemsg_level("forced reinstall atoms:\n",
            level=logging.DEBUG, noiselevel=-1)

        for root in atoms:
            writemsg_level(" root: %s\n" % root,
                level=logging.DEBUG, noiselevel=-1)

            for atom in atoms[root]:
                writemsg_level(" atom: %s\n" % atom,
                    level=logging.DEBUG, noiselevel=-1)

        writemsg_level("\n\n",
            level=logging.DEBUG, noiselevel=-1)

    # Go through all slot operator deps and check if one of these deps
    # has a parent that is matched by one of the atoms from above.
    forced_rebuilds = {}

    for root, rebuild_atoms in atoms.items():
        for slot_atom in rebuild_atoms:
            inst_pkg, reinst_pkg = \
                self._select_pkg_from_installed(root, slot_atom)

            # Only interesting when an installed package is actually
            # being replaced by a different instance.
            if inst_pkg is reinst_pkg or reinst_pkg is None:
                continue

            # Soname-based (REQUIRES) rebuild triggers: if a provider
            # of a required soname disappears from the final graph,
            # its replacement child forced this rebuild.
            if (inst_pkg is not None and
                inst_pkg.requires is not None):
                for atom in inst_pkg.requires:
                    initial_providers = installed_sonames.get(
                        (root, atom))
                    if initial_providers is None:
                        continue
                    final_provider = next(
                        package_tracker.match(root, atom),
                        None)
                    if final_provider:
                        continue
                    for provider in initial_providers:
                        # Find the replacement child.
                        child = next((pkg for pkg in
                            package_tracker.match(
                            root, provider.slot_atom)
                            if not pkg.installed), None)

                        if child is None:
                            continue

                        forced_rebuilds.setdefault(
                            root, {}).setdefault(
                            child, set()).add(inst_pkg)

            # Generate pseudo-deps for any slot-operator deps of
            # inst_pkg. Its deps aren't in _slot_operator_deps
            # because it hasn't been added to the graph, but we
            # are interested in any rebuilds that it triggered.
            built_slot_op_atoms = []
            if inst_pkg is not None:
                selected_atoms = self._select_atoms_probe(
                    inst_pkg.root, inst_pkg)
                for atom in selected_atoms:
                    if atom.slot_operator_built:
                        built_slot_op_atoms.append(atom)

                if not built_slot_op_atoms:
                    continue

            # Use a cloned list, since we may append to it below.
            deps = self._dynamic_config._slot_operator_deps.get(
                (root, slot_atom), [])[:]

            if built_slot_op_atoms and reinst_pkg is not None:
                for child in self._dynamic_config.digraph.child_nodes(
                    reinst_pkg):

                    if child.installed:
                        continue

                    for atom in built_slot_op_atoms:
                        # NOTE: Since atom comes from inst_pkg, and
                        # reinst_pkg is the replacement parent, there's
                        # no guarantee that atom will completely match
                        # child. So, simply use atom.cp and atom.slot
                        # for matching.
                        if atom.cp != child.cp:
                            continue
                        if atom.slot and atom.slot != child.slot:
                            continue
                        deps.append(Dependency(atom=atom, child=child,
                            root=child.root, parent=reinst_pkg))

            for dep in deps:
                if dep.child.installed:
                    # Find the replacement child.
                    child = next((pkg for pkg in
                        self._dynamic_config._package_tracker.match(
                        dep.root, dep.child.slot_atom)
                        if not pkg.installed), None)

                    if child is None:
                        continue

                    inst_child = dep.child

                else:
                    child = dep.child
                    inst_child = self._select_pkg_from_installed(
                        child.root, child.slot_atom)[0]

                # Make sure the child's slot/subslot has changed. If it
                # hasn't, then another child has forced this rebuild.
                if inst_child and inst_child.slot == child.slot and \
                    inst_child.sub_slot == child.sub_slot:
                    continue

                if dep.parent.installed:
                    # Find the replacement parent.
                    parent = next((pkg for pkg in
                        self._dynamic_config._package_tracker.match(
                        dep.parent.root, dep.parent.slot_atom)
                        if not pkg.installed), None)

                    if parent is None:
                        continue

                else:
                    parent = dep.parent

                # The child has forced a rebuild of the parent
                forced_rebuilds.setdefault(root, {}
                    ).setdefault(child, set()).add(parent)

    if debug:
        writemsg_level("slot operator dependencies:\n",
            level=logging.DEBUG, noiselevel=-1)

        for (root, slot_atom), deps in self._dynamic_config._slot_operator_deps.items():
            writemsg_level(" (%s, %s)\n" % \
                (root, slot_atom), level=logging.DEBUG, noiselevel=-1)
            for dep in deps:
                writemsg_level(" parent: %s\n" % dep.parent, level=logging.DEBUG, noiselevel=-1)
                writemsg_level(" child: %s (%s)\n" % (dep.child, dep.priority), level=logging.DEBUG, noiselevel=-1)

        writemsg_level("\n\n",
            level=logging.DEBUG, noiselevel=-1)

        writemsg_level("forced rebuilds:\n",
            level=logging.DEBUG, noiselevel=-1)

        for root in forced_rebuilds:
            writemsg_level(" root: %s\n" % root,
                level=logging.DEBUG, noiselevel=-1)

            for child in forced_rebuilds[root]:
                writemsg_level(" child: %s\n" % child,
                    level=logging.DEBUG, noiselevel=-1)

                for parent in forced_rebuilds[root][child]:
                    writemsg_level(" parent: %s\n" % parent,
                        level=logging.DEBUG, noiselevel=-1)

        writemsg_level("\n\n",
            level=logging.DEBUG, noiselevel=-1)

    self._forced_rebuilds = forced_rebuilds
def _show_abi_rebuild_info(self):
    """
    Print the rebuild causes recorded by _compute_abi_rebuild_info:
    for every root, each child package and the parents it forces to
    be rebuilt. Silent when nothing was recorded.
    """
    forced_rebuilds = self._forced_rebuilds
    if not forced_rebuilds:
        return

    writemsg_stdout("\nThe following packages are causing rebuilds:\n\n", noiselevel=-1)

    for root in forced_rebuilds:
        for child, parents in forced_rebuilds[root].items():
            writemsg_stdout(" %s causes rebuilds for:\n" % (child,), noiselevel=-1)
            for parent in parents:
                writemsg_stdout(" %s\n" % (parent,), noiselevel=-1)
def _eliminate_ignored_binaries(self):
"""
Eliminate any package from self._dynamic_config.ignored_binaries
for which a more optimal alternative exists.
"""
for pkg in list(self._dynamic_config.ignored_binaries):
for selected_pkg in self._dynamic_config._package_tracker.match(
pkg.root, pkg.slot_atom):
if selected_pkg > pkg:
self._dynamic_config.ignored_binaries.pop(pkg)
break
# NOTE: The Package.__ge__ implementation accounts for
# differences in build_time, so the warning about "ignored"
# packages will be triggered if both packages are the same
# version and selected_pkg is not the most recent build.
if (selected_pkg.type_name == "binary" and
selected_pkg >= pkg):
self._dynamic_config.ignored_binaries.pop(pkg)
break
if selected_pkg.installed and \
selected_pkg.cpv == pkg.cpv and \
selected_pkg.build_time == pkg.build_time:
# We don't care about ignored binaries when an
# identical installed instance is selected to
# fill the slot.
self._dynamic_config.ignored_binaries.pop(pkg)
break
def _ignored_binaries_autounmask_backtrack(self):
    """
    Check if there are ignored binaries that would have been
    accepted with the current autounmask USE changes.

    @rtype: bool
    @return: True if there are unnecessary rebuilds that
        can be avoided by backtracking
    """
    # Only meaningful when backtracking is allowed, autounmask USE
    # changes exist, and some binaries were actually ignored.
    if not all([
        self._dynamic_config._allow_backtracking,
        self._dynamic_config._needed_use_config_changes,
        self._dynamic_config.ignored_binaries]):
        return False

    self._eliminate_ignored_binaries()

    # _eliminate_ignored_binaries may have eliminated
    # all of the ignored binaries
    if not self._dynamic_config.ignored_binaries:
        return False

    # Index in-graph autounmask USE changes by root and slot atom.
    use_changes = collections.defaultdict(
        functools.partial(collections.defaultdict, dict))
    for pkg, (new_use, changes) in self._dynamic_config._needed_use_config_changes.items():
        if pkg in self._dynamic_config.digraph:
            use_changes[pkg.root][pkg.slot_atom] = (pkg, new_use)

    # An ignored binary whose USE matches the autounmask result, and
    # which is not outranked by the selected package, indicates an
    # avoidable rebuild.
    for pkg in self._dynamic_config.ignored_binaries:
        selected_pkg, new_use = use_changes[pkg.root].get(
            pkg.slot_atom, (None, None))

        if new_use is None:
            continue

        if new_use != pkg.use.enabled:
            continue

        if selected_pkg > pkg:
            continue

        return True

    return False
def _show_ignored_binaries(self):
    """
    Show binaries that have been ignored because their USE didn't
    match the user's config.

    Suppressed entirely by --quiet, and per-reason when the user has
    set binpkg_respect_use / binpkg_changed_deps explicitly.
    """
    if not self._dynamic_config.ignored_binaries \
        or '--quiet' in self._frozen_config.myopts:
        return

    # Drop entries made irrelevant by the final package selections.
    self._eliminate_ignored_binaries()

    # Regroup from pkg -> {reason: info} into reason -> {pkg: info}.
    ignored_binaries = {}

    for pkg in self._dynamic_config.ignored_binaries:
        for reason, info in self._dynamic_config.\
            ignored_binaries[pkg].items():
            ignored_binaries.setdefault(reason, {})[pkg] = info

    # An explicit "y"/"n" setting means the user already decided, so
    # the corresponding warning would be noise.
    if self._dynamic_config.myparams.get(
        "binpkg_respect_use") in ("y", "n"):
        ignored_binaries.pop("respect_use", None)

    if self._dynamic_config.myparams.get(
        "binpkg_changed_deps") in ("y", "n"):
        ignored_binaries.pop("changed_deps", None)

    if not ignored_binaries:
        return

    self._show_merge_list()

    if ignored_binaries.get("respect_use"):
        self._show_ignored_binaries_respect_use(
            ignored_binaries["respect_use"])

    if ignored_binaries.get("changed_deps"):
        self._show_ignored_binaries_changed_deps(
            ignored_binaries["changed_deps"])
def _show_ignored_binaries_respect_use(self, respect_use):
    """
    Warn about binary packages skipped because their USE flags did not
    match the user's configuration.

    @param respect_use: maps each ignored package to the set of flags
        that caused the rejection
    """
    writemsg("\n!!! The following binary packages have been ignored "
        "due to non matching USE:\n\n", noiselevel=-1)

    for pkg, flags in respect_use.items():
        # Render each offending flag with its current state: enabled
        # flags as-is, disabled flags with a leading "-".
        flag_display = " ".join(
            flag if flag in pkg.use.enabled else "-" + flag
            for flag in sorted(flags))

        # The user can paste this line into package.use
        writemsg(" =%s %s" % (pkg.cpv, flag_display), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" # for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

    footer = [
        "",
        "NOTE: The --binpkg-respect-use=n option will prevent emerge",
        " from ignoring these binary packages if possible.",
        " Using --binpkg-respect-use=y will silence this warning."
    ]
    for line in footer:
        writemsg((colorize("INFORM", line) if line else line) + "\n",
            noiselevel=-1)
def _show_ignored_binaries_changed_deps(self, changed_deps):
    """
    Warn about binary packages skipped because their recorded
    dependencies have changed since they were built.

    @param changed_deps: iterable of the ignored packages
    """
    writemsg("\n!!! The following binary packages have been "
        "ignored due to changed dependencies:\n\n",
        noiselevel=-1)

    for pkg in changed_deps:
        entry = " %s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
        if pkg.root_config.settings["ROOT"] != "/":
            entry = "%s for %s" % (entry, pkg.root)
        writemsg("%s\n" % entry, noiselevel=-1)

    footer = [
        "",
        "NOTE: The --binpkg-changed-deps=n option will prevent emerge",
        " from ignoring these binary packages if possible.",
        " Using --binpkg-changed-deps=y will silence this warning."
    ]
    for line in footer:
        writemsg((colorize("INFORM", line) if line else line) + "\n",
            noiselevel=-1)
def _get_missed_updates(self):
    """
    Collect updates that were masked at runtime or lost to solved
    conflicts, keyed by (root, slot_atom).

    @return: dict mapping (root, slot_atom) to
        (pkg, mask_type, parent_atoms)
    """
    # In order to minimize noise, show only the highest
    # missed update from each SLOT.
    missed_updates = {}
    for pkg, mask_reasons in \
        chain(self._dynamic_config._runtime_pkg_mask.items(),
            self._dynamic_config._conflict_missed_update.items()):
        if pkg.installed:
            # Exclude installed here since we only
            # want to show available updates.
            continue
        missed_update = True
        any_selected = False
        for chosen_pkg in self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom):
            any_selected = True
            # Not a missed update if something at least as good (or the
            # same version, not installed) fills the slot.
            if chosen_pkg > pkg or (not chosen_pkg.installed and \
                chosen_pkg.version == pkg.version):
                missed_update = False
                break

        if any_selected and missed_update:
            k = (pkg.root, pkg.slot_atom)
            if k in missed_updates:
                other_pkg, mask_type, parent_atoms = missed_updates[k]
                # Keep only the highest missed update per slot.
                if other_pkg > pkg:
                    continue
            # Record the first mask reason that has parent atoms.
            for mask_type, parent_atoms in mask_reasons.items():
                if not parent_atoms:
                    continue
                missed_updates[k] = (pkg, mask_type, parent_atoms)
                break

    return missed_updates
def _show_missed_update(self):
missed_updates = self._get_missed_updates()
if not missed_updates:
return
missed_update_types = {}
for pkg, mask_type, parent_atoms in missed_updates.values():
missed_update_types.setdefault(mask_type,
[]).append((pkg, parent_atoms))
if '--quiet' in self._frozen_config.myopts and \
'--debug' not in self._frozen_config.myopts:
missed_update_types.pop("slot conflict", None)
missed_update_types.pop("missing dependency", None)
self._show_missed_update_slot_conflicts(
missed_update_types.get("slot conflict"))
self._show_missed_update_unsatisfied_dep(
missed_update_types.get("missing dependency"))
def _show_missed_update_unsatisfied_dep(self, missed_updates):
    """
    Report updates skipped because of unsatisfied dependencies.

    Updates whose display raises self._backtrack_mask are collected and
    shown only in abbreviated form, to avoid flooding the terminal with
    mask messages (bug #285832).

    @param missed_updates: list of (pkg, parent_atoms) tuples, or None
    """
    if not missed_updates:
        return

    self._show_merge_list()
    backtrack_masked = []

    for pkg, parent_atoms in missed_updates:

        try:
            # A dry run with check_backtrack=True: raises
            # _backtrack_mask instead of printing the full report.
            for parent, root, atom in parent_atoms:
                self._show_unsatisfied_dep(root, atom, myparent=parent,
                    check_backtrack=True)
        except self._backtrack_mask:
            # This is displayed below in abbreviated form.
            backtrack_masked.append((pkg, parent_atoms))
            continue

        writemsg("\n!!! The following update has been skipped " + \
            "due to unsatisfied dependencies:\n\n", noiselevel=-1)

        writemsg(str(pkg.slot_atom), noiselevel=-1)
        if pkg.root_config.settings["ROOT"] != "/":
            writemsg(" for %s" % (pkg.root,), noiselevel=-1)
        writemsg("\n\n", noiselevel=-1)

        selected_pkg = next(self._dynamic_config._package_tracker.match(
            pkg.root, pkg.slot_atom), None)

        writemsg(" selected: %s\n" % (selected_pkg,), noiselevel=-1)
        writemsg(" skipped: %s (see unsatisfied dependency below)\n"
            % (pkg,), noiselevel=-1)

        for parent, root, atom in parent_atoms:
            self._show_unsatisfied_dep(root, atom, myparent=parent)
            writemsg("\n", noiselevel=-1)

    if backtrack_masked:
        # These are shown in abbreviated form, in order to avoid terminal
        # flooding from mask messages as reported in bug #285832.
        writemsg("\n!!! The following update(s) have been skipped " + \
            "due to unsatisfied dependencies\n" + \
            "!!! triggered by backtracking:\n\n", noiselevel=-1)
        for pkg, parent_atoms in backtrack_masked:
            writemsg(str(pkg.slot_atom), noiselevel=-1)
            if pkg.root_config.settings["ROOT"] != "/":
                writemsg(" for %s" % (pkg.root,), noiselevel=-1)
            writemsg("\n", noiselevel=-1)
def _show_missed_update_slot_conflicts(self, missed_updates):
    """
    Warn about updates/rebuilds skipped because of dependency
    conflicts, listing each skipped package together with the
    parents/atoms it conflicts with.

    @param missed_updates: list of (pkg, parent_atoms) tuples, or None
    """
    if not missed_updates:
        return

    self._show_merge_list()
    indent = " "
    msg = ["\nWARNING: One or more updates/rebuilds have been "
        "skipped due to a dependency conflict:\n\n"]

    for pkg, parent_atoms in missed_updates:
        msg.append(str(pkg.slot_atom))
        if pkg.root_config.settings["ROOT"] != "/":
            msg.append(" for %s" % (pkg.root,))
        msg.extend(["\n\n", indent, str(pkg), " conflicts with\n"])

        for parent, atom in parent_atoms:
            if isinstance(parent, (PackageArg, AtomArg)):
                # For PackageArg and AtomArg types, it's
                # redundant to display the atom attribute.
                msg.extend([2 * indent, str(parent), "\n"])
            else:
                # Display the specific atom from SetArg or
                # Package types.
                atom, marker = format_unmatched_atom(
                    pkg, atom, self._pkg_use_enabled)
                msg.extend([2 * indent,
                    "%s required by %s\n" % (atom, parent),
                    2 * indent, marker, "\n"])

        msg.append("\n")

    writemsg("".join(msg), noiselevel=-1)
def _show_slot_collision_notice(self):
    """Show an informational message advising the user to mask one of the
    the packages. In some cases it may be possible to resolve this
    automatically, but support for backtracking (removal nodes that have
    already been selected) will be required in order to handle all possible
    cases.

    Returns early when there are no slot conflicts, when the conflict
    handler produces an explanation, or under --quiet.
    """

    if not any(self._dynamic_config._package_tracker.slot_conflicts()):
        return

    self._show_merge_list()

    # Lazily create and cache the conflict handler so repeated calls
    # reuse the same analysis.
    if self._dynamic_config._slot_conflict_handler is None:
        self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
    handler = self._dynamic_config._slot_conflict_handler

    conflict = handler.get_conflict()
    writemsg(conflict, noiselevel=-1)

    explanation = handler.get_explanation()
    if explanation:
        # The handler could explain the conflict; no generic advice needed.
        writemsg(explanation, noiselevel=-1)
        return

    if "--quiet" in self._frozen_config.myopts:
        return

    msg = []
    msg.append("It may be possible to solve this problem ")
    msg.append("by using package.mask to prevent one of ")
    msg.append("those packages from being selected. ")
    msg.append("However, it is also possible that conflicting ")
    msg.append("dependencies exist such that they are impossible to ")
    msg.append("satisfy simultaneously. If such a conflict exists in ")
    msg.append("the dependencies of two different packages, then those ")
    msg.append("packages can not be installed simultaneously.")
    # Suggest raising --backtrack when backtracking was disabled or
    # limited to a small value.
    backtrack_opt = self._frozen_config.myopts.get('--backtrack')
    if not self._dynamic_config._allow_backtracking and \
        (backtrack_opt is None or \
        (backtrack_opt > 0 and backtrack_opt < 30)):
        msg.append(" You may want to try a larger value of the ")
        msg.append("--backtrack option, such as --backtrack=30, ")
        msg.append("in order to see if that will solve this conflict ")
        msg.append("automatically.")

    for line in textwrap.wrap(''.join(msg), 70):
        writemsg(line + '\n', noiselevel=-1)
    writemsg('\n', noiselevel=-1)

    msg = []
    msg.append("For more information, see MASKED PACKAGES ")
    msg.append("section in the emerge man page or refer ")
    msg.append("to the Gentoo Handbook.")
    for line in textwrap.wrap(''.join(msg), 70):
        writemsg(line + '\n', noiselevel=-1)
    writemsg('\n', noiselevel=-1)
def _solve_non_slot_operator_slot_conflicts(self):
"""
This function solves slot conflicts which can
be solved by simply choosing one of the conflicting
and removing all the other ones.
It is able to solve somewhat more complex cases where
conflicts can only be solved simultaniously.
"""
debug = "--debug" in self._frozen_config.myopts
# List all conflicts. Ignore those that involve slot operator rebuilds
# as the logic there needs special slot conflict behavior which isn't
# provided by this function.
conflicts = []
for conflict in self._dynamic_config._package_tracker.slot_conflicts():
slot_key = conflict.root, conflict.atom
if slot_key not in self._dynamic_config._slot_operator_replace_installed:
conflicts.append(conflict)
if not conflicts:
return
if debug:
writemsg_level(
"\n!!! Slot conflict handler started.\n",
level=logging.DEBUG, noiselevel=-1)
# Get a set of all conflicting packages.
conflict_pkgs = set()
for conflict in conflicts:
conflict_pkgs.update(conflict)
# Get the list of other packages which are only
# required by conflict packages.
indirect_conflict_candidates = set()
for pkg in conflict_pkgs:
indirect_conflict_candidates.update(self._dynamic_config.digraph.child_nodes(pkg))
indirect_conflict_candidates.difference_update(conflict_pkgs)
indirect_conflict_pkgs = set()
while indirect_conflict_candidates:
pkg = indirect_conflict_candidates.pop()
only_conflict_parents = True
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if parent not in conflict_pkgs and parent not in indirect_conflict_pkgs:
only_conflict_parents = False
break
if not only_conflict_parents:
continue
indirect_conflict_pkgs.add(pkg)
for child in self._dynamic_config.digraph.child_nodes(pkg):
if child in conflict_pkgs or child in indirect_conflict_pkgs:
continue
indirect_conflict_candidates.add(child)
# Create a graph containing the conflict packages
# and a special 'non_conflict_node' that represents
# all non-conflict packages.
conflict_graph = digraph()
non_conflict_node = "(non-conflict package)"
conflict_graph.add(non_conflict_node, None)
for pkg in chain(conflict_pkgs, indirect_conflict_pkgs):
conflict_graph.add(pkg, None)
# Add parent->child edges for each conflict package.
# Parents, which aren't conflict packages are represented
# by 'non_conflict_node'.
# If several conflicting packages are matched, but not all,
# add a tuple with the matched packages to the graph.
class or_tuple(tuple):
"""
Helper class for debug printing.
"""
def __str__(self):
return "(%s)" % ",".join(str(pkg) for pkg in self)
non_matching_forced = set()
for conflict in conflicts:
if debug:
writemsg_level(" conflict:\n", level=logging.DEBUG, noiselevel=-1)
writemsg_level(" root: %s\n" % conflict.root, level=logging.DEBUG, noiselevel=-1)
writemsg_level(" atom: %s\n" % conflict.atom, level=logging.DEBUG, noiselevel=-1)
for pkg in conflict:
writemsg_level(" pkg: %s\n" % pkg, level=logging.DEBUG, noiselevel=-1)
all_parent_atoms = set()
highest_pkg = None
inst_pkg = None
for pkg in conflict:
if pkg.installed:
inst_pkg = pkg
if highest_pkg is None or highest_pkg < pkg:
highest_pkg = pkg
all_parent_atoms.update(
self._dynamic_config._parent_atoms.get(pkg, []))
for parent, atom in all_parent_atoms:
is_arg_parent = isinstance(parent, AtomArg)
is_non_conflict_parent = parent not in conflict_pkgs and \
parent not in indirect_conflict_pkgs
if debug:
writemsg_level(" parent: %s\n" % parent, level=logging.DEBUG, noiselevel=-1)
writemsg_level(" arg, non-conflict: %s, %s\n" % (is_arg_parent, is_non_conflict_parent),
level=logging.DEBUG, noiselevel=-1)
writemsg_level(" atom: %s\n" % atom, level=logging.DEBUG, noiselevel=-1)
if is_non_conflict_parent:
parent = non_conflict_node
matched = []
for pkg in conflict:
if (pkg is highest_pkg and
not highest_pkg.installed and
inst_pkg is not None and
inst_pkg.sub_slot != highest_pkg.sub_slot and
not self._downgrade_probe(highest_pkg)):
# If an upgrade is desired, force the highest
# version into the graph (bug #531656).
non_matching_forced.add(highest_pkg)
if atom.match(pkg.with_use(
self._pkg_use_enabled(pkg))) and \
not (is_arg_parent and pkg.installed):
matched.append(pkg)
if debug:
for match in matched:
writemsg_level(" match: %s\n" % match, level=logging.DEBUG, noiselevel=-1)
if len(matched) > 1:
# Even if all packages match, this parent must still
# be added to the conflict_graph. Otherwise, we risk
# removing all of these packages from the depgraph,
# which could cause a missed update (bug #522084).
conflict_graph.add(or_tuple(matched), parent)
elif len(matched) == 1:
conflict_graph.add(matched[0], parent)
else:
# This typically means that autounmask broke a
# USE-dep, but it could also be due to the slot
# not matching due to multislot (bug #220341).
# Either way, don't try to solve this conflict.
# Instead, force them all into the graph so that
# they are protected from removal.
non_matching_forced.update(conflict)
if debug:
for pkg in conflict:
writemsg_level(" non-match: %s\n" % pkg,
level=logging.DEBUG, noiselevel=-1)
for pkg in indirect_conflict_pkgs:
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if parent not in conflict_pkgs and \
parent not in indirect_conflict_pkgs:
parent = non_conflict_node
conflict_graph.add(pkg, parent)
if debug:
writemsg_level(
"\n!!! Slot conflict graph:\n",
level=logging.DEBUG, noiselevel=-1)
conflict_graph.debug_print()
# Now select required packages. Collect them in the
# 'forced' set.
forced = set([non_conflict_node])
forced.update(non_matching_forced)
unexplored = set([non_conflict_node])
# or_tuples get special handling. We first explore
# all packages in the hope of having forced one of
# the packages in the tuple. This way we don't have
# to choose one.
unexplored_tuples = set()
explored_nodes = set()
while unexplored:
# Handle all unexplored packages.
while unexplored:
node = unexplored.pop()
for child in conflict_graph.child_nodes(node):
# Don't explore a node more than once, in order
# to avoid infinite recursion. The forced set
# cannot be used for this purpose, since it can
# contain unexplored nodes from non_matching_forced.
if child in explored_nodes:
continue
explored_nodes.add(child)
forced.add(child)
if isinstance(child, Package):
unexplored.add(child)
else:
unexplored_tuples.add(child)
# Now handle unexplored or_tuples. Move on with packages
# once we had to choose one.
while unexplored_tuples:
nodes = unexplored_tuples.pop()
if any(node in forced for node in nodes):
# At least one of the packages in the
# tuple is already forced, which means the
# dependency represented by this tuple
# is satisfied.
continue
# We now have to choose one of packages in the tuple.
# In theory one could solve more conflicts if we'd be
# able to try different choices here, but that has lots
# of other problems. For now choose the package that was
# pulled first, as this should be the most desirable choice
# (otherwise it wouldn't have been the first one).
forced.add(nodes[0])
unexplored.add(nodes[0])
break
# Remove 'non_conflict_node' and or_tuples from 'forced'.
forced = set(pkg for pkg in forced if isinstance(pkg, Package))
non_forced = set(pkg for pkg in conflict_pkgs if pkg not in forced)
if debug:
writemsg_level(
"\n!!! Slot conflict solution:\n",
level=logging.DEBUG, noiselevel=-1)
for conflict in conflicts:
writemsg_level(
" Conflict: (%s, %s)\n" % (conflict.root, conflict.atom),
level=logging.DEBUG, noiselevel=-1)
for pkg in conflict:
if pkg in forced:
writemsg_level(
" keep: %s\n" % pkg,
level=logging.DEBUG, noiselevel=-1)
else:
writemsg_level(
" remove: %s\n" % pkg,
level=logging.DEBUG, noiselevel=-1)
broken_packages = set()
for pkg in non_forced:
for parent, atom in self._dynamic_config._parent_atoms.get(pkg, []):
if isinstance(parent, Package) and parent not in non_forced:
# Non-forcing set args are expected to be a parent of all
# packages in the conflict.
broken_packages.add(parent)
self._remove_pkg(pkg)
# Process the dependencies of choosen conflict packages
# again to properly account for blockers.
broken_packages.update(forced)
# Filter out broken packages which have been removed during
# recursive removal in self._remove_pkg.
broken_packages = list(pkg for pkg in broken_packages if pkg in broken_packages \
if self._dynamic_config._package_tracker.contains(pkg, installed=False))
self._dynamic_config._dep_stack.extend(broken_packages)
if broken_packages:
# Process dependencies. This cannot fail because we just ensured that
# the remaining packages satisfy all dependencies.
self._create_graph()
# Record missed updates.
for conflict in conflicts:
if not any(pkg in non_forced for pkg in conflict):
continue
for pkg in conflict:
if pkg not in non_forced:
continue
for other in conflict:
if other is pkg:
continue
for parent, atom in self._dynamic_config._parent_atoms.get(other, []):
if not atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
self._dynamic_config._conflict_missed_update[pkg].setdefault(
"slot conflict", set())
self._dynamic_config._conflict_missed_update[pkg]["slot conflict"].add(
(parent, atom))
def _process_slot_conflicts(self):
    """
    If there are any slot conflicts and backtracking is enabled,
    _complete_graph should complete the graph before this method
    is called, so that all relevant reverse dependencies are
    available for use in backtracking decisions.
    """
    # First resolve the conflicts that do not require slot-operator
    # rebuild handling.
    self._solve_non_slot_operator_slot_conflicts()

    if not self._validate_blockers():
        # Blockers don't trigger the _skip_restart flag, since
        # backtracking may solve blockers when it solves slot
        # conflicts (or by blind luck).
        raise self._unknown_internal_error()

    # Both _process_slot_conflict and _slot_operator_trigger_reinstalls
    # can call _slot_operator_update_probe, which requires that
    # self._dynamic_config._blocked_pkgs has been initialized by a
    # call to the _validate_blockers method.
    for conflict in self._dynamic_config._package_tracker.slot_conflicts():
        self._process_slot_conflict(conflict)

    if self._dynamic_config._allow_backtracking:
        self._slot_operator_trigger_reinstalls()
def _process_slot_conflict(self, conflict):
    """
    Process slot conflict data to identify specific atoms which
    lead to conflict. These atoms only match a subset of the
    packages that have been pulled into a given slot.
    """
    root = conflict.root
    slot_atom = conflict.atom
    slot_nodes = conflict.pkgs

    debug = "--debug" in self._frozen_config.myopts

    # Union of parent atoms over all packages pulled into this slot.
    slot_parent_atoms = set()
    for pkg in slot_nodes:
        parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
        if not parent_atoms:
            continue
        slot_parent_atoms.update(parent_atoms)

    conflict_pkgs = []
    # Maps a (parent, atom) pair to the set of slot packages that
    # the atom failed to match.
    conflict_atoms = {}
    for pkg in slot_nodes:

        if self._dynamic_config._allow_backtracking and \
            pkg in self._dynamic_config._runtime_pkg_mask:
            if debug:
                writemsg_level(
                    "!!! backtracking loop detected: %s %s\n" % \
                    (pkg,
                    self._dynamic_config._runtime_pkg_mask[pkg]),
                    level=logging.DEBUG, noiselevel=-1)

        parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
        if parent_atoms is None:
            parent_atoms = set()
            self._dynamic_config._parent_atoms[pkg] = parent_atoms

        # Check whether every parent atom collected for the slot
        # matches this particular package; record the ones that don't.
        all_match = True
        for parent_atom in slot_parent_atoms:
            if parent_atom in parent_atoms:
                continue
            parent, atom = parent_atom
            if atom.match(pkg.with_use(self._pkg_use_enabled(pkg))):
                parent_atoms.add(parent_atom)
            else:
                all_match = False
                conflict_atoms.setdefault(parent_atom, set()).add(pkg)

        if not all_match:
            conflict_pkgs.append(pkg)

    if conflict_pkgs and \
        self._dynamic_config._allow_backtracking and \
        not self._accept_blocker_conflicts():
        remaining = []
        for pkg in conflict_pkgs:
            # Prefer scheduling an ABI rebuild via backtracking;
            # fall back to plain slot-conflict backtracking below.
            if self._slot_conflict_backtrack_abi(pkg,
                slot_nodes, conflict_atoms):
                backtrack_infos = self._dynamic_config._backtrack_infos
                config = backtrack_infos.setdefault("config", {})
                config.setdefault("slot_conflict_abi", set()).add(pkg)
            else:
                remaining.append(pkg)
        if remaining:
            self._slot_confict_backtrack(root, slot_atom,
                slot_parent_atoms, remaining)
def _slot_confict_backtrack(self, root, slot_atom,
    all_parents, conflict_pkgs):
    """
    Record backtracking data for a slot conflict so the backtracker
    can retry with the conflicting packages masked one at a time.

    NOTE(review): the method name retains a historical typo
    ("confict") — callers use this exact name, so it is kept.
    """
    debug = "--debug" in self._frozen_config.myopts
    existing_node = next(self._dynamic_config._package_tracker.match(
        root, slot_atom, installed=False))

    # In order to avoid a missed update, first mask lower versions
    # that conflict with higher versions (the backtracker visits
    # these in reverse order).
    conflict_pkgs.sort(reverse=True)

    backtrack_data = []
    for candidate in conflict_pkgs:
        # For missed update messages, find out which atoms matched
        # to_be_selected that did not match this candidate.
        matched_parent_atoms = self._dynamic_config._parent_atoms.get(
            candidate, set())
        unmatched_atoms = set(parent_atom for parent_atom in all_parents
            if parent_atom not in matched_parent_atoms)
        backtrack_data.append((candidate, unmatched_atoms))

    to_be_masked = backtrack_data[-1][0]

    self._dynamic_config._backtrack_infos.setdefault(
        "slot conflict", []).append(backtrack_data)
    self._dynamic_config._need_restart = True

    if debug:
        lines = [
            "",
            "",
            "backtracking due to slot conflict:",
            " first package: %s" % existing_node,
            " package to mask: %s" % to_be_masked,
            " slot: %s" % slot_atom,
            " parents: %s" % ", ".join(
                "(%s, '%s')" % (ppkg, atom) for ppkg, atom in all_parents),
            "",
        ]
        writemsg_level("\n".join(lines) + "\n",
            noiselevel=-1, level=logging.DEBUG)
def _slot_conflict_backtrack_abi(self, pkg, slot_nodes, conflict_atoms):
    """
    If one or more conflict atoms have a slot/sub-slot dep that can be resolved
    by rebuilding the parent package, then schedule the rebuild via
    backtracking, and return True. Otherwise, return False.
    """
    found_update = False
    for parent_atom, conflict_pkgs in conflict_atoms.items():
        parent, atom = parent_atom

        # Only a built parent with an soname or built slot-operator
        # dep can be rebuilt to resolve the conflict.
        if not isinstance(parent, Package):
            continue

        if not parent.built:
            continue

        if not atom.soname and not (
            atom.package and atom.slot_operator_built):
            continue

        # Try every slot package that this atom did match.
        for other_pkg in slot_nodes:
            if other_pkg in conflict_pkgs:
                continue

            dep = Dependency(atom=atom, child=other_pkg,
                parent=parent, root=pkg.root)

            new_dep = \
                self._slot_operator_update_probe_slot_conflict(dep)
            if new_dep is not None:
                self._slot_operator_update_backtrack(dep,
                    new_dep=new_dep)
                found_update = True

    return found_update
def _slot_change_probe(self, dep):
    """
    Check whether dep.child should be rebuilt due to a change
    in sub-slot (without revbump, as in bug #456208).

    @rtype: Package or None
    @return: a visible, non-excluded ebuild instance of dep.child
        with a different slot/sub-slot, or None (the original
        docstring claimed a bool return, but the code returns the
        candidate package itself)
    """
    # Only applies when an unbuilt (ebuild) parent depends on a
    # built child.
    if not (isinstance(dep.parent, Package) and \
        not dep.parent.built and dep.child.built):
        return None

    root_config = self._frozen_config.roots[dep.root]
    matches = []
    try:
        matches.append(self._pkg(dep.child.cpv, "ebuild",
            root_config, myrepo=dep.child.repo))
    except PackageNotFound:
        pass

    # Search for an eligible ebuild instance of the exact same cpv.
    for unbuilt_child in chain(matches,
        self._iter_match_pkgs(root_config, "ebuild",
        Atom("=%s" % (dep.child.cpv,)))):
        if unbuilt_child in self._dynamic_config._runtime_pkg_mask:
            continue
        if self._frozen_config.excluded_pkgs.findAtomForPackage(
            unbuilt_child,
            modified_use=self._pkg_use_enabled(unbuilt_child)):
            continue
        if not self._pkg_visibility_check(unbuilt_child):
            continue
        break
    else:
        # No eligible candidate found.
        return None

    if unbuilt_child.slot == dep.child.slot and \
        unbuilt_child.sub_slot == dep.child.sub_slot:
        # Identical slot/sub-slot, so no rebuild is warranted.
        return None

    return unbuilt_child
def _slot_change_backtrack(self, dep, new_child_slot):
    """
    Schedule backtracking to replace dep.child with new_child_slot,
    an instance whose slot/sub-slot differs (as detected by
    _slot_change_probe): mask the old child if it is a binary
    package, or trigger a reinstall if it is installed.

    @param dep: the dependency whose child changed slot/sub-slot
    @param new_child_slot: the replacement Package instance
    """
    child = dep.child
    if "--debug" in self._frozen_config.myopts:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to slot/sub-slot change:")
        msg.append(" child package: %s" % child)
        msg.append(" child slot: %s/%s" %
            (child.slot, child.sub_slot))
        msg.append(" new child: %s" % new_child_slot)
        msg.append(" new child slot: %s/%s" %
            (new_child_slot.slot, new_child_slot.sub_slot))
        msg.append(" parent package: %s" % dep.parent)
        msg.append(" atom: %s" % dep.atom)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    masks = {}
    if not child.installed:
        # Consistency fix: use the local 'child' alias here (the
        # original mixed 'dep.child' and 'child', which refer to the
        # same object).
        masks.setdefault(child, {})["slot_operator_mask_built"] = None
    if masks:
        config.setdefault("slot_operator_mask_built", {}).update(masks)

    # trigger replacement of installed packages if necessary
    reinstalls = set()
    if child.installed:
        replacement_atom = self._replace_installed_atom(child)
        if replacement_atom is not None:
            reinstalls.add((child.root, replacement_atom))
    if reinstalls:
        config.setdefault("slot_operator_replace_installed",
            set()).update(reinstalls)

    self._dynamic_config._need_restart = True
def _slot_operator_update_backtrack(self, dep, new_child_slot=None,
    new_dep=None):
    # Schedule backtracking for a missed slot-operator (ABI) update:
    # mask binary packages and/or trigger reinstalls of installed
    # packages so the update can be pulled in on restart.
    #
    # new_child_slot: replacement child in a different slot, if any.
    # new_dep: replacement Dependency (parent and child), if any.
    if new_child_slot is None:
        child = dep.child
    else:
        child = new_child_slot

    if "--debug" in self._frozen_config.myopts:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("backtracking due to missed slot abi update:")
        msg.append(" child package: %s" % child)
        if new_child_slot is not None:
            msg.append(" new child slot package: %s" % new_child_slot)
        msg.append(" parent package: %s" % dep.parent)
        if new_dep is not None:
            msg.append(" new parent pkg: %s" % new_dep.parent)
        msg.append(" atom: %s" % dep.atom)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)
    backtrack_infos = self._dynamic_config._backtrack_infos
    config = backtrack_infos.setdefault("config", {})

    # mask unwanted binary packages if necessary
    abi_masks = {}
    if new_child_slot is None:
        if not child.installed:
            abi_masks.setdefault(child, {})["slot_operator_mask_built"] = None
    if not dep.parent.installed:
        abi_masks.setdefault(dep.parent, {})["slot_operator_mask_built"] = None
    if abi_masks:
        config.setdefault("slot_operator_mask_built", {}).update(abi_masks)

    # trigger replacement of installed packages if necessary
    abi_reinstalls = set()
    if dep.parent.installed:
        if new_dep is not None:
            replacement_atom = new_dep.parent.slot_atom
        else:
            replacement_atom = self._replace_installed_atom(dep.parent)
        if replacement_atom is not None:
            abi_reinstalls.add((dep.parent.root, replacement_atom))
    if new_child_slot is None and child.installed:
        replacement_atom = self._replace_installed_atom(child)
        if replacement_atom is not None:
            abi_reinstalls.add((child.root, replacement_atom))
    if abi_reinstalls:
        config.setdefault("slot_operator_replace_installed",
            set()).update(abi_reinstalls)

    self._dynamic_config._need_restart = True
def _slot_operator_update_probe_slot_conflict(self, dep):
    """
    Probe for a slot-operator update for dep in slot-conflict mode,
    retrying with each autounmask level when autounmask is enabled.
    Returns the replacement Dependency, or None.
    """
    candidate = self._slot_operator_update_probe(dep, slot_conflict=True)
    if candidate is not None:
        return candidate

    if self._dynamic_config._autounmask is True:
        # Retry with progressively more permissive autounmask levels.
        for level in self._autounmask_levels():
            candidate = self._slot_operator_update_probe(
                dep, slot_conflict=True, autounmask_level=level)
            if candidate is not None:
                return candidate

    return None
def _slot_operator_update_probe(self, dep, new_child_slot=False,
    slot_conflict=False, autounmask_level=None):
    """
    slot/sub-slot := operators tend to prevent updates from getting pulled in,
    since installed packages pull in packages with the slot/sub-slot that they
    were built against. Detect this case so that we can schedule rebuilds
    and reinstalls when appropriate.
    NOTE: This function only searches for updates that involve upgrades
        to higher versions, since the logic required to detect when a
        downgrade would be desirable is not implemented.

    @param dep: the built slot-operator dependency (parent -> child)
    @param new_child_slot: if True, search for a child in a different slot
    @param slot_conflict: True when probing in slot-conflict context
    @param autounmask_level: optional autounmask level for visibility checks
    @rtype: Dependency or None
    @return: a replacement Dependency (new parent, new child, atom), or None
    """

    # Excluded packages cannot be replaced.
    if dep.child.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.child,
        modified_use=self._pkg_use_enabled(dep.child)):
        return None

    if dep.parent.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
        modified_use=self._pkg_use_enabled(dep.parent)):
        return None

    debug = "--debug" in self._frozen_config.myopts
    selective = "selective" in self._dynamic_config.myparams
    # Lazily-computed downgrade decisions (None means "not yet known").
    want_downgrade = None
    want_downgrade_parent = None

    def check_reverse_dependencies(existing_pkg, candidate_pkg,
        replacement_parent=None):
        """
        Check if candidate_pkg satisfies all of existing_pkg's non-
        slot operator parents.
        """
        # Parents holding built slot-operator/soname deps may be
        # rebuilt themselves, so collect them for exclusion below.
        built_slot_operator_parents = set()
        for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
            if atom.soname or atom.slot_operator_built:
                built_slot_operator_parents.add(parent)

        for parent, atom in self._dynamic_config._parent_atoms.get(existing_pkg, []):
            if isinstance(parent, Package):
                if parent in built_slot_operator_parents:
                    # This parent may need to be rebuilt, so its
                    # dependencies aren't necessarily relevant.
                    continue

                if replacement_parent is not None and \
                    (replacement_parent.slot_atom == parent.slot_atom
                    or replacement_parent.cpv == parent.cpv):
                    # This parent is irrelevant because we intend to
                    # replace it with replacement_parent.
                    continue

                if any(pkg is not parent and
                    (pkg.slot_atom == parent.slot_atom or
                    pkg.cpv == parent.cpv) for pkg in
                    self._dynamic_config._package_tracker.match(
                    parent.root, Atom(parent.cp))):
                    # This parent may need to be eliminated due to a
                    # slot conflict, so its dependencies aren't
                    # necessarily relevant.
                    continue

                if (not self._too_deep(parent.depth) and
                    not self._frozen_config.excluded_pkgs.
                    findAtomForPackage(parent,
                    modified_use=self._pkg_use_enabled(parent))):
                    # Check for common reasons that the parent's
                    # dependency might be irrelevant.
                    if self._upgrade_available(parent):
                        # This parent could be replaced by
                        # an upgrade (bug 584626).
                        continue
                    if parent.installed and self._in_blocker_conflict(parent):
                        # This parent could be uninstalled in order
                        # to solve a blocker conflict (bug 612772).
                        continue
                    if self._dynamic_config.digraph.has_edge(parent,
                        existing_pkg):
                        # There is a direct circular dependency between
                        # parent and existing_pkg. This type of
                        # relationship tends to prevent updates
                        # of packages (bug 612874). Since candidate_pkg
                        # is available, we risk a missed update if we
                        # don't try to eliminate this parent from the
                        # graph. Therefore, we give candidate_pkg a
                        # chance, and assume that it will be masked
                        # by backtracking if necessary.
                        continue

            atom_set = InternalPackageSet(initial_atoms=(atom,),
                allow_repo=True)
            if not atom_set.findAtomForPackage(candidate_pkg,
                modified_use=self._pkg_use_enabled(candidate_pkg)):
                return False
        return True

    for replacement_parent in self._iter_similar_available(dep.parent,
        dep.parent.slot_atom, autounmask_level=autounmask_level):

        if replacement_parent is dep.parent:
            continue

        if replacement_parent < dep.parent:
            # Only accept a lower-version parent when a downgrade
            # appears desirable.
            if want_downgrade_parent is None:
                want_downgrade_parent = self._downgrade_probe(
                    dep.parent)
            if not want_downgrade_parent:
                continue

        if not check_reverse_dependencies(dep.parent, replacement_parent):
            continue

        selected_atoms = None

        try:
            atoms = self._flatten_atoms(replacement_parent,
                self._pkg_use_enabled(replacement_parent))
        except InvalidDependString:
            continue

        if replacement_parent.requires is not None:
            atoms = list(atoms)
            atoms.extend(replacement_parent.requires)

        # List of list of child,atom pairs for each atom.
        replacement_candidates = []

        # Set of all packages all atoms can agree on.
        all_candidate_pkgs = None

        for atom in atoms:
            # The _select_atoms_probe method is expensive, so initialization
            # of this variable is only performed on demand.
            atom_not_selected = None

            if not atom.package:
                unevaluated_atom = None
                if atom.match(dep.child):
                    # We are searching for a replacement_parent
                    # atom that will pull in a different child,
                    # so continue checking the rest of the atoms.
                    continue
            else:

                if atom.blocker or \
                    atom.cp != dep.child.cp:
                    continue

                # Discard USE deps, we're only searching for an
                # approximate pattern, and dealing with USE states
                # is too complex for this purpose.
                unevaluated_atom = atom.unevaluated_atom
                atom = atom.without_use

                if replacement_parent.built and \
                    portage.dep._match_slot(atom, dep.child):
                    # We are searching for a replacement_parent
                    # atom that will pull in a different child,
                    # so continue checking the rest of the atoms.
                    continue

            candidate_pkg_atoms = []
            candidate_pkgs = []
            for pkg in self._iter_similar_available(
                dep.child, atom):
                if (dep.atom.package and
                    pkg.slot == dep.child.slot and
                    pkg.sub_slot == dep.child.sub_slot):
                    # If slot/sub-slot is identical, then there's
                    # no point in updating.
                    continue
                if new_child_slot:
                    if pkg.slot == dep.child.slot:
                        continue
                    if pkg < dep.child:
                        # the new slot only matters if the
                        # package version is higher
                        continue
                else:
                    if pkg.slot != dep.child.slot:
                        continue
                    if pkg < dep.child:
                        if want_downgrade is None:
                            want_downgrade = self._downgrade_probe(dep.child)
                        # be careful not to trigger a rebuild when
                        # the only version available with a
                        # different slot_operator is an older version
                        if not want_downgrade:
                            continue
                    if pkg.version == dep.child.version and not dep.child.built:
                        continue

                insignificant = False
                if not slot_conflict and \
                    selective and \
                    dep.parent.installed and \
                    dep.child.installed and \
                    dep.parent >= replacement_parent and \
                    dep.child.cpv == pkg.cpv:
                    # Then can happen if the child's sub-slot changed
                    # without a revision bump. The sub-slot change is
                    # considered insignificant until one of its parent
                    # packages needs to be rebuilt (which may trigger a
                    # slot conflict).
                    insignificant = True

                if (not insignificant and
                    unevaluated_atom is not None):
                    # Evaluate USE conditionals and || deps, in order
                    # to see if this atom is really desirable, since
                    # otherwise we may trigger an undesirable rebuild
                    # as in bug #460304.
                    if selected_atoms is None:
                        selected_atoms = self._select_atoms_probe(
                            dep.child.root, replacement_parent)
                    atom_not_selected = unevaluated_atom not in selected_atoms
                    if atom_not_selected:
                        break

                if not insignificant and \
                    check_reverse_dependencies(dep.child, pkg,
                    replacement_parent=replacement_parent):

                    candidate_pkg_atoms.append(
                        (pkg, unevaluated_atom or atom))
                    candidate_pkgs.append(pkg)

            # When unevaluated_atom is None, it means that atom is
            # an soname atom which is unconditionally selected, and
            # _select_atoms_probe is not applicable.
            if atom_not_selected is None and unevaluated_atom is not None:
                if selected_atoms is None:
                    selected_atoms = self._select_atoms_probe(
                        dep.child.root, replacement_parent)
                atom_not_selected = unevaluated_atom not in selected_atoms

            if atom_not_selected:
                continue
            replacement_candidates.append(candidate_pkg_atoms)
            if all_candidate_pkgs is None:
                all_candidate_pkgs = set(candidate_pkgs)
            else:
                all_candidate_pkgs.intersection_update(candidate_pkgs)

        if not all_candidate_pkgs:
            # If the atoms that connect parent and child can't agree on
            # any replacement child, we can't do anything.
            continue

        # Now select one of the pkgs as replacement. This is as easy as
        # selecting the highest version.
        # The more complicated part is to choose an atom for the
        # new Dependency object. Choose the one which ranked the selected
        # parent highest.
        selected = None
        for candidate_pkg_atoms in replacement_candidates:
            for i, (pkg, atom) in enumerate(candidate_pkg_atoms):
                if pkg not in all_candidate_pkgs:
                    continue
                if selected is None or \
                    selected[0] < pkg or \
                    (selected[0] is pkg and i < selected[2]):
                    selected = (pkg, atom, i)

        if debug:
            msg = []
            msg.append("")
            msg.append("")
            msg.append("slot_operator_update_probe:")
            msg.append(" existing child package: %s" % dep.child)
            msg.append(" existing parent package: %s" % dep.parent)
            msg.append(" new child package: %s" % selected[0])
            msg.append(" new parent package: %s" % replacement_parent)
            msg.append("")
            writemsg_level("\n".join(msg),
                noiselevel=-1, level=logging.DEBUG)

        return Dependency(parent=replacement_parent,
            child=selected[0], atom=selected[1])

    if debug:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("slot_operator_update_probe:")
        msg.append(" existing child package: %s" % dep.child)
        msg.append(" existing parent package: %s" % dep.parent)
        msg.append(" new child package: %s" % None)
        msg.append(" new parent package: %s" % None)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)

    return None
def _slot_operator_unsatisfied_probe(self, dep):
    """
    Check whether an unsatisfied built slot-operator dep could be
    satisfied by replacing dep.parent with a similar available
    package whose corresponding := atom matches a selectable package.

    @param dep: the unsatisfied dependency
    @rtype: bool
    @return: True if a replacement parent/child pair was found
    """
    # Excluded installed parents cannot be replaced.
    if dep.parent.installed and \
        self._frozen_config.excluded_pkgs.findAtomForPackage(dep.parent,
        modified_use=self._pkg_use_enabled(dep.parent)):
        return False

    debug = "--debug" in self._frozen_config.myopts

    for replacement_parent in self._iter_similar_available(dep.parent,
        dep.parent.slot_atom):

        for atom in replacement_parent.validated_atoms:
            # Idiom fix: use != instead of the equivalent
            # 'not ... == "="' form.
            if atom.slot_operator != "=" or \
                atom.blocker or \
                atom.cp != dep.atom.cp:
                continue

            # Discard USE deps, we're only searching for an approximate
            # pattern, and dealing with USE states is too complex for
            # this purpose.
            atom = atom.without_use

            pkg, existing_node = self._select_package(dep.root, atom,
                onlydeps=dep.onlydeps)

            if pkg is not None:

                if debug:
                    msg = []
                    msg.append("")
                    msg.append("")
                    msg.append("slot_operator_unsatisfied_probe:")
                    msg.append(" existing parent package: %s" % dep.parent)
                    msg.append(" existing parent atom: %s" % dep.atom)
                    msg.append(" new parent package: %s" % replacement_parent)
                    msg.append(" new child package: %s" % pkg)
                    msg.append("")
                    writemsg_level("\n".join(msg),
                        noiselevel=-1, level=logging.DEBUG)

                return True

    if debug:
        msg = []
        msg.append("")
        msg.append("")
        msg.append("slot_operator_unsatisfied_probe:")
        msg.append(" existing parent package: %s" % dep.parent)
        msg.append(" existing parent atom: %s" % dep.atom)
        msg.append(" new parent package: %s" % None)
        msg.append(" new child package: %s" % None)
        msg.append("")
        writemsg_level("\n".join(msg),
            noiselevel=-1, level=logging.DEBUG)

    return False
def _slot_operator_unsatisfied_backtrack(self, dep):
    """
    Schedule backtracking for an unsatisfied built slot-operator dep:
    mask the parent if it is a binary package, or trigger a reinstall
    if it is installed.
    """
    parent = dep.parent

    if "--debug" in self._frozen_config.myopts:
        lines = [
            "",
            "",
            "backtracking due to unsatisfied built slot-operator dep:",
            " parent package: %s" % parent,
            " atom: %s" % dep.atom,
            "",
        ]
        writemsg_level("\n".join(lines),
            noiselevel=-1, level=logging.DEBUG)

    config = self._dynamic_config._backtrack_infos.setdefault(
        "config", {})

    # mask unwanted binary packages if necessary
    if not parent.installed:
        config.setdefault("slot_operator_mask_built", {}).update(
            {parent: {"slot_operator_mask_built": None}})

    # trigger replacement of installed packages if necessary
    if parent.installed:
        replacement_atom = self._replace_installed_atom(parent)
        if replacement_atom is not None:
            config.setdefault("slot_operator_replace_installed",
                set()).add((parent.root, replacement_atom))

    self._dynamic_config._need_restart = True
def _in_blocker_conflict(self, pkg):
    """
    Check if pkg is involved in a blocker conflict. This method
    only works after the _validate_blockers method has been called.
    """
    # Lazily initialize blocker data if it has not been computed yet.
    if (self._dynamic_config._blocked_pkgs is None
            and not self._validate_blockers()):
        raise self._unknown_internal_error()

    return (pkg in self._dynamic_config._blocked_pkgs
        or pkg in self._dynamic_config._blocker_parents)
def _upgrade_available(self, pkg):
    """
    Detect cases where an upgrade of the given package is available
    within the same slot.
    """
    # any() short-circuits on the first higher-version candidate,
    # just like the original early return.
    return any(candidate > pkg for candidate in
        self._iter_similar_available(pkg, pkg.slot_atom))
def _downgrade_probe(self, pkg):
    """
    Detect cases where a downgrade of the given package is considered
    desirable due to the current version being masked or unavailable.
    """
    saw_candidate = False
    for candidate in self._iter_similar_available(pkg, pkg.slot_atom):
        saw_candidate = True
        if candidate >= pkg:
            # There's an available package of the same or higher
            # version, so downgrade seems undesirable.
            return False
    # Desirable only if at least one (strictly lower) candidate exists.
    return saw_candidate
def _select_atoms_probe(self, root, pkg):
    """
    Run _select_atoms over each of pkg's dependency strings and
    return the frozenset of unevaluated forms of the atoms that
    would be selected for pkg.
    """
    use = self._pkg_use_enabled(pkg)
    chosen = []
    for dep_key in pkg._dep_keys:
        dep_string = pkg._metadata.get(dep_key)
        if dep_string:
            chosen.extend(self._select_atoms(
                root, dep_string, myuse=use, parent=pkg)[pkg])
    return frozenset(atom.unevaluated_atom for atom in chosen)
def _flatten_atoms(self, pkg, use):
    """
    Evaluate all dependency atoms of the given package, and return
    them as a frozenset. For performance, results are cached.

    @param pkg: a Package instance
    @type pkg: Package
    @param use: set of enabled USE flags
    @type use: frozenset
    @rtype: frozenset
    @return: set of evaluated atoms
    """
    cache_key = (pkg, use)

    try:
        return self._dynamic_config._flatten_atoms_cache[cache_key]
    except KeyError:
        pass

    atoms = []

    for dep_key in pkg._dep_keys:
        dep_string = pkg._metadata[dep_key]
        if not dep_string:
            continue

        # Flatten the dependency string, evaluating USE conditionals
        # against the given flag set.
        dep_string = portage.dep.use_reduce(
            dep_string, uselist=use,
            is_valid_flag=pkg.iuse.is_valid_flag,
            flat=True, token_class=Atom, eapi=pkg.eapi)

        atoms.extend(token for token in dep_string
            if isinstance(token, Atom))

    atoms = frozenset(atoms)

    self._dynamic_config._flatten_atoms_cache[cache_key] = atoms
    return atoms
def _iter_similar_available(self, graph_pkg, atom, autounmask_level=None):
    """
    Given a package that's in the graph, do a rough check to
    see if a similar package is available to install. The given
    graph_pkg itself may be yielded only if it's not installed.
    """
    usepkgonly = "--usepkgonly" in self._frozen_config.myopts
    useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
    use_ebuild_visibility = self._frozen_config.myopts.get(
        '--use-ebuild-visibility', 'n') != 'n'

    for pkg in self._iter_match_pkgs_any(
        graph_pkg.root_config, atom):
        if pkg.cp != graph_pkg.cp:
            # discard old-style virtual match
            continue
        if pkg.installed:
            continue
        if pkg in self._dynamic_config._runtime_pkg_mask:
            continue
        if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
            modified_use=self._pkg_use_enabled(pkg)):
            continue
        if pkg.built:
            if self._equiv_binary_installed(pkg):
                continue
            # Require a visible equivalent ebuild, unless binary
            # packages are explicitly allowed for this pkg (via
            # --usepkgonly or useoldpkg atoms) and ebuild visibility
            # checking is disabled.
            if not (not use_ebuild_visibility and
                (usepkgonly or useoldpkg_atoms.findAtomForPackage(
                pkg, modified_use=self._pkg_use_enabled(pkg)))) and \
                not self._equiv_ebuild_visible(pkg,
                autounmask_level=autounmask_level):
                continue
        if not self._pkg_visibility_check(pkg,
            autounmask_level=autounmask_level):
            continue
        yield pkg
def _replace_installed_atom(self, inst_pkg):
    """
    Given an installed package, generate an atom suitable for
    slot_operator_replace_installed backtracking info. The replacement
    SLOT may differ from the installed SLOT, so first search by cpv.

    @param inst_pkg: an installed Package instance
    @rtype: Atom or None
    @return: the slot atom of a suitable replacement, preferring an
        ebuild over a built package, or None if nothing is available
    """
    built_pkgs = []

    # First search for an exact cpv match, since the replacement
    # SLOT may differ from the installed SLOT.
    for pkg in self._iter_similar_available(inst_pkg,
        Atom("=%s" % inst_pkg.cpv)):
        if not pkg.built:
            return pkg.slot_atom
        elif not pkg.installed:
            # avoid using SLOT from a built instance
            built_pkgs.append(pkg)

    # Fall back to any available package in the installed slot.
    for pkg in self._iter_similar_available(inst_pkg, inst_pkg.slot_atom):
        if not pkg.built:
            return pkg.slot_atom
        elif not pkg.installed:
            # avoid using SLOT from a built instance
            built_pkgs.append(pkg)

    if built_pkgs:
        # Idiom fix: select the highest version with max() instead of
        # a manual best-so-far loop (Package instances are ordered by
        # version, as the original '>' comparison relied on).
        return max(built_pkgs).slot_atom
    return None
def _slot_operator_trigger_reinstalls(self):
    """
    Search for packages with slot-operator deps on older slots, and schedule
    rebuilds if they can link to a newer slot that's in the graph.
    """

    rebuild_if_new_slot = self._dynamic_config.myparams.get(
        "rebuild_if_new_slot", "y") == "y"

    for slot_key, slot_info in self._dynamic_config._slot_operator_deps.items():

        for dep in slot_info:

            atom = dep.atom

            if not (atom.soname or atom.slot_operator_built):
                # Plain (not soname / not built slot-operator) dep:
                # only probe for a slot change of the child.
                new_child_slot = self._slot_change_probe(dep)
                if new_child_slot is not None:
                    self._slot_change_backtrack(dep, new_child_slot)
                continue

            # Rebuild candidates must have a built Package parent.
            if not (dep.parent and
                isinstance(dep.parent, Package) and dep.parent.built):
                continue

            # Check for slot update first, since we don't want to
            # trigger reinstall of the child package when a newer
            # slot will be used instead.
            if rebuild_if_new_slot and dep.want_update:
                new_dep = self._slot_operator_update_probe(dep,
                    new_child_slot=True)
                if new_dep is not None:
                    self._slot_operator_update_backtrack(dep,
                        new_child_slot=new_dep.child)

            if dep.want_update:
                if self._slot_operator_update_probe(dep):
                    self._slot_operator_update_backtrack(dep)
def _reinstall_for_flags(self, pkg, forced_flags,
    orig_use, orig_iuse, cur_use, cur_iuse):
    """Return a set of flags that trigger reinstallation, or None if there
    are no such flags.

    @param forced_flags: flags whose state the user cannot change;
        IUSE differences in these are not reinstall triggers
    @param orig_use/orig_iuse: USE and IUSE of the existing instance
    @param cur_use/cur_iuse: USE and IUSE of the candidate instance
    """

    # binpkg_respect_use: Behave like newuse by default. If newuse is
    # False and changed_use is True, then behave like changed_use.
    binpkg_respect_use = (pkg.built and
        self._dynamic_config.myparams.get("binpkg_respect_use")
        in ("y", "auto"))
    newuse = "--newuse" in self._frozen_config.myopts
    changed_use = "changed-use" == self._frozen_config.myopts.get("--reinstall")
    # Flags managed by FEATURES (e.g. test) never trigger reinstall.
    feature_flags = _get_feature_flags(
        _get_eapi_attrs(pkg.eapi))

    if newuse or (binpkg_respect_use and not changed_use):
        # newuse semantics: IUSE additions/removals count (minus
        # forced flags), plus any change in enabled flags.
        flags = set(orig_iuse.symmetric_difference(
            cur_iuse).difference(forced_flags))
        flags.update(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
        flags.difference_update(feature_flags)
        if flags:
            return flags

    elif changed_use or binpkg_respect_use:
        # changed-use semantics: only changes in enabled flags count.
        flags = set(orig_iuse.intersection(orig_use).symmetric_difference(
            cur_iuse.intersection(cur_use)))
        flags.difference_update(feature_flags)
        if flags:
            return flags
    return None
def _changed_deps(self, pkg):
    """
    Return True if the dependencies of pkg differ from those of the
    corresponding ebuild (used to detect when a built/installed
    instance should be considered out of date with respect to deps).
    """

    ebuild = None
    try:
        ebuild = self._pkg(pkg.cpv, "ebuild",
            pkg.root_config, myrepo=pkg.repo)
    except PackageNotFound:
        # Use first available instance of the same version.
        for ebuild in self._iter_match_pkgs(
            pkg.root_config, "ebuild", Atom("=" + pkg.cpv)):
            break

    if ebuild is None:
        # No comparable ebuild exists; deps cannot be compared, so
        # report "unchanged".
        changed = False
    else:
        # Compare build-time deps only when bdeps are being traversed.
        if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
            depvars = Package._dep_keys
        else:
            depvars = Package._runtime_keys

        # Use _raw_metadata, in order to avoid interaction
        # with --dynamic-deps.
        try:
            built_deps = []
            for k in depvars:
                dep_struct = portage.dep.use_reduce(
                    pkg._raw_metadata[k], uselist=pkg.use.enabled,
                    eapi=pkg.eapi, token_class=Atom)
                # Slots are stripped so that slot/sub-slot changes
                # alone don't count as changed deps here.
                strip_slots(dep_struct)
                built_deps.append(dep_struct)
        except InvalidDependString:
            changed = True
        else:
            unbuilt_deps = []
            for k in depvars:
                dep_struct = portage.dep.use_reduce(
                    ebuild._raw_metadata[k],
                    uselist=pkg.use.enabled,
                    eapi=ebuild.eapi, token_class=Atom)
                strip_slots(dep_struct)
                unbuilt_deps.append(dep_struct)

            changed = built_deps != unbuilt_deps

    return changed
def _create_graph(self, allow_unsatisfied=False):
    """
    Drain the dependency work stacks, adding packages and deps to the
    graph until both stacks are empty.

    @return: 1 on success, 0 if any add operation failed
    """
    pending = self._dynamic_config._dep_stack
    pending_disjunctions = self._dynamic_config._dep_disjunctive_stack

    while pending or pending_disjunctions:
        self._spinner_update()

        # Process ordinary work items first; each item is either a
        # Package (expand its deps) or a Dependency (resolve it).
        while pending:
            item = pending.pop()
            if isinstance(item, Package):
                ok = self._add_pkg_deps(item,
                    allow_unsatisfied=allow_unsatisfied)
            else:
                ok = self._add_dep(item,
                    allow_unsatisfied=allow_unsatisfied)
            if not ok:
                return 0

        # Only then pick one queued || (disjunctive) dep, which may
        # push new ordinary items back onto the stack.
        if pending_disjunctions and \
            not self._pop_disjunction(allow_unsatisfied):
            return 0

    return 1
def _expand_set_args(self, input_args, add_to_digraph=False):
    """
    Iterate over a list of DependencyArg instances and yield all
    instances given in the input together with additional SetArg
    instances that are generated from nested sets.
    @param input_args: An iterable of DependencyArg instances
    @type input_args: Iterable
    @param add_to_digraph: If True then add SetArg instances
        to the digraph, in order to record parent -> child
        relationships from nested sets
    @type add_to_digraph: Boolean
    @rtype: Iterable
    @return: All args given in the input together with additional
        SetArg instances that are generated from nested sets
    """

    traversed_set_args = set()

    for arg in input_args:
        if not isinstance(arg, SetArg):
            yield arg
            continue

        root_config = arg.root_config
        depgraph_sets = self._dynamic_config.sets[root_config.root]
        arg_stack = [arg]
        # Depth-first traversal of nested sets; traversed_set_args
        # guards against cycles and duplicates.
        while arg_stack:
            arg = arg_stack.pop()
            if arg in traversed_set_args:
                continue

            # If a node with the same hash already exists in
            # the digraph, preserve the existing instance which
            # may have a different reset_depth attribute
            # (distinguishes user arguments from sets added for
            # another reason such as complete mode).
            arg = self._dynamic_config.digraph.get(arg, arg)
            traversed_set_args.add(arg)

            if add_to_digraph:
                self._dynamic_config.digraph.add(arg, None,
                    priority=BlockerDepPriority.instance)

            yield arg

            # Traverse nested sets and add them to the stack
            # if they're not already in the graph. Also, graph
            # edges between parent and nested sets.
            for token in arg.pset.getNonAtoms():
                if not token.startswith(SETPREFIX):
                    continue
                s = token[len(SETPREFIX):]
                nested_set = depgraph_sets.sets.get(s)
                if nested_set is None:
                    nested_set = root_config.sets.get(s)
                if nested_set is not None:
                    # Propagate the reset_depth attribute from
                    # parent set to nested set.
                    nested_arg = SetArg(arg=token, pset=nested_set,
                        reset_depth=arg.reset_depth,
                        root_config=root_config)

                    # Preserve instances already in the graph (same
                    # reason as for the "arg" variable above).
                    nested_arg = self._dynamic_config.digraph.get(
                        nested_arg, nested_arg)
                    arg_stack.append(nested_arg)
                    if add_to_digraph:
                        self._dynamic_config.digraph.add(nested_arg, arg,
                            priority=BlockerDepPriority.instance)
                        depgraph_sets.sets[nested_arg.name] = nested_arg.pset
def _add_dep(self, dep, allow_unsatisfied=False):
    """
    Resolve a single Dependency: select a package to satisfy it and
    add that package to the graph, or record the dep as a blocker /
    unsatisfied dep as appropriate.

    @param dep: the Dependency to resolve
    @param allow_unsatisfied: if True, unsatisfied deps are queued
        rather than triggering backtracking or failure
    @return: 1 on success (including handled-unsatisfied cases),
        0 when the dependency calculation must stop/backtrack
    """
    debug = "--debug" in self._frozen_config.myopts
    buildpkgonly = "--buildpkgonly" in self._frozen_config.myopts
    nodeps = "--nodeps" in self._frozen_config.myopts
    if dep.blocker:

        # Slot collision nodes are not allowed to block other packages since
        # blocker validation is only able to account for one package per slot.
        is_slot_conflict_parent = any(dep.parent in conflict.pkgs[1:] for conflict in \
            self._dynamic_config._package_tracker.slot_conflicts())
        if not buildpkgonly and \
            not nodeps and \
            not dep.collapsed_priority.ignored and \
            not dep.collapsed_priority.optional and \
            not is_slot_conflict_parent:
            if dep.parent.onlydeps:
                # It's safe to ignore blockers if the
                # parent is an --onlydeps node.
                return 1
            # The blocker applies to the root where
            # the parent is or will be installed.
            blocker = Blocker(atom=dep.atom,
                eapi=dep.parent.eapi,
                priority=dep.priority, root=dep.parent.root)
            self._dynamic_config._blocker_parents.add(blocker, dep.parent)
        return 1

    if dep.child is None:
        dep_pkg, existing_node = self._select_package(dep.root, dep.atom,
            onlydeps=dep.onlydeps)
    else:
        # The caller has selected a specific package
        # via self._minimize_packages().
        dep_pkg = dep.child
        existing_node = next(self._dynamic_config._package_tracker.match(
            dep.root, dep_pkg.slot_atom, installed=False), None)

    if not dep_pkg:
        if (dep.collapsed_priority.optional or
            dep.collapsed_priority.ignored):
            # This is an unnecessary build-time dep.
            return 1

        # NOTE: For removal actions, allow_unsatisfied is always
        # True since all existing removal actions traverse all
        # installed deps deeply via the _complete_graph method,
        # which calls _create_graph with allow_unsatisfied = True.
        if allow_unsatisfied:
            self._dynamic_config._unsatisfied_deps.append(dep)
            return 1

        # The following case occurs when
        # _solve_non_slot_operator_slot_conflicts calls
        # _create_graph. In this case, ignore unsatisfied deps for
        # installed packages only if their depth is beyond the depth
        # requested by the user and the dep was initially
        # unsatisfied (not broken by a slot conflict in the current
        # graph). See bug #520950.
        # NOTE: The value of dep.parent.depth is guaranteed to be
        # either an integer or _UNREACHABLE_DEPTH, where
        # _UNREACHABLE_DEPTH indicates that the parent has been
        # pulled in by the _complete_graph method (rather than by
        # explicit arguments or their deep dependencies). These
        # cases must be distinguished because depth is meaningless
        # for packages that are not reachable as deep dependencies
        # of arguments.
        if (self._dynamic_config._complete_mode and
            isinstance(dep.parent, Package) and
            dep.parent.installed and
            (dep.parent.depth is self._UNREACHABLE_DEPTH or
            (self._frozen_config.requested_depth is not True and
            dep.parent.depth >= self._frozen_config.requested_depth))):
            inst_pkg, in_graph = \
                self._select_pkg_from_installed(dep.root, dep.atom)
            if inst_pkg is None:
                self._dynamic_config._initially_unsatisfied_deps.append(dep)
                return 1

        self._dynamic_config._unsatisfied_deps_for_display.append(
            ((dep.root, dep.atom), {"myparent":dep.parent}))

        # The parent node should not already be in
        # runtime_pkg_mask, since that would trigger an
        # infinite backtracking loop.
        if self._dynamic_config._allow_backtracking:
            if (dep.parent not in self._dynamic_config._runtime_pkg_mask and
                dep.atom.package and dep.atom.slot_operator_built and
                self._slot_operator_unsatisfied_probe(dep)):
                self._slot_operator_unsatisfied_backtrack(dep)
                return 1
            else:
                # This is for backward-compatibility with previous
                # behavior, so that installed packages with unsatisfied
                # dependencies trigger an error message but do not
                # cause the dependency calculation to fail. Only do
                # this if the parent is already in the runtime package
                # mask, since otherwise we need to backtrack.
                if (dep.parent.installed and
                    dep.parent in self._dynamic_config._runtime_pkg_mask and
                    not any(self._iter_match_pkgs_any(
                    dep.parent.root_config, dep.atom))):
                    self._dynamic_config._initially_unsatisfied_deps.append(dep)
                    return 1

                # Do not backtrack if only USE have to be changed in
                # order to satisfy the dependency. Note that when
                # want_restart_for_use_change sets the need_restart
                # flag, it causes _select_pkg_highest_available to
                # return None, and eventually we come through here
                # and skip the "missing dependency" backtracking path.
                dep_pkg, existing_node = \
                    self._select_package(dep.root,
                    dep.atom.without_use if dep.atom.package
                    else dep.atom, onlydeps=dep.onlydeps)
                if dep_pkg is None:
                    self._dynamic_config._backtrack_infos["missing dependency"] = dep
                    self._dynamic_config._need_restart = True
                    if debug:
                        msg = []
                        msg.append("")
                        msg.append("")
                        msg.append("backtracking due to unsatisfied dep:")
                        msg.append(" parent: %s" % dep.parent)
                        msg.append(" priority: %s" % dep.priority)
                        msg.append(" root: %s" % dep.root)
                        msg.append(" atom: %s" % dep.atom)
                        msg.append("")
                        writemsg_level("".join("%s\n" % l for l in msg),
                            noiselevel=-1, level=logging.DEBUG)

        return 0

    self._rebuild.add(dep_pkg, dep)

    ignore = dep.collapsed_priority.ignored and \
        not self._dynamic_config._traverse_ignored_deps
    if not ignore and not self._add_pkg(dep_pkg, dep):
        return 0
    return 1
def _check_slot_conflict(self, pkg, atom):
    """
    Check whether a non-installed package already occupies pkg's slot
    in the graph.

    @return: a (existing_node, matches) tuple where existing_node is
        the occupying package (or None) and matches indicates whether
        that node can satisfy the given atom (None when there is no
        occupying node)
    """
    occupant = next(self._dynamic_config._package_tracker.match(
        pkg.root, pkg.slot_atom, installed=False), None)

    if not occupant:
        return (occupant, None)

    # Same cpv is an automatic match; otherwise re-test the atom
    # against the occupant with its effective USE flags.
    matches = pkg.cpv == occupant.cpv
    if atom is not None and pkg != occupant:
        matches = atom.match(occupant.with_use(
            self._pkg_use_enabled(occupant)))

    return (occupant, matches)
def _add_pkg(self, pkg, dep):
    """
    Adds a package to the depgraph, queues dependencies, and handles
    slot conflicts.

    @param pkg: the package to add to the graph
    @param dep: the Dependency instance that pulled pkg in, or None
        when pkg is added directly (a Dependency with defaults is
        synthesized in that case)
    @return: 1 on success, 0 on failure (e.g. unsatisfied
        REQUIRED_USE)
    """
    # Fix: removed a leftover dead "if True:" wrapper around the
    # arg_atoms try/except below; it added a pointless nesting level.
    debug = "--debug" in self._frozen_config.myopts
    myparent = None
    priority = None
    depth = 0
    if dep is None:
        dep = Dependency()
    else:
        myparent = dep.parent
        priority = dep.priority
        depth = dep.depth
    if priority is None:
        priority = DepPriority()

    if debug:
        writemsg_level(
            "\n%s%s %s\n" % ("Child:".ljust(15), pkg,
            pkg_use_display(pkg, self._frozen_config.myopts,
            modified_use=self._pkg_use_enabled(pkg))),
            level=logging.DEBUG, noiselevel=-1)
        if isinstance(myparent,
            (PackageArg, AtomArg)):
            # For PackageArg and AtomArg types, it's
            # redundant to display the atom attribute.
            writemsg_level(
                "%s%s\n" % ("Parent Dep:".ljust(15), myparent),
                level=logging.DEBUG, noiselevel=-1)
        else:
            # Display the specific atom from SetArg or
            # Package types.
            uneval = ""
            if (dep.atom and dep.atom.package and
                dep.atom is not dep.atom.unevaluated_atom):
                uneval = " (%s)" % (dep.atom.unevaluated_atom,)
            writemsg_level(
                "%s%s%s required by %s\n" %
                ("Parent Dep:".ljust(15), dep.atom, uneval, myparent),
                level=logging.DEBUG, noiselevel=-1)

    # Ensure that the dependencies of the same package
    # are never processed more than once.
    previously_added = pkg in self._dynamic_config.digraph

    pkgsettings = self._frozen_config.pkgsettings[pkg.root]

    arg_atoms = None
    try:
        arg_atoms = list(self._iter_atoms_for_pkg(pkg))
    except portage.exception.InvalidDependString as e:
        if not pkg.installed:
            # should have been masked before it was selected
            raise
        del e

    # NOTE: REQUIRED_USE checks are delayed until after
    # package selection, since we want to prompt the user
    # for USE adjustment rather than have REQUIRED_USE
    # affect package selection and || dep choices.
    if not pkg.built and pkg._metadata.get("REQUIRED_USE") and \
        eapi_has_required_use(pkg.eapi):
        required_use_is_sat = check_required_use(
            pkg._metadata["REQUIRED_USE"],
            self._pkg_use_enabled(pkg),
            pkg.iuse.is_valid_flag,
            eapi=pkg.eapi)
        if not required_use_is_sat:
            # Record parent relationships before reporting, so the
            # display code can show who pulled this package in.
            if dep.atom is not None and dep.parent is not None:
                self._add_parent_atom(pkg, (dep.parent, dep.atom))

            if arg_atoms:
                for parent_atom in arg_atoms:
                    parent, atom = parent_atom
                    self._add_parent_atom(pkg, parent_atom)

            atom = dep.atom
            if atom is None:
                atom = Atom("=" + pkg.cpv)
            self._dynamic_config._unsatisfied_deps_for_display.append(
                ((pkg.root, atom),
                {"myparent" : dep.parent, "show_req_use" : pkg}))
            self._dynamic_config._required_use_unsatisfied = True
            self._dynamic_config._skip_restart = True
            return 0

    if not pkg.onlydeps:
        existing_node, existing_node_matches = \
            self._check_slot_conflict(pkg, dep.atom)
        if existing_node:
            if existing_node_matches:
                # The existing node can be reused.
                if pkg != existing_node:
                    pkg = existing_node
                    previously_added = True
                    try:
                        arg_atoms = list(self._iter_atoms_for_pkg(pkg))
                    except InvalidDependString as e:
                        if not pkg.installed:
                            # should have been masked before
                            # it was selected
                            raise

                    if debug:
                        writemsg_level(
                            "%s%s %s\n" % ("Re-used Child:".ljust(15),
                            pkg, pkg_use_display(pkg,
                            self._frozen_config.myopts,
                            modified_use=self._pkg_use_enabled(pkg))),
                            level=logging.DEBUG, noiselevel=-1)
            else:
                if debug:
                    writemsg_level(
                        "%s%s %s\n" % ("Slot Conflict:".ljust(15),
                        existing_node, pkg_use_display(existing_node,
                        self._frozen_config.myopts,
                        modified_use=self._pkg_use_enabled(existing_node))),
                        level=logging.DEBUG, noiselevel=-1)

    if not previously_added:
        self._dynamic_config._package_tracker.add_pkg(pkg)
        self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
        self._check_masks(pkg)
        self._prune_highest_pkg_cache(pkg)

    if not pkg.installed:
        # Allow this package to satisfy old-style virtuals in case it
        # doesn't already. Any pre-existing providers will be preferred
        # over this one.
        try:
            pkgsettings.setinst(pkg.cpv, pkg._metadata)
            # For consistency, also update the global virtuals.
            settings = self._frozen_config.roots[pkg.root].settings
            settings.unlock()
            settings.setinst(pkg.cpv, pkg._metadata)
            settings.lock()
        except portage.exception.InvalidDependString:
            if not pkg.installed:
                # should have been masked before it was selected
                raise

    if arg_atoms:
        self._dynamic_config._set_nodes.add(pkg)

    # Do this even for onlydeps, so that the
    # parent/child relationship is always known in case
    # self._show_slot_collision_notice() needs to be called later.
    # If a direct circular dependency is not an unsatisfied
    # buildtime dependency then drop it here since otherwise
    # it can skew the merge order calculation in an unwanted
    # way.
    if pkg != dep.parent or \
        (priority.buildtime and not priority.satisfied):
        self._dynamic_config.digraph.add(pkg,
            dep.parent, priority=priority)
        if dep.atom is not None and dep.parent is not None:
            self._add_parent_atom(pkg, (dep.parent, dep.atom))

    if arg_atoms:
        for parent_atom in arg_atoms:
            parent, atom = parent_atom
            self._dynamic_config.digraph.add(pkg, parent, priority=priority)
            self._add_parent_atom(pkg, parent_atom)

    # This section determines whether we go deeper into dependencies or not.
    # We want to go deeper on a few occasions:
    # Installing package A, we need to make sure package A's deps are met.
    # emerge --deep <pkgspec>; we need to recursively check dependencies of pkgspec
    # If we are in --nodeps (no recursion) mode, we obviously only check 1 level of dependencies.
    if arg_atoms and depth != 0:
        for parent, atom in arg_atoms:
            if parent.reset_depth:
                depth = 0
                break

    if previously_added and depth != 0 and \
        isinstance(pkg.depth, int):
        # Use pkg.depth if it is less than depth.
        if isinstance(depth, int):
            depth = min(pkg.depth, depth)
        else:
            # depth is _UNREACHABLE_DEPTH and pkg.depth is
            # an int, so use the int because it's considered
            # to be less than _UNREACHABLE_DEPTH.
            depth = pkg.depth

    pkg.depth = depth
    deep = self._dynamic_config.myparams.get("deep", 0)
    update = "--update" in self._frozen_config.myopts

    dep.want_update = (not self._dynamic_config._complete_mode and
        (arg_atoms or update) and
        not self._too_deep(depth))

    dep.child = pkg
    if not pkg.onlydeps and dep.atom and (
        dep.atom.soname or
        dep.atom.slot_operator == "="):
        self._add_slot_operator_dep(dep)

    recurse = (deep is True or
        not self._too_deep(self._depth_increment(depth, n=1)))
    dep_stack = self._dynamic_config._dep_stack
    if "recurse" not in self._dynamic_config.myparams:
        return 1
    elif pkg.installed and not recurse:
        dep_stack = self._dynamic_config._ignored_deps

    self._spinner_update()

    if not previously_added:
        dep_stack.append(pkg)
    return 1
def _add_installed_sonames(self, pkg):
    """
    Register pkg under each soname atom it provides, for later
    soname-dependency resolution. No-op unless soname deps are
    enabled and pkg declares provides.
    """
    if not self._frozen_config.soname_deps_enabled:
        return
    if pkg.provides is None:
        return

    installed_sonames = self._dynamic_config._installed_sonames
    for soname_atom in pkg.provides:
        installed_sonames[(pkg.root, soname_atom)].append(pkg)
def _add_pkg_soname_deps(self, pkg, allow_unsatisfied=False):
    """
    Queue a runtime Dependency for each soname that pkg requires and
    is not provided by the system profile.

    @return: False if adding any soname dep failed, True otherwise
    """
    if not (self._frozen_config.soname_deps_enabled and
            pkg.requires is not None):
        return True

    # Children of pkg live one level deeper, unless depth is the
    # special non-int marker, which is propagated unchanged.
    child_depth = pkg.depth + 1 if isinstance(pkg.depth, int) else pkg.depth

    provided = self._frozen_config.roots[
        pkg.root].settings.soname_provided

    for soname_atom in pkg.requires:
        if soname_atom in provided:
            # satisfied by the profile's soname.provided
            continue
        soname_dep = Dependency(atom=soname_atom, blocker=False,
            depth=child_depth, parent=pkg,
            priority=self._priority(runtime=True), root=pkg.root)
        if not self._add_dep(soname_dep,
            allow_unsatisfied=allow_unsatisfied):
            return False

    return True
def _remove_pkg(self, pkg):
    """
    Remove a package and all its then parentless digraph
    children from all depgraph datastructures.
    """
    debug = "--debug" in self._frozen_config.myopts
    if debug:
        writemsg_level(
            "Removing package: %s\n" % pkg,
            level=logging.DEBUG, noiselevel=-1)

    try:
        # Capture children before removing pkg from the digraph,
        # so orphans can be removed recursively below.
        children = [child for child in self._dynamic_config.digraph.child_nodes(pkg) \
            if child is not pkg]
        self._dynamic_config.digraph.remove(pkg)
    except KeyError:
        children = []

    self._dynamic_config._package_tracker.discard_pkg(pkg)
    self._dynamic_config._parent_atoms.pop(pkg, None)
    self._dynamic_config._set_nodes.discard(pkg)

    # Drop pkg from each child's parent-atom sets.
    for child in children:
        try:
            self._dynamic_config._parent_atoms[child] = set((parent, atom) \
                for (parent, atom) in self._dynamic_config._parent_atoms[child] \
                if parent is not pkg)
        except KeyError:
            pass

    # Remove slot operator dependencies.
    slot_key = (pkg.root, pkg.slot_atom)
    if slot_key in self._dynamic_config._slot_operator_deps:
        self._dynamic_config._slot_operator_deps[slot_key] = \
            [dep for dep in self._dynamic_config._slot_operator_deps[slot_key] \
            if dep.child is not pkg]
        if not self._dynamic_config._slot_operator_deps[slot_key]:
            del self._dynamic_config._slot_operator_deps[slot_key]

    # Remove blockers.
    self._dynamic_config._blocker_parents.discard(pkg)
    self._dynamic_config._irrelevant_blockers.discard(pkg)
    self._dynamic_config._unsolvable_blockers.discard(pkg)
    if self._dynamic_config._blocked_pkgs is not None:
        self._dynamic_config._blocked_pkgs.discard(pkg)
    self._dynamic_config._blocked_world_pkgs.pop(pkg, None)

    # Recursively remove children that are now parentless.
    for child in children:
        if child in self._dynamic_config.digraph and \
            not self._dynamic_config.digraph.parent_nodes(child):
            self._remove_pkg(child)

    # Clear caches.
    self._dynamic_config._filtered_trees[pkg.root]["porttree"].dbapi._clear_cache()
    self._dynamic_config._highest_pkg_cache.clear()
    self._dynamic_config._highest_pkg_cache_cp_map.clear()
def _check_masks(self, pkg):
    """
    Note upgrades in pkg's slot that are masked only because of a
    LICENSE change, so they can be reported to the user later.
    """
    # _highest_license_masked maps (root, slot_atom) to the highest
    # version masked solely by LICENSE.
    masked_update = self._frozen_config._highest_license_masked.get(
        (pkg.root, pkg.slot_atom))
    if masked_update is not None and pkg < masked_update:
        self._dynamic_config._masked_license_updates.add(masked_update)
def _add_parent_atom(self, pkg, parent_atom):
    """Record a (parent, atom) pair that pulled pkg into the graph."""
    self._dynamic_config._parent_atoms.setdefault(
        pkg, set()).add(parent_atom)
def _add_slot_operator_dep(self, dep):
    """
    Index dep under its child's (root, slot_atom) so that
    slot-operator reinstall checks can find it later.
    """
    slot_key = (dep.root, dep.child.slot_atom)
    self._dynamic_config._slot_operator_deps.setdefault(
        slot_key, []).append(dep)
def _add_pkg_deps(self, pkg, allow_unsatisfied=False):
    """
    Expand pkg's dependency strings (DEPEND/HDEPEND/RDEPEND/PDEPEND
    plus soname requirements) into Dependency objects and feed them
    into the graph.

    @return: 1 on success (0/False on failure), matching the
        conventions of the _add_* helpers
    """

    if not self._add_pkg_soname_deps(pkg,
        allow_unsatisfied=allow_unsatisfied):
        return False

    myroot = pkg.root
    metadata = pkg._metadata
    removal_action = "remove" in self._dynamic_config.myparams
    eapi_attrs = _get_eapi_attrs(pkg.eapi)

    edepend={}
    for k in Package._dep_keys:
        edepend[k] = metadata[k]

    use_enabled = self._pkg_use_enabled(pkg)

    # Optionally evaluate deps with USE=test enabled for argument
    # packages (with_test_deps mode), without changing pkg itself.
    with_test_deps = not removal_action and \
        "with_test_deps" in \
        self._dynamic_config.myparams and \
        pkg.depth == 0 and \
        "test" not in use_enabled and \
        pkg.iuse.is_valid_flag("test") and \
        self._is_argument(pkg)

    if with_test_deps:
        use_enabled = set(use_enabled)
        use_enabled.add("test")

    if not pkg.built and \
        "--buildpkgonly" in self._frozen_config.myopts and \
        "deep" not in self._dynamic_config.myparams:
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    if pkg.onlydeps and \
        self._frozen_config.myopts.get("--onlydeps-with-rdeps") == 'n':
        edepend["RDEPEND"] = ""
        edepend["PDEPEND"] = ""

    ignore_build_time_deps = False
    if pkg.built and not removal_action:
        if self._dynamic_config.myparams.get("bdeps") in ("y", "auto"):
            # Pull in build time deps as requested, but marked them as
            # "optional" since they are not strictly required. This allows
            # more freedom in the merge order calculation for solving
            # circular dependencies. Don't convert to PDEPEND since that
            # could make --with-bdeps=y less effective if it is used to
            # adjust merge order to prevent built_with_use() calls from
            # failing.
            pass
        else:
            ignore_build_time_deps = True

    if removal_action and self._dynamic_config.myparams.get("bdeps", "y") == "n":
        # Removal actions never traverse ignored buildtime
        # dependencies, so it's safe to discard them early.
        edepend["DEPEND"] = ""
        edepend["HDEPEND"] = ""
        ignore_build_time_deps = True

    ignore_depend_deps = ignore_build_time_deps
    ignore_hdepend_deps = ignore_build_time_deps

    # DEPEND applies to the build host's root unless the EAPI
    # supports HDEPEND or --root-deps overrides it.
    if removal_action:
        depend_root = myroot
    else:
        if eapi_attrs.hdepend:
            depend_root = myroot
        else:
            depend_root = self._frozen_config._running_root.root
            root_deps = self._frozen_config.myopts.get("--root-deps")
            if root_deps is not None:
                if root_deps is True:
                    depend_root = myroot
                elif root_deps == "rdeps":
                    ignore_depend_deps = True

    # If rebuild mode is not enabled, it's safe to discard ignored
    # build-time dependencies. If you want these deps to be traversed
    # in "complete" mode then you need to specify --with-bdeps=y.
    if not self._rebuild.rebuild:
        if ignore_depend_deps:
            edepend["DEPEND"] = ""
        if ignore_hdepend_deps:
            edepend["HDEPEND"] = ""

    deps = (
        (depend_root, edepend["DEPEND"],
            self._priority(buildtime=True,
            optional=(pkg.built or ignore_depend_deps),
            ignored=ignore_depend_deps)),
        (self._frozen_config._running_root.root, edepend["HDEPEND"],
            self._priority(buildtime=True,
            optional=(pkg.built or ignore_hdepend_deps),
            ignored=ignore_hdepend_deps)),
        (myroot, edepend["RDEPEND"],
            self._priority(runtime=True)),
        (myroot, edepend["PDEPEND"],
            self._priority(runtime_post=True))
    )

    debug = "--debug" in self._frozen_config.myopts

    for dep_root, dep_string, dep_priority in deps:
        if not dep_string:
            continue
        if debug:
            writemsg_level("\nParent: %s\n" % (pkg,),
                noiselevel=-1, level=logging.DEBUG)
            writemsg_level("Depstring: %s\n" % (dep_string,),
                noiselevel=-1, level=logging.DEBUG)
            writemsg_level("Priority: %s\n" % (dep_priority,),
                noiselevel=-1, level=logging.DEBUG)

        try:
            dep_string = portage.dep.use_reduce(dep_string,
                uselist=use_enabled,
                is_valid_flag=pkg.iuse.is_valid_flag,
                opconvert=True, token_class=Atom,
                eapi=pkg.eapi)
        except portage.exception.InvalidDependString as e:
            if not pkg.installed:
                # should have been masked before it was selected
                raise
            del e

            # Try again, but omit the is_valid_flag argument, since
            # invalid USE conditionals are a common problem and it's
            # practical to ignore this issue for installed packages.
            try:
                dep_string = portage.dep.use_reduce(dep_string,
                    uselist=use_enabled,
                    opconvert=True, token_class=Atom,
                    eapi=pkg.eapi)
            except portage.exception.InvalidDependString as e:
                self._dynamic_config._masked_installed.add(pkg)
                del e
                continue

        try:
            dep_string = list(self._queue_disjunctive_deps(
                pkg, dep_root, dep_priority, dep_string))
        except portage.exception.InvalidDependString as e:
            if pkg.installed:
                self._dynamic_config._masked_installed.add(pkg)
                del e
                continue

            # should have been masked before it was selected
            raise

        if not dep_string:
            continue

        if not self._add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string,
            allow_unsatisfied):
            return 0

    self._dynamic_config._traversed_pkg_deps.add(pkg)
    return 1
def _add_pkg_dep_string(self, pkg, dep_root, dep_priority, dep_string,
    allow_unsatisfied):
    """
    Wrapper around _wrapped_add_pkg_dep_string that suppresses
    autounmask while processing optional/ignored dep strings, and
    always restores the previous autounmask state.
    """
    saved_autounmask = self._dynamic_config._autounmask
    if dep_priority.optional or dep_priority.ignored:
        # Temporarily disable autounmask for deps that
        # don't necessarily need to be satisfied.
        self._dynamic_config._autounmask = False
    try:
        return self._wrapped_add_pkg_dep_string(
            pkg, dep_root, dep_priority, dep_string, allow_unsatisfied)
    finally:
        self._dynamic_config._autounmask = saved_autounmask
def _ignore_dependency(self, atom, pkg, child, dep, mypriority, recurse_satisfied):
    """
    In some cases, dep_check will return deps that shouldn't
    be processed any further, so they are identified and
    discarded here. Try to discard as few as possible since
    discarded dependencies reduce the amount of information
    available for optimization of merge order.
    Don't ignore dependencies if pkg has a slot operator dependency on the child
    and the child has changed slot/sub_slot.

    @return: True if this dependency should be ignored
    """
    # Only deps already satisfied by an installed instance are
    # candidates for being ignored.
    if not mypriority.satisfied:
        return False
    slot_operator_rebuild = False
    # A := dep whose child changed slot/sub_slot relative to the
    # satisfying installed instance must not be ignored, since the
    # parent is scheduled for a slot-operator rebuild.
    if atom.slot_operator == '=' and \
        (pkg.root, pkg.slot_atom) in self._dynamic_config._slot_operator_replace_installed and \
        mypriority.satisfied is not child and \
        mypriority.satisfied.installed and \
        child and \
        not child.installed and \
        (child.slot != mypriority.satisfied.slot or child.sub_slot != mypriority.satisfied.sub_slot):
        slot_operator_rebuild = True

    return not atom.blocker and \
        not recurse_satisfied and \
        mypriority.satisfied.visible and \
        dep.child is not None and \
        not dep.child.installed and \
        not any(self._dynamic_config._package_tracker.match(
        dep.child.root, dep.child.slot_atom, installed=False)) and \
        not slot_operator_rebuild
def _wrapped_add_pkg_dep_string(self, pkg, dep_root, dep_priority,
    dep_string, allow_unsatisfied):
    """
    Select atoms from a parsed dep string, convert them to Dependency
    instances, and add them to the graph. Indirect deps selected via
    virtuals are traversed in a second phase so that real parent/child
    relationships are preserved.

    @return: 1 on success, 0 if adding any dep failed
    """
    if isinstance(pkg.depth, int):
        depth = pkg.depth + 1
    else:
        depth = pkg.depth

    deep = self._dynamic_config.myparams.get("deep", 0)
    recurse_satisfied = deep is True or depth <= deep
    debug = "--debug" in self._frozen_config.myopts
    strict = pkg.type_name != "installed"

    if debug:
        writemsg_level("\nParent: %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)
        dep_repr = portage.dep.paren_enclose(dep_string,
            unevaluated_atom=True, opconvert=True)
        writemsg_level("Depstring: %s\n" % (dep_repr,),
            noiselevel=-1, level=logging.DEBUG)
        writemsg_level("Priority: %s\n" % (dep_priority,),
            noiselevel=-1, level=logging.DEBUG)

    try:
        selected_atoms = self._select_atoms(dep_root,
            dep_string, myuse=self._pkg_use_enabled(pkg), parent=pkg,
            strict=strict, priority=dep_priority)
    except portage.exception.InvalidDependString:
        if pkg.installed:
            self._dynamic_config._masked_installed.add(pkg)
            return 1

        # should have been masked before it was selected
        raise

    if debug:
        writemsg_level("Candidates: %s\n" % \
            ([str(x) for x in selected_atoms[pkg]],),
            noiselevel=-1, level=logging.DEBUG)

    root_config = self._frozen_config.roots[dep_root]
    vardb = root_config.trees["vartree"].dbapi
    traversed_virt_pkgs = set()

    reinstall_atoms = self._frozen_config.reinstall_atoms
    # Phase 1: direct deps of pkg.
    for atom, child in self._minimize_children(
        pkg, dep_priority, root_config, selected_atoms[pkg]):

        # If this was a specially generated virtual atom
        # from dep_check, map it back to the original, in
        # order to avoid distortion in places like display
        # or conflict resolution code.
        is_virt = hasattr(atom, '_orig_atom')
        atom = getattr(atom, '_orig_atom', atom)

        if atom.blocker and \
            (dep_priority.optional or dep_priority.ignored):
            # For --with-bdeps, ignore build-time only blockers
            # that originate from built packages.
            continue

        mypriority = dep_priority.copy()
        if not atom.blocker:

            if atom.slot_operator == "=":
                if mypriority.buildtime:
                    mypriority.buildtime_slot_op = True
                if mypriority.runtime:
                    mypriority.runtime_slot_op = True

            # Mark the priority as satisfied by the best installed
            # instance, preferring the highest visible one.
            inst_pkgs = [inst_pkg for inst_pkg in
                reversed(vardb.match_pkgs(atom))
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                modified_use=self._pkg_use_enabled(inst_pkg))]
            if inst_pkgs:
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        # highest visible
                        mypriority.satisfied = inst_pkg
                        break
                if not mypriority.satisfied:
                    # none visible, so use highest
                    mypriority.satisfied = inst_pkgs[0]

        dep = Dependency(atom=atom,
            blocker=atom.blocker, child=child, depth=depth, parent=pkg,
            priority=mypriority, root=dep_root)

        # In some cases, dep_check will return deps that shouldn't
        # be processed any further, so they are identified and
        # discarded here. Try to discard as few as possible since
        # discarded dependencies reduce the amount of information
        # available for optimization of merge order.
        ignored = False
        if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
            myarg = None
            try:
                myarg = next(self._iter_atoms_for_pkg(dep.child), None)
            except InvalidDependString:
                if not dep.child.installed:
                    raise

            if myarg is None:
                # Existing child selection may not be valid unless
                # it's added to the graph immediately, since "complete"
                # mode may select a different child later.
                ignored = True
                dep.child = None
                self._dynamic_config._ignored_deps.append(dep)

        if not ignored:
            if dep_priority.ignored and \
                not self._dynamic_config._traverse_ignored_deps:
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)
                dep.child = None
                self._dynamic_config._ignored_deps.append(dep)
            else:
                if not self._add_dep(dep,
                    allow_unsatisfied=allow_unsatisfied):
                    return 0
                if is_virt and dep.child is not None:
                    traversed_virt_pkgs.add(dep.child)

    selected_atoms.pop(pkg)

    # Add selected indirect virtual deps to the graph. This
    # takes advantage of circular dependency avoidance that's done
    # by dep_zapdeps. We preserve actual parent/child relationships
    # here in order to avoid distorting the dependency graph like
    # <=portage-2.1.6.x did.
    for virt_dep, atoms in selected_atoms.items():

        virt_pkg = virt_dep.child
        if virt_pkg not in traversed_virt_pkgs:
            continue

        if debug:
            writemsg_level("\nCandidates: %s: %s\n" % \
                (virt_pkg.cpv, [str(x) for x in atoms]),
                noiselevel=-1, level=logging.DEBUG)

        if not dep_priority.ignored or \
            self._dynamic_config._traverse_ignored_deps:

            inst_pkgs = [inst_pkg for inst_pkg in
                reversed(vardb.match_pkgs(virt_dep.atom))
                if not reinstall_atoms.findAtomForPackage(inst_pkg,
                modified_use=self._pkg_use_enabled(inst_pkg))]
            if inst_pkgs:
                for inst_pkg in inst_pkgs:
                    if self._pkg_visibility_check(inst_pkg):
                        # highest visible
                        virt_dep.priority.satisfied = inst_pkg
                        break
                if not virt_dep.priority.satisfied:
                    # none visible, so use highest
                    virt_dep.priority.satisfied = inst_pkgs[0]

            if not self._add_pkg(virt_pkg, virt_dep):
                return 0

        for atom, child in self._minimize_children(
            pkg, self._priority(runtime=True), root_config, atoms):

            # If this was a specially generated virtual atom
            # from dep_check, map it back to the original, in
            # order to avoid distortion in places like display
            # or conflict resolution code.
            is_virt = hasattr(atom, '_orig_atom')
            atom = getattr(atom, '_orig_atom', atom)

            # This is a GLEP 37 virtual, so its deps are all runtime.
            mypriority = self._priority(runtime=True)
            if not atom.blocker:
                inst_pkgs = [inst_pkg for inst_pkg in
                    reversed(vardb.match_pkgs(atom))
                    if not reinstall_atoms.findAtomForPackage(inst_pkg,
                    modified_use=self._pkg_use_enabled(inst_pkg))]
                if inst_pkgs:
                    for inst_pkg in inst_pkgs:
                        if self._pkg_visibility_check(inst_pkg):
                            # highest visible
                            mypriority.satisfied = inst_pkg
                            break
                    if not mypriority.satisfied:
                        # none visible, so use highest
                        mypriority.satisfied = inst_pkgs[0]

            # Dependencies of virtuals are considered to have the
            # same depth as the virtual itself.
            dep = Dependency(atom=atom,
                blocker=atom.blocker, child=child, depth=virt_dep.depth,
                parent=virt_pkg, priority=mypriority, root=dep_root,
                collapsed_parent=pkg, collapsed_priority=dep_priority)

            ignored = False
            if self._ignore_dependency(atom, pkg, child, dep, mypriority, recurse_satisfied):
                myarg = None
                try:
                    myarg = next(self._iter_atoms_for_pkg(dep.child), None)
                except InvalidDependString:
                    if not dep.child.installed:
                        raise

                if myarg is None:
                    ignored = True
                    dep.child = None
                    self._dynamic_config._ignored_deps.append(dep)

            if not ignored:
                if dep_priority.ignored and \
                    not self._dynamic_config._traverse_ignored_deps:
                    if is_virt and dep.child is not None:
                        traversed_virt_pkgs.add(dep.child)
                    dep.child = None
                    self._dynamic_config._ignored_deps.append(dep)
                else:
                    if not self._add_dep(dep,
                        allow_unsatisfied=allow_unsatisfied):
                        return 0
                    if is_virt and dep.child is not None:
                        traversed_virt_pkgs.add(dep.child)

    if debug:
        writemsg_level("\nExiting... %s\n" % (pkg,),
            noiselevel=-1, level=logging.DEBUG)

    return 1
def _minimize_children(self, parent, priority, root_config, atoms):
    """
    Selects packages to satisfy the given atoms, and minimizes the
    number of selected packages. This serves to identify and eliminate
    redundant package selections when multiple atoms happen to specify
    a version range.

    Yields (atom, pkg) pairs; pkg is None for blocker atoms and for
    atoms that _select_package could not satisfy.
    """
    # Map each satisfiable non-blocker atom to its selected package.
    atom_pkg_map = {}

    for atom in atoms:
        if atom.blocker:
            yield (atom, None)
            continue
        dep_pkg, existing_node = self._select_package(
            root_config.root, atom, parent=parent)
        if dep_pkg is None:
            yield (atom, None)
            continue
        atom_pkg_map[atom] = dep_pkg

    # With fewer than two selections there is nothing to minimize.
    if len(atom_pkg_map) < 2:
        for item in atom_pkg_map.items():
            yield item
        return

    # Group selections by package and by cp (category/package), since
    # redundancy can only occur among packages sharing the same cp.
    cp_pkg_map = {}
    pkg_atom_map = {}
    for atom, pkg in atom_pkg_map.items():
        pkg_atom_map.setdefault(pkg, set()).add(atom)
        cp_pkg_map.setdefault(pkg.cp, set()).add(pkg)

    for pkgs in cp_pkg_map.values():
        if len(pkgs) < 2:
            # Only one selection for this cp; nothing to eliminate.
            for pkg in pkgs:
                for atom in pkg_atom_map[pkg]:
                    yield (atom, pkg)
            continue

        # Use a digraph to identify and eliminate any
        # redundant package selections.
        atom_pkg_graph = digraph()
        cp_atoms = set()
        for pkg1 in pkgs:
            for atom in pkg_atom_map[pkg1]:
                cp_atoms.add(atom)
                atom_pkg_graph.add(pkg1, atom)
                # Also record every other package of this cp that
                # could satisfy the same atom.
                atom_set = InternalPackageSet(initial_atoms=(atom,),
                    allow_repo=True)
                for pkg2 in pkgs:
                    if pkg2 is pkg1:
                        continue
                    if atom_set.findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)):
                        atom_pkg_graph.add(pkg2, atom)

        # A package is redundant if every atom it satisfies is also
        # satisfied by at least one other selected package.
        for pkg in pkgs:
            eliminate_pkg = True
            for atom in atom_pkg_graph.parent_nodes(pkg):
                if len(atom_pkg_graph.child_nodes(atom)) < 2:
                    eliminate_pkg = False
                    break
            if eliminate_pkg:
                atom_pkg_graph.remove(pkg)

        # Yield ~, =*, < and <= atoms first, since those are more likely to
        # cause slot conflicts, and we want those atoms to be displayed
        # in the resulting slot conflict message (see bug #291142).
        # Give similar treatment to slot/sub-slot atoms.
        conflict_atoms = []
        normal_atoms = []
        abi_atoms = []
        for atom in cp_atoms:
            if atom.slot_operator_built:
                abi_atoms.append(atom)
                continue
            conflict = False
            for child_pkg in atom_pkg_graph.child_nodes(atom):
                existing_node, matches = \
                    self._check_slot_conflict(child_pkg, atom)
                if existing_node and not matches:
                    conflict = True
                    break
            if conflict:
                conflict_atoms.append(atom)
            else:
                normal_atoms.append(atom)

        for atom in chain(abi_atoms, conflict_atoms, normal_atoms):
            child_pkgs = atom_pkg_graph.child_nodes(atom)
            # if more than one child, yield highest version
            if len(child_pkgs) > 1:
                child_pkgs.sort()
            yield (atom, child_pkgs[-1])
def _queue_disjunctive_deps(self, pkg, dep_root, dep_priority, dep_struct):
"""
Queue disjunctive (virtual and ||) deps in self._dynamic_config._dep_disjunctive_stack.
Yields non-disjunctive deps. Raises InvalidDependString when
necessary.
"""
for x in dep_struct:
if isinstance(x, list):
if x and x[0] == "||":
self._queue_disjunction(pkg, dep_root, dep_priority, [x])
else:
for y in self._queue_disjunctive_deps(
pkg, dep_root, dep_priority, x):
yield y
else:
# Note: Eventually this will check for PROPERTIES=virtual
# or whatever other metadata gets implemented for this
# purpose.
if x.cp.startswith('virtual/'):
self._queue_disjunction(pkg, dep_root, dep_priority, [x])
else:
yield x
def _queue_disjunction(self, pkg, dep_root, dep_priority, dep_struct):
self._dynamic_config._dep_disjunctive_stack.append(
(pkg, dep_root, dep_priority, dep_struct))
def _pop_disjunction(self, allow_unsatisfied):
"""
Pop one disjunctive dep from self._dynamic_config._dep_disjunctive_stack, and use it to
populate self._dynamic_config._dep_stack.
"""
pkg, dep_root, dep_priority, dep_struct = \
self._dynamic_config._dep_disjunctive_stack.pop()
if not self._add_pkg_dep_string(
pkg, dep_root, dep_priority, dep_struct, allow_unsatisfied):
return 0
return 1
def _priority(self, **kwargs):
    # In "remove" mode, priorities are modelled with
    # UnmergeDepPriority; otherwise the normal DepPriority applies.
    priority_cls = (UnmergeDepPriority
        if "remove" in self._dynamic_config.myparams
        else DepPriority)
    return priority_cls(**kwargs)
def _dep_expand(self, root_config, atom_without_category):
    """
    Expand a category-less atom into one atom per category that
    contains a matching package name.

    @param root_config: a root config instance
    @type root_config: RootConfig
    @param atom_without_category: an atom without a category component
    @type atom_without_category: String
    @rtype: list
    @return: a list of atoms containing categories (possibly empty)
    """
    # Insert a placeholder category so the package-name part can be
    # extracted with the normal atom helpers.
    null_cp = portage.dep_getkey(insert_category_into_atom(
        atom_without_category, "null"))
    atom_pn = portage.catsplit(null_cp)[1]

    # Collect every category, across all configured package dbs, that
    # provides a package with this name.
    dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]
    matching_categories = set()
    for db, pkg_type, built, installed, db_keys in dbs:
        for candidate in db.categories:
            if db.cp_list("%s/%s" % (candidate, atom_pn)):
                matching_categories.add(candidate)

    return [Atom(insert_category_into_atom(atom_without_category, cat),
        allow_repo=True) for cat in matching_categories]
def _have_new_virt(self, root, atom_cp):
ret = False
for db, pkg_type, built, installed, db_keys in \
self._dynamic_config._filtered_trees[root]["dbs"]:
if db.cp_list(atom_cp):
ret = True
break
return ret
def _iter_atoms_for_pkg(self, pkg):
    """
    Yield (arg, atom) pairs for command-line/set argument atoms that
    match the given package. Atoms are skipped when a new-style
    package shadows the atom's cp, or when a visible package in a
    different slot would be a better (higher) match.

    NOTE(review): iterAtomsForPackage appears able to raise
    InvalidDependString (callers guard for it) — confirm.
    """
    depgraph_sets = self._dynamic_config.sets[pkg.root]
    atom_arg_map = depgraph_sets.atom_arg_map
    for atom in depgraph_sets.atoms.iterAtomsForPackage(pkg):
        # Skip old-style virtual matches that are shadowed by a
        # new-style package providing the same cp.
        if atom.cp != pkg.cp and \
            self._have_new_virt(pkg.root, atom.cp):
            continue
        visible_pkgs = \
            self._dynamic_config._visible_pkgs[pkg.root].match_pkgs(atom)
        visible_pkgs.reverse() # descending order
        higher_slot = None
        for visible_pkg in visible_pkgs:
            if visible_pkg.cp != atom.cp:
                continue
            if pkg >= visible_pkg:
                # This is descending order, and we're not
                # interested in any versions <= pkg given.
                break
            if pkg.slot_atom != visible_pkg.slot_atom:
                higher_slot = visible_pkg
                break
        if higher_slot is not None:
            # A higher version in another slot satisfies this atom,
            # so it does not apply to pkg.
            continue
        for arg in atom_arg_map[(atom, pkg.root)]:
            # PackageArg instances are pinned to a specific package.
            if isinstance(arg, PackageArg) and \
                arg.package != pkg:
                continue
            yield arg, atom
def select_files(self, args):
    # Use the global event loop for spinner progress
    # indication during file owner lookups (bug #461412).
    def spinner_cb():
        self._frozen_config.spinner.update()
        # Reschedule itself so the spinner keeps animating until the
        # handle is cancelled in the finally block below.
        spinner_cb.handle = self._event_loop.call_soon(spinner_cb)

    spinner_cb.handle = None
    try:
        spinner = self._frozen_config.spinner
        if spinner is not None and \
            spinner.update is not spinner.update_quiet:
            # Only start the animation for a real (non-quiet) spinner.
            spinner_cb.handle = self._event_loop.call_soon(spinner_cb)
        return self._select_files(args)
    finally:
        # Stop the self-rescheduling callback, if it was started.
        if spinner_cb.handle is not None:
            spinner_cb.handle.cancel()
def _select_files(self, myfiles):
    """Given a list of .tbz2s, .ebuilds, sets, and deps, populate
    self._dynamic_config._initial_arg_list and call self._resolve to create the
    appropriate depgraph and return a favorite list."""
    self._load_vdb()
    if (self._frozen_config.soname_deps_enabled and
        "remove" not in self._dynamic_config.myparams):
        self._index_binpkgs()
    debug = "--debug" in self._frozen_config.myopts
    root_config = self._frozen_config.roots[self._frozen_config.target_root]
    sets = root_config.sets
    depgraph_sets = self._dynamic_config.sets[root_config.root]
    myfavorites=[]
    eroot = root_config.root
    root = root_config.settings['ROOT']
    vardb = self._frozen_config.trees[eroot]["vartree"].dbapi
    real_vardb = self._frozen_config._trees_orig[eroot]["vartree"].dbapi
    portdb = self._frozen_config.trees[eroot]["porttree"].dbapi
    bindb = self._frozen_config.trees[eroot]["bintree"].dbapi
    pkgsettings = self._frozen_config.pkgsettings[eroot]
    args = []
    onlydeps = "--onlydeps" in self._frozen_config.myopts
    lookup_owners = []
    for x in myfiles:
        ext = os.path.splitext(x)[1]
        if ext==".tbz2":
            # Binary package argument: resolve the file path (also
            # checking PKGDIR and PKGDIR/All) and match it to an
            # indexed binary package.
            if not os.path.exists(x):
                if os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], "All", x)):
                    x = os.path.join(pkgsettings["PKGDIR"], "All", x)
                elif os.path.exists(
                    os.path.join(pkgsettings["PKGDIR"], x)):
                    x = os.path.join(pkgsettings["PKGDIR"], x)
                else:
                    writemsg("\n\n!!! Binary package '"+str(x)+"' does not exist.\n", noiselevel=-1)
                    writemsg("!!! Please ensure the tbz2 exists as specified.\n\n", noiselevel=-1)
                    return 0, myfavorites

            # Derive cpv from the xpak CATEGORY metadata plus the
            # file name (minus the ".tbz2" suffix).
            mytbz2=portage.xpak.tbz2(x)
            mykey = None
            cat = mytbz2.getfile("CATEGORY")
            if cat is not None:
                cat = _unicode_decode(cat.strip(),
                    encoding=_encodings['repo.content'])
                mykey = cat + "/" + os.path.basename(x)[:-5]

            if mykey is None:
                writemsg(colorize("BAD", "\n*** Package is missing CATEGORY metadata: %s.\n\n" % x), noiselevel=-1)
                self._dynamic_config._skip_restart = True
                return 0, myfavorites

            # The file must be the one the binary tree would serve for
            # this cpv; otherwise PKGDIR is misconfigured.
            x = os.path.realpath(x)
            for pkg in self._iter_match_pkgs(root_config, "binary", Atom('=%s' % mykey)):
                if x == os.path.realpath(bindb.bintree.getname(pkg.cpv)):
                    break
            else:
                writemsg("\n%s\n\n" % colorize("BAD",
                    "*** " + _("You need to adjust PKGDIR to emerge "
                    "this package: %s") % x), noiselevel=-1)
                self._dynamic_config._skip_restart = True
                return 0, myfavorites

            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif ext==".ebuild":
            # Ebuild-path argument: validate that the path lies in a
            # proper category/package/ebuild hierarchy of a configured
            # repository.
            ebuild_path = portage.util.normalize_path(os.path.abspath(x))
            pkgdir = os.path.dirname(ebuild_path)
            tree_root = os.path.dirname(os.path.dirname(pkgdir))
            cp = pkgdir[len(tree_root)+1:]
            error_msg = ("\n\n!!! '%s' is not in a valid portage tree "
                "hierarchy or does not exist\n") % x
            if not portage.isvalidatom(cp):
                writemsg(error_msg, noiselevel=-1)
                return 0, myfavorites
            cat = portage.catsplit(cp)[0]
            mykey = cat + "/" + os.path.basename(ebuild_path[:-7])
            if not portage.isvalidatom("="+mykey):
                writemsg(error_msg, noiselevel=-1)
                return 0, myfavorites
            ebuild_path = portdb.findname(mykey)
            if ebuild_path:
                if ebuild_path != os.path.join(os.path.realpath(tree_root),
                    cp, os.path.basename(ebuild_path)):
                    writemsg(colorize("BAD", "\n*** You need to adjust repos.conf to emerge this package.\n\n"), noiselevel=-1)
                    self._dynamic_config._skip_restart = True
                    return 0, myfavorites
                if mykey not in portdb.xmatch(
                    "match-visible", portage.cpv_getkey(mykey)):
                    # Masked package requested explicitly: warn with a
                    # countdown, but proceed.
                    writemsg(colorize("BAD", "\n*** You are emerging a masked package. It is MUCH better to use\n"), noiselevel=-1)
                    writemsg(colorize("BAD", "*** /etc/portage/package.* to accomplish this. See portage(5) man\n"), noiselevel=-1)
                    writemsg(colorize("BAD", "*** page for details.\n"), noiselevel=-1)
                    countdown(int(self._frozen_config.settings["EMERGE_WARNING_DELAY"]),
                        "Continuing...")
            else:
                writemsg(error_msg, noiselevel=-1)
                return 0, myfavorites
            pkg = self._pkg(mykey, "ebuild", root_config,
                onlydeps=onlydeps, myrepo=portdb.getRepositoryName(
                os.path.dirname(os.path.dirname(os.path.dirname(ebuild_path)))))
            args.append(PackageArg(arg=x, package=pkg,
                root_config=root_config))
        elif x.startswith(os.path.sep):
            # Absolute filesystem path: look up the owning package.
            if not x.startswith(eroot):
                portage.writemsg(("\n\n!!! '%s' does not start with" + \
                    " $EROOT.\n") % x, noiselevel=-1)
                self._dynamic_config._skip_restart = True
                return 0, []
            # Queue these up since it's most efficient to handle
            # multiple files in a single iter_owners() call.
            lookup_owners.append(x)
        elif x.startswith("." + os.sep) or \
            x.startswith(".." + os.sep):
            # Relative filesystem path: resolve then queue for owner
            # lookup like the absolute-path case.
            f = os.path.abspath(x)
            if not f.startswith(eroot):
                portage.writemsg(("\n\n!!! '%s' (resolved from '%s') does not start with" + \
                    " $EROOT.\n") % (f, x), noiselevel=-1)
                self._dynamic_config._skip_restart = True
                return 0, []
            lookup_owners.append(f)
        else:
            # Package set or atom argument.
            if x in ("system", "world"):
                x = SETPREFIX + x
            if x.startswith(SETPREFIX):
                s = x[len(SETPREFIX):]
                if s not in sets:
                    raise portage.exception.PackageSetNotFound(s)
                if s in depgraph_sets.sets:
                    continue

                try:
                    set_atoms = root_config.setconfig.getSetAtoms(s)
                except portage.exception.PackageSetNotFound as e:
                    # A nested set reference could not be resolved;
                    # report every collected set error and bail out.
                    writemsg_level("\n\n", level=logging.ERROR,
                        noiselevel=-1)
                    for pset in list(depgraph_sets.sets.values()) + [sets[s]]:
                        for error_msg in pset.errors:
                            writemsg_level("%s\n" % (error_msg,),
                                level=logging.ERROR, noiselevel=-1)

                    writemsg_level(("emerge: the given set '%s' "
                        "contains a non-existent set named '%s'.\n") % \
                        (s, e), level=logging.ERROR, noiselevel=-1)
                    if s in ('world', 'selected') and \
                        SETPREFIX + e.value in sets['selected']:
                        writemsg_level(("Use `emerge --deselect %s%s` to "
                            "remove this set from world_sets.\n") %
                            (SETPREFIX, e,), level=logging.ERROR,
                            noiselevel=-1)
                    writemsg_level("\n", level=logging.ERROR,
                        noiselevel=-1)
                    return False, myfavorites

                pset = sets[s]
                depgraph_sets.sets[s] = pset
                args.append(SetArg(arg=x, pset=pset,
                    root_config=root_config))
                continue
            if not is_valid_package_atom(x, allow_repo=True):
                portage.writemsg("\n\n!!! '%s' is not a valid package atom.\n" % x,
                    noiselevel=-1)
                portage.writemsg("!!! Please check ebuild(5) for full details.\n")
                portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
                self._dynamic_config._skip_restart = True
                return (0,[])
            # Don't expand categories or old-style virtuals here unless
            # necessary. Expansion of old-style virtuals here causes at
            # least the following problems:
            #   1) It's more difficult to determine which set(s) an atom
            #      came from, if any.
            #   2) It takes away freedom from the resolver to choose other
            #      possible expansions when necessary.
            if "/" in x.split(":")[0]:
                args.append(AtomArg(arg=x, atom=Atom(x, allow_repo=True),
                    root_config=root_config))
                continue
            # Category-less atom: expand to all categories providing
            # the package name, then try to disambiguate.
            expanded_atoms = self._dep_expand(root_config, x)
            installed_cp_set = set()
            for atom in expanded_atoms:
                if vardb.cp_list(atom.cp):
                    installed_cp_set.add(atom.cp)

            if len(installed_cp_set) > 1:
                # Prefer a unique non-virtual cp among the installed
                # candidates.
                non_virtual_cps = set()
                for atom_cp in installed_cp_set:
                    if not atom_cp.startswith("virtual/"):
                        non_virtual_cps.add(atom_cp)
                if len(non_virtual_cps) == 1:
                    installed_cp_set = non_virtual_cps

            if len(expanded_atoms) > 1 and len(installed_cp_set) == 1:
                installed_cp = next(iter(installed_cp_set))
                for atom in expanded_atoms:
                    if atom.cp == installed_cp:
                        available = False
                        for pkg in self._iter_match_pkgs_any(
                            root_config, atom.without_use,
                            onlydeps=onlydeps):
                            if not pkg.installed:
                                available = True
                                break
                        if available:
                            expanded_atoms = [atom]
                            break

            # If a non-virtual package and one or more virtual packages
            # are in expanded_atoms, use the non-virtual package.
            if len(expanded_atoms) > 1:
                number_of_virtuals = 0
                for expanded_atom in expanded_atoms:
                    if expanded_atom.cp.startswith("virtual/"):
                        number_of_virtuals += 1
                    else:
                        candidate = expanded_atom
                if len(expanded_atoms) - number_of_virtuals == 1:
                    expanded_atoms = [ candidate ]

            if len(expanded_atoms) > 1:
                # Still ambiguous: show the choices and abort.
                writemsg("\n\n", noiselevel=-1)
                ambiguous_package_name(x, expanded_atoms, root_config,
                    self._frozen_config.spinner, self._frozen_config.myopts)
                self._dynamic_config._skip_restart = True
                return False, myfavorites
            if expanded_atoms:
                atom = expanded_atoms[0]
            else:
                # Nothing matched; fall back to a null category, or a
                # virtual/ category when PROVIDE data suggests one.
                null_atom = Atom(insert_category_into_atom(x, "null"),
                    allow_repo=True)
                cat, atom_pn = portage.catsplit(null_atom.cp)
                virts_p = root_config.settings.get_virts_p().get(atom_pn)
                if virts_p:
                    # Allow the depgraph to choose which virtual.
                    atom = Atom(null_atom.replace('null/', 'virtual/', 1),
                        allow_repo=True)
                else:
                    atom = null_atom

            if atom.use and atom.use.conditional:
                writemsg(
                    ("\n\n!!! '%s' contains a conditional " + \
                    "which is not allowed.\n") % (x,), noiselevel=-1)
                writemsg("!!! Please check ebuild(5) for full details.\n")
                self._dynamic_config._skip_restart = True
                return (0,[])

            args.append(AtomArg(arg=x, atom=atom,
                root_config=root_config))

    if lookup_owners:
        # Translate each queued file path to a slot atom of the
        # package that owns it.
        relative_paths = []
        search_for_multiple = False
        if len(lookup_owners) > 1:
            search_for_multiple = True

        for x in lookup_owners:
            if not search_for_multiple and os.path.isdir(x):
                search_for_multiple = True
            relative_paths.append(x[len(root)-1:])

        owners = set()
        for pkg, relative_path in \
            real_vardb._owners.iter_owners(relative_paths):
            owners.add(pkg.mycpv)
            if not search_for_multiple:
                # Single-file lookup: the first owner is enough.
                break

        if not owners:
            portage.writemsg(("\n\n!!! '%s' is not claimed " + \
                "by any package.\n") % lookup_owners[0], noiselevel=-1)
            self._dynamic_config._skip_restart = True
            return 0, []

        for cpv in owners:
            pkg = vardb._pkg_str(cpv, None)
            atom = Atom("%s:%s" % (pkg.cp, pkg.slot))
            args.append(AtomArg(arg=atom, atom=atom,
                root_config=root_config))

    if "--update" in self._frozen_config.myopts:
        # In some cases, the greedy slots behavior can pull in a slot that
        # the user would want to uninstall due to it being blocked by a
        # newer version in a different slot. Therefore, it's necessary to
        # detect and discard any that should be uninstalled. Each time
        # that arguments are updated, package selections are repeated in
        # order to ensure consistency with the current arguments:
        #
        #  1) Initialize args
        #  2) Select packages and generate initial greedy atoms
        #  3) Update args with greedy atoms
        #  4) Select packages and generate greedy atoms again, while
        #     accounting for any blockers between selected packages
        #  5) Update args with revised greedy atoms
        self._set_args(args)
        greedy_args = []
        for arg in args:
            greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
                continue
            for atom in self._greedy_slots(arg.root_config, arg.atom):
                greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))

        self._set_args(greedy_args)
        del greedy_args

        # Revise greedy atoms, accounting for any blockers
        # between selected packages.
        revised_greedy_args = []
        for arg in args:
            revised_greedy_args.append(arg)
            if not isinstance(arg, AtomArg):
                continue
            for atom in self._greedy_slots(arg.root_config, arg.atom,
                blocker_lookahead=True):
                revised_greedy_args.append(
                    AtomArg(arg=arg.arg, atom=atom,
                        root_config=arg.root_config))
        args = revised_greedy_args
        del revised_greedy_args

    args.extend(self._gen_reinstall_sets())
    self._set_args(args)

    # Collect favorites: explicit atoms/packages plus non-internal
    # set names.
    myfavorites = set(myfavorites)
    for arg in args:
        if isinstance(arg, (AtomArg, PackageArg)):
            myfavorites.add(arg.atom)
        elif isinstance(arg, SetArg):
            if not arg.internal:
                myfavorites.add(arg.arg)
    myfavorites = list(myfavorites)

    if debug:
        portage.writemsg("\n", noiselevel=-1)
    # Order needs to be preserved since a feature of --nodeps
    # is to allow the user to force a specific merge order.
    self._dynamic_config._initial_arg_list = args[:]

    return self._resolve(myfavorites)
def _gen_reinstall_sets(self):
    """
    Yield one internal SetArg per (root, set name) pair, grouping the
    atoms collected from the rebuild, reinstall, and slot-operator
    replacement lists.
    """
    sources = (
        ('__auto_rebuild__', self._rebuild.rebuild_list),
        ('__auto_reinstall__', self._rebuild.reinstall_list),
        ('__auto_slot_operator_replace_installed__',
            self._dynamic_config._slot_operator_replace_installed),
    )

    grouped = {}
    for set_name, entries in sources:
        for root, atom in entries:
            grouped.setdefault((root, set_name), []).append(atom)

    for (root, set_name), atoms in grouped.items():
        yield SetArg(arg=(SETPREFIX + set_name),
            # Set reset_depth=False here, since we don't want these
            # special sets to interact with depth calculations (see
            # the emerge --deep=DEPTH option), though we want them
            # to behave like normal arguments in most other respects.
            pset=InternalPackageSet(initial_atoms=atoms),
            force_reinstall=True,
            internal=True,
            reset_depth=False,
            root_config=self._frozen_config.roots[root])
def _resolve(self, myfavorites):
    """Given self._dynamic_config._initial_arg_list, pull in the root nodes,
    call self._creategraph to process their deps and return
    a favorite list."""
    debug = "--debug" in self._frozen_config.myopts
    onlydeps = "--onlydeps" in self._frozen_config.myopts
    myroot = self._frozen_config.target_root
    pkgsettings = self._frozen_config.pkgsettings[myroot]
    pprovideddict = pkgsettings.pprovideddict
    virtuals = pkgsettings.getvirtuals()
    args = self._dynamic_config._initial_arg_list[:]

    # Phase 1: select a package for every argument atom and seed the
    # dependency graph with it.
    for arg in self._expand_set_args(args, add_to_digraph=True):
        for atom in arg.pset.getAtoms():
            self._spinner_update()
            dep = Dependency(atom=atom, onlydeps=onlydeps,
                root=myroot, parent=arg)
            try:
                pprovided = pprovideddict.get(atom.cp)
                if pprovided and portage.match_from_list(atom, pprovided):
                    # A provided package has been specified on the command line.
                    self._dynamic_config._pprovided_args.append((arg, atom))
                    continue
                if isinstance(arg, PackageArg):
                    # The package is pre-selected (e.g. a .tbz2 or
                    # .ebuild argument); add it directly.
                    if not self._add_pkg(arg.package, dep) or \
                        not self._create_graph():
                        if not self.need_restart():
                            writemsg(("\n\n!!! Problem " + \
                                "resolving dependencies for %s\n") % \
                                arg.arg, noiselevel=-1)
                        return 0, myfavorites
                    continue
                if debug:
                    writemsg_level("\n Arg: %s\n Atom: %s\n" %
                        (arg, atom), noiselevel=-1, level=logging.DEBUG)
                pkg, existing_node = self._select_package(
                    myroot, atom, onlydeps=onlydeps)
                if not pkg:
                    # No match; check package.provided via each
                    # virtual expansion of the atom.
                    pprovided_match = False
                    for virt_choice in virtuals.get(atom.cp, []):
                        expanded_atom = portage.dep.Atom(
                            atom.replace(atom.cp, virt_choice.cp, 1))
                        pprovided = pprovideddict.get(expanded_atom.cp)
                        if pprovided and \
                            portage.match_from_list(expanded_atom, pprovided):
                            # A provided package has been
                            # specified on the command line.
                            self._dynamic_config._pprovided_args.append((arg, atom))
                            pprovided_match = True
                            break
                    if pprovided_match:
                        continue

                    # Silently skip atoms whose only candidates are
                    # --exclude'd packages.
                    excluded = False
                    for any_match in self._iter_match_pkgs_any(
                        self._frozen_config.roots[myroot], atom):
                        if self._frozen_config.excluded_pkgs.findAtomForPackage(
                            any_match, modified_use=self._pkg_use_enabled(any_match)):
                            excluded = True
                            break
                    if excluded:
                        continue

                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("selected", "world")):
                        # Unsatisfied atoms from these special sets are
                        # tolerated as missing; everything else is fatal.
                        self._dynamic_config._unsatisfied_deps_for_display.append(
                            ((myroot, atom), {"myparent" : arg}))
                        return 0, myfavorites

                    self._dynamic_config._missing_args.append((arg, atom))
                    continue
                if atom.cp != pkg.cp:
                    # For old-style virtuals, we need to repeat the
                    # package.provided check against the selected package.
                    expanded_atom = atom.replace(atom.cp, pkg.cp)
                    pprovided = pprovideddict.get(pkg.cp)
                    if pprovided and \
                        portage.match_from_list(expanded_atom, pprovided):
                        # A provided package has been
                        # specified on the command line.
                        self._dynamic_config._pprovided_args.append((arg, atom))
                        continue
                if pkg.installed and \
                    "selective" not in self._dynamic_config.myparams and \
                    not self._frozen_config.excluded_pkgs.findAtomForPackage(
                    pkg, modified_use=self._pkg_use_enabled(pkg)):
                    self._dynamic_config._unsatisfied_deps_for_display.append(
                        ((myroot, atom), {"myparent" : arg}))
                    # Previous behavior was to bail out in this case, but
                    # since the dep is satisfied by the installed package,
                    # it's more friendly to continue building the graph
                    # and just show a warning message. Therefore, only bail
                    # out here if the atom is not from either the system or
                    # world set.
                    if not (isinstance(arg, SetArg) and \
                        arg.name in ("selected", "system", "world")):
                        return 0, myfavorites

                # Add the selected package to the graph as soon as possible
                # so that later dep_check() calls can use it as feedback
                # for making more consistent atom selections.
                if not self._add_pkg(pkg, dep):
                    if self.need_restart():
                        pass
                    elif isinstance(arg, SetArg):
                        writemsg(("\n\n!!! Problem resolving " + \
                            "dependencies for %s from %s\n") % \
                            (atom, arg.arg), noiselevel=-1)
                    else:
                        writemsg(("\n\n!!! Problem resolving " + \
                            "dependencies for %s\n") % \
                            (atom,), noiselevel=-1)
                    return 0, myfavorites

            except SystemExit as e:
                raise # Needed else can't exit
            except Exception as e:
                writemsg("\n\n!!! Problem in '%s' dependencies.\n" % atom, noiselevel=-1)
                writemsg("!!! %s %s\n" % (str(e), str(getattr(e, "__module__", None))))
                raise

    # Now that the root packages have been added to the graph,
    # process the dependencies.
    if not self._create_graph():
        self._apply_parent_use_changes()
        return 0, myfavorites

    try:
        self.altlist()
    except self._unknown_internal_error:
        return False, myfavorites

    # Phase 2: post-graph checks that may trigger backtracking.
    have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
    if (have_slot_conflict and
        not self._accept_blocker_conflicts()) or \
        (self._dynamic_config._allow_backtracking and
        "slot conflict" in self._dynamic_config._backtrack_infos):
        return False, myfavorites

    if self._rebuild.trigger_rebuilds():
        backtrack_infos = self._dynamic_config._backtrack_infos
        config = backtrack_infos.setdefault("config", {})
        config["rebuild_list"] = self._rebuild.rebuild_list
        config["reinstall_list"] = self._rebuild.reinstall_list
        self._dynamic_config._need_restart = True
        return False, myfavorites

    if "config" in self._dynamic_config._backtrack_infos and \
        ("slot_operator_mask_built" in self._dynamic_config._backtrack_infos["config"] or
        "slot_operator_replace_installed" in self._dynamic_config._backtrack_infos["config"]) and \
        self.need_restart():
        return False, myfavorites

    if not self._dynamic_config._prune_rebuilds and \
        self._dynamic_config._slot_operator_replace_installed and \
        self._get_missed_updates():
        # When there are missed updates, we might have triggered
        # some unnecessary rebuilds (see bug #439688). So, prune
        # all the rebuilds and backtrack with the problematic
        # updates masked. The next backtrack run should pull in
        # any rebuilds that are really needed, and this
        # prune_rebuilds path should never be entered more than
        # once in a series of backtracking nodes (in order to
        # avoid a backtracking loop).
        backtrack_infos = self._dynamic_config._backtrack_infos
        config = backtrack_infos.setdefault("config", {})
        config["prune_rebuilds"] = True
        self._dynamic_config._need_restart = True
        return False, myfavorites

    if self.need_restart():
        # want_restart_for_use_change triggers this
        return False, myfavorites

    if "--fetchonly" not in self._frozen_config.myopts and \
        "--buildpkgonly" in self._frozen_config.myopts:
        # --buildpkgonly requires that all build-time deps are
        # already satisfied without merging anything.
        graph_copy = self._dynamic_config.digraph.copy()
        removed_nodes = set()
        for node in graph_copy:
            if not isinstance(node, Package) or \
                node.operation == "nomerge":
                removed_nodes.add(node)
        graph_copy.difference_update(removed_nodes)
        if not graph_copy.hasallzeros(ignore_priority = \
            DepPrioritySatisfiedRange.ignore_medium):
            self._dynamic_config._buildpkgonly_deps_unsatisfied = True
            self._dynamic_config._skip_restart = True
            return False, myfavorites

    if (not self._dynamic_config._prune_rebuilds and
        self._ignored_binaries_autounmask_backtrack()):
        config = self._dynamic_config._backtrack_infos.setdefault("config", {})
        config["prune_rebuilds"] = True
        self._dynamic_config._need_restart = True
        return False, myfavorites

    # Any failures except those due to autounmask *alone* should return
    # before this point, since the success_without_autounmask flag that's
    # set below is reserved for cases where there are *zero* other
    # problems. For reference, see backtrack_depgraph, where it skips the
    # get_best_run() call when success_without_autounmask is True.
    if self._have_autounmask_changes():
        #We failed if the user needs to change the configuration
        self._dynamic_config._success_without_autounmask = True
        if (self._frozen_config.myopts.get("--autounmask-continue") is True and
            "--pretend" not in self._frozen_config.myopts):
            # This will return false if it fails or if the user
            # aborts via --ask.
            if self._display_autounmask(autounmask_continue=True):
                self._apply_autounmask_continue_state()
                self._dynamic_config._need_config_reload = True
                return True, myfavorites
        return False, myfavorites

    # We're true here unless we are missing binaries.
    return (True, myfavorites)
def _apply_autounmask_continue_state(self):
    """
    Apply autounmask changes to Package instances, so that their
    state will be consistent with the configuration file changes.
    """
    for task in self._dynamic_config._serialized_tasks_cache:
        if not isinstance(task, Package):
            continue
        use_after_unmask = self._pkg_use_enabled(task)
        # Only rewrite metadata when autounmask actually changed the
        # effective USE flags.
        if use_after_unmask != task.use.enabled:
            task._metadata['USE'] = ' '.join(use_after_unmask)
def _apply_parent_use_changes(self):
"""
For parents with unsatisfied conditional dependencies, translate
USE change suggestions into autounmask changes.
"""
if (self._dynamic_config._unsatisfied_deps_for_display and
self._dynamic_config._autounmask):
remaining_items = []
for item in self._dynamic_config._unsatisfied_deps_for_display:
pargs, kwargs = item
kwargs = kwargs.copy()
kwargs['collect_use_changes'] = True
if not self._show_unsatisfied_dep(*pargs, **kwargs):
remaining_items.append(item)
if len(remaining_items) != len(self._dynamic_config._unsatisfied_deps_for_display):
self._dynamic_config._unsatisfied_deps_for_display = remaining_items
def _set_args(self, args):
    """
    Create the "__non_set_args__" package set from atoms and packages given as
    arguments. This method can be called multiple times if necessary.
    The package selection cache is automatically invalidated, since
    arguments influence package selections.
    """
    set_atoms = {}
    non_set_atoms = {}
    # Reset per-root argument state so repeated calls start clean.
    for root in self._dynamic_config.sets:
        depgraph_sets = self._dynamic_config.sets[root]
        depgraph_sets.sets.setdefault('__non_set_args__',
            InternalPackageSet(allow_repo=True)).clear()
        depgraph_sets.atoms.clear()
        depgraph_sets.atom_arg_map.clear()
        set_atoms[root] = []
        non_set_atoms[root] = []

    # We don't add set args to the digraph here since that
    # happens at a later stage and we don't want to make
    # any state changes here that aren't reversed by a
    # another call to this method.
    for arg in self._expand_set_args(args, add_to_digraph=False):
        atom_arg_map = self._dynamic_config.sets[
            arg.root_config.root].atom_arg_map
        if isinstance(arg, SetArg):
            atom_group = set_atoms[arg.root_config.root]
        else:
            atom_group = non_set_atoms[arg.root_config.root]

        for atom in arg.pset.getAtoms():
            atom_group.append(atom)
            atom_key = (atom, arg.root_config.root)
            # Map each (atom, root) key to the list of args that
            # contributed it, without duplicates.
            refs = atom_arg_map.get(atom_key)
            if refs is None:
                refs = []
                atom_arg_map[atom_key] = refs
            if arg not in refs:
                refs.append(arg)

    # Publish the collected atoms into the per-root depgraph sets.
    for root in self._dynamic_config.sets:
        depgraph_sets = self._dynamic_config.sets[root]
        depgraph_sets.atoms.update(chain(set_atoms.get(root, []),
            non_set_atoms.get(root, [])))
        depgraph_sets.sets['__non_set_args__'].update(
            non_set_atoms.get(root, []))

    # Invalidate the package selection cache, since
    # arguments influence package selections.
    self._dynamic_config._highest_pkg_cache.clear()
    self._dynamic_config._highest_pkg_cache_cp_map.clear()
    for trees in self._dynamic_config._filtered_trees.values():
        trees["porttree"].dbapi._clear_cache()
def _greedy_slots(self, root_config, atom, blocker_lookahead=False):
    """
    Return a list of slot atoms corresponding to installed slots that
    differ from the slot of the highest visible match. When
    blocker_lookahead is True, slot atoms that would trigger a blocker
    conflict are automatically discarded, potentially allowing automatic
    uninstallation of older slots when appropriate.
    """
    highest_pkg, in_graph = self._select_package(root_config.root, atom)
    if highest_pkg is None:
        return []
    vardb = root_config.trees["vartree"].dbapi
    # Collect the set of installed slots for the same cp as the
    # highest visible match.
    slots = set()
    for cpv in vardb.match(atom):
        # don't mix new virtuals with old virtuals
        pkg = vardb._pkg_str(cpv, None)
        if pkg.cp == highest_pkg.cp:
            slots.add(pkg.slot)

    slots.add(highest_pkg.slot)
    if len(slots) == 1:
        # Only one slot involved; nothing greedy to add.
        return []
    greedy_pkgs = []
    slots.remove(highest_pkg.slot)
    while slots:
        slot = slots.pop()
        slot_atom = portage.dep.Atom("%s:%s" % (highest_pkg.cp, slot))
        pkg, in_graph = self._select_package(root_config.root, slot_atom)
        if pkg is not None and \
            pkg.cp == highest_pkg.cp and pkg < highest_pkg:
            greedy_pkgs.append(pkg)
    if not greedy_pkgs:
        return []
    if not blocker_lookahead:
        return [pkg.slot_atom for pkg in greedy_pkgs]

    # Gather each candidate's blocker atoms so conflicting slots can
    # be discarded.
    blockers = {}
    blocker_dep_keys = Package._dep_keys
    for pkg in greedy_pkgs + [highest_pkg]:
        dep_str = " ".join(pkg._metadata[k] for k in blocker_dep_keys)
        try:
            selected_atoms = self._select_atoms(
                pkg.root, dep_str, self._pkg_use_enabled(pkg),
                parent=pkg, strict=True)
        except portage.exception.InvalidDependString:
            # Packages with invalid deps get no blockers entry and
            # are filtered out below.
            continue
        blocker_atoms = []
        for atoms in selected_atoms.values():
            blocker_atoms.extend(x for x in atoms if x.blocker)
        blockers[pkg] = InternalPackageSet(initial_atoms=blocker_atoms)

    if highest_pkg not in blockers:
        return []

    # filter packages with invalid deps
    greedy_pkgs = [pkg for pkg in greedy_pkgs if pkg in blockers]

    # filter packages that conflict with highest_pkg
    greedy_pkgs = [pkg for pkg in greedy_pkgs if not \
        (blockers[highest_pkg].findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)) or \
        blockers[pkg].findAtomForPackage(highest_pkg, modified_use=self._pkg_use_enabled(highest_pkg)))]

    if not greedy_pkgs:
        return []

    # If two packages conflict, discard the lower version.
    discard_pkgs = set()
    greedy_pkgs.sort(reverse=True)
    for i in range(len(greedy_pkgs) - 1):
        pkg1 = greedy_pkgs[i]
        if pkg1 in discard_pkgs:
            continue
        for j in range(i + 1, len(greedy_pkgs)):
            pkg2 = greedy_pkgs[j]
            if pkg2 in discard_pkgs:
                continue
            if blockers[pkg1].findAtomForPackage(pkg2, modified_use=self._pkg_use_enabled(pkg2)) or \
                blockers[pkg2].findAtomForPackage(pkg1, modified_use=self._pkg_use_enabled(pkg1)):
                # pkg1 > pkg2
                discard_pkgs.add(pkg2)

    return [pkg.slot_atom for pkg in greedy_pkgs \
        if pkg not in discard_pkgs]
def _select_atoms_from_graph(self, *pargs, **kwargs):
"""
Prefer atoms matching packages that have already been
added to the graph or those that are installed and have
not been scheduled for replacement.
"""
kwargs["trees"] = self._dynamic_config._graph_trees
return self._select_atoms_highest_available(*pargs, **kwargs)
def _select_atoms_highest_available(self, root, depstring,
    myuse=None, parent=None, strict=True, trees=None, priority=None):
    """This will raise InvalidDependString if necessary. If trees is
    None then self._dynamic_config._filtered_trees is used.

    Evaluates a dependency string via portage.dep_check and, when a
    parent package is given, reconstructs the virtual/atom selection
    graph that dep_check recorded, returning an ordered mapping of
    Dependency keys to the atoms chosen for them.
    """
    # A raw dep string must first be reduced to a structured list of
    # Atom tokens; an already-reduced list is used as-is.
    if not isinstance(depstring, list):
        eapi = None
        is_valid_flag = None
        if parent is not None:
            eapi = parent.eapi
            # Installed packages may reference flags that are no longer
            # in IUSE, so flag validation is skipped for them.
            if not parent.installed:
                is_valid_flag = parent.iuse.is_valid_flag
        depstring = portage.dep.use_reduce(depstring,
            uselist=myuse, opconvert=True, token_class=Atom,
            is_valid_flag=is_valid_flag, eapi=eapi)

    # Optionally strip := slot-operator deps from built packages
    # (controlled by the ignore_built_slot_operator_deps param).
    if (self._dynamic_config.myparams.get(
        "ignore_built_slot_operator_deps", "n") == "y" and
        parent and parent.built):
        ignore_built_slot_operator_deps(depstring)

    pkgsettings = self._frozen_config.pkgsettings[root]
    if trees is None:
        trees = self._dynamic_config._filtered_trees
    mytrees = trees[root]
    atom_graph = digraph()
    # NOTE: "if True:" is a no-op guard, apparently kept to preserve
    # the indentation of the block below.
    if True:
        # Temporarily disable autounmask so that || preferences
        # account for masking and USE settings.
        _autounmask_backup = self._dynamic_config._autounmask
        self._dynamic_config._autounmask = False
        # backup state for restoration, in case of recursive
        # calls to this method
        backup_parent = self._select_atoms_parent
        backup_state = mytrees.copy()
        try:
            # clear state from previous call, in case this
            # call is recursive (we have a backup, that we
            # will use to restore it later)
            self._select_atoms_parent = None
            mytrees.pop("pkg_use_enabled", None)
            mytrees.pop("parent", None)
            mytrees.pop("atom_graph", None)
            mytrees.pop("priority", None)

            # dep_check communicates through keys stashed in the
            # trees mapping; atom_graph collects its atom choices.
            mytrees["pkg_use_enabled"] = self._pkg_use_enabled
            if parent is not None:
                self._select_atoms_parent = parent
                mytrees["parent"] = parent
                mytrees["atom_graph"] = atom_graph
            if priority is not None:
                mytrees["priority"] = priority

            mycheck = portage.dep_check(depstring, None,
                pkgsettings, myuse=myuse,
                myroot=root, trees=trees)
        finally:
            # restore state
            self._dynamic_config._autounmask = _autounmask_backup
            self._select_atoms_parent = backup_parent
            mytrees.pop("pkg_use_enabled", None)
            mytrees.pop("parent", None)
            mytrees.pop("atom_graph", None)
            mytrees.pop("priority", None)
            mytrees.update(backup_state)

    # mycheck is (success, atoms-or-error-message).
    if not mycheck[0]:
        raise portage.exception.InvalidDependString(mycheck[1])
    if parent is None:
        selected_atoms = mycheck[1]
    elif parent not in atom_graph:
        # dep_check never registered the parent, so there is no
        # virtual expansion to traverse.
        selected_atoms = {parent : mycheck[1]}
    else:
        # Recursively traversed virtual dependencies, and their
        # direct dependencies, are considered to have the same
        # depth as direct dependencies.
        if isinstance(parent.depth, int):
            virt_depth = parent.depth + 1
        else:
            # The depth may be None when called via
            # _select_atoms_probe, or it may be
            # _UNREACHABLE_DEPTH for complete mode.
            virt_depth = parent.depth

        # Identify chosen atoms by object identity, including the
        # original atoms behind any expanded virtual atoms.
        chosen_atom_ids = frozenset(chain(
            (id(atom) for atom in mycheck[1]),
            (id(atom._orig_atom) for atom in mycheck[1]
                if hasattr(atom, '_orig_atom')),
        ))
        selected_atoms = OrderedDict()
        node_stack = [(parent, None, None)]
        traversed_nodes = set()
        # Depth-first walk of atom_graph, producing a Dependency key
        # for each traversed virtual package node.
        while node_stack:
            node, node_parent, parent_atom = node_stack.pop()
            traversed_nodes.add(node)
            if node is parent:
                k = parent
            else:
                if node_parent is parent:
                    if priority is None:
                        node_priority = None
                    else:
                        node_priority = priority.copy()
                else:
                    # virtuals only have runtime deps
                    node_priority = self._priority(runtime=True)

                k = Dependency(atom=parent_atom,
                    blocker=parent_atom.blocker, child=node,
                    depth=virt_depth, parent=node_parent,
                    priority=node_priority, root=node.root)

            child_atoms = []
            selected_atoms[k] = child_atoms
            for atom_node in atom_graph.child_nodes(node):
                child_atom = atom_node[0]
                if id(child_atom) not in chosen_atom_ids:
                    continue
                child_atoms.append(child_atom)
                for child_node in atom_graph.child_nodes(atom_node):
                    if child_node in traversed_nodes:
                        continue
                    if not portage.match_from_list(
                        child_atom, [child_node]):
                        # Typically this means that the atom
                        # specifies USE deps that are unsatisfied
                        # by the selected package. The caller will
                        # record this as an unsatisfied dependency
                        # when necessary.
                        continue
                    node_stack.append((child_node, node, child_atom))

    return selected_atoms
def _expand_virt_from_graph(self, root, atom):
    """
    Expand a virtual/* atom into the real atoms provided by the
    matching virtual package(s), as resolved in the dependency graph.

    @param root: ROOT that the atom applies to
    @param atom: Atom instance or atom string to expand
    @rtype: generator of Atom
    @return: the real (non-virtual) RDEPEND atoms behind the virtual;
        the original atom is yielded unchanged when it is not a
        virtual, or when no expansion could be computed
    """
    if not isinstance(atom, Atom):
        atom = Atom(atom)

    # Non-virtual atoms pass through unchanged.
    if not atom.cp.startswith("virtual/"):
        yield atom
        return

    any_match = False
    for pkg in self._dynamic_config._package_tracker.match(root, atom):
        try:
            rdepend = self._select_atoms_from_graph(
                pkg.root, pkg._metadata.get("RDEPEND", ""),
                myuse=self._pkg_use_enabled(pkg),
                parent=pkg, strict=False)
        except InvalidDependString as e:
            # Best effort: skip packages with broken RDEPEND.
            writemsg_level("!!! Invalid RDEPEND in " + \
                "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
                (pkg.root, pkg.cpv, e),
                noiselevel=-1, level=logging.ERROR)
            continue

        # Bug fix: use distinct loop variables so that the "atom"
        # parameter is not clobbered. Previously the inner loop rebound
        # "atom", so the no-match fallback below could yield the last
        # examined dep atom instead of the original virtual atom.
        for dep_atoms in rdepend.values():
            for dep_atom in dep_atoms:
                if hasattr(dep_atom, "_orig_atom"):
                    # Ignore virtual atoms since we're only
                    # interested in expanding the real atoms.
                    continue
                yield dep_atom
                any_match = True

    if not any_match:
        yield atom
def _virt_deps_visible(self, pkg, ignore_use=False):
    """
    Assumes pkg is a virtual package. Traverses virtual deps recursively
    and returns True if all deps are visible, False otherwise. This is
    useful for checking if it will be necessary to expand virtual slots,
    for cases like bug #382557.

    @param pkg: the virtual Package whose deps are checked
    @param ignore_use: when True, USE deps are stripped from each atom
        before package selection
    @rtype: bool
    """
    try:
        rdepend = self._select_atoms(
            pkg.root, pkg._metadata.get("RDEPEND", ""),
            myuse=self._pkg_use_enabled(pkg),
            parent=pkg, priority=self._priority(runtime=True))
    except InvalidDependString as e:
        # A non-installed package with broken metadata should have
        # been masked already, so propagate in that case.
        if not pkg.installed:
            raise
        writemsg_level("!!! Invalid RDEPEND in " + \
            "'%svar/db/pkg/%s/RDEPEND': %s\n" % \
            (pkg.root, pkg.cpv, e),
            noiselevel=-1, level=logging.ERROR)
        return False

    # Bug fix: bind the selected package to a dedicated name so the
    # "pkg" parameter is not clobbered. Previously the loop rebound
    # "pkg", so every iteration after the first selected against the
    # root of the previously selected package instead of the
    # original virtual's root.
    root = pkg.root
    for atoms in rdepend.values():
        for atom in atoms:
            if ignore_use:
                atom = atom.without_use
            dep_pkg, existing = self._select_package(
                root, atom)
            if dep_pkg is None or not self._pkg_visibility_check(dep_pkg):
                return False

    return True
def _get_dep_chain(self, start_node, target_atom=None,
    unsatisfied_dependency=False):
    """
    Returns a list of (atom, node_type) pairs that represent a dep chain.
    If target_atom is None, the first package shown is pkg's parent.
    If target_atom is not None the first package shown is pkg.
    If unsatisfied_dependency is True, the first parent is select who's
    dependency is not satisfied by 'pkg'. This is need for USE changes.
    (Does not support target_atom.)
    """
    traversed_nodes = set()
    dep_chain = []
    node = start_node
    child = None
    all_parents = self._dynamic_config._parent_atoms
    graph = self._dynamic_config.digraph

    def format_pkg(pkg):
        # "cpv::repo" display form.
        pkg_name = "%s%s%s" % (pkg.cpv, _repo_separator, pkg.repo)
        return pkg_name

    if target_atom is not None and isinstance(node, Package):
        # Show start_node itself first, annotated with the USE flags
        # that affect whether target_atom applies.
        affecting_use = set()
        for dep_str in Package._dep_keys:
            try:
                affecting_use.update(extract_affecting_use(
                    node._metadata[dep_str], target_atom,
                    eapi=node.eapi))
            except InvalidDependString:
                if not node.installed:
                    raise
        affecting_use.difference_update(node.use.mask, node.use.force)
        pkg_name = format_pkg(node)

        if affecting_use:
            usedep = []
            for flag in affecting_use:
                if flag in self._pkg_use_enabled(node):
                    usedep.append(flag)
                else:
                    usedep.append("-"+flag)
            pkg_name += "[%s]" % ",".join(usedep)

        dep_chain.append((pkg_name, node.type_name))

    # To build a dep chain for the given package we take
    # "random" parents form the digraph, except for the
    # first package, because we want a parent that forced
    # the corresponding change (i.e '>=foo-2', instead 'foo').

    traversed_nodes.add(start_node)

    start_node_parent_atoms = {}
    for ppkg, patom in all_parents.get(node, []):
        # Get a list of suitable atoms. For use deps
        # (aka unsatisfied_dependency is not None) we
        # need that the start_node doesn't match the atom.
        if not unsatisfied_dependency or \
            not patom.match(start_node):
            start_node_parent_atoms.setdefault(patom, []).append(ppkg)

    if start_node_parent_atoms:
        # If there are parents in all_parents then use one of them.
        # If not, then this package got pulled in by an Arg and
        # will be correctly handled by the code that handles later
        # packages in the dep chain.
        if (any(not x.package for x in start_node_parent_atoms) and
            any(x.package for x in start_node_parent_atoms)):
            # Mixed package and non-package (e.g. soname) atoms:
            # prefer the package atoms.
            for x in list(start_node_parent_atoms):
                if not x.package:
                    del start_node_parent_atoms[x]
        if next(iter(start_node_parent_atoms)).package:
            best_match = best_match_to_list(node.cpv,
                start_node_parent_atoms)
        else:
            best_match = next(iter(start_node_parent_atoms))

        child = node
        for ppkg in start_node_parent_atoms[best_match]:
            node = ppkg
            if ppkg in self._dynamic_config._initial_arg_list:
                # Stop if reached the top level of the dep chain.
                break

    # Walk upward through the digraph until an argument or a
    # backtracked (absent) node is reached.
    while node is not None:
        traversed_nodes.add(node)

        if node not in graph:
            # The parent is not in the graph due to backtracking.
            break

        elif isinstance(node, DependencyArg):
            if graph.parent_nodes(node):
                node_type = "set"
            else:
                node_type = "argument"
            dep_chain.append(("%s" % (node,), node_type))

        elif node is not start_node:
            # Find the atom through which this node pulled in child.
            # NOTE(review): "atom" stays unbound if no entry matches;
            # presumably all_parents[child] always contains node here.
            for ppkg, patom in all_parents[child]:
                if ppkg == node:
                    if child is start_node and unsatisfied_dependency and \
                        patom.match(child):
                        # This atom is satisfied by child, there must be another atom.
                        continue
                    atom = (patom.unevaluated_atom
                        if patom.package else patom)
                    break

            dep_strings = set()
            priorities = graph.nodes[node][0].get(child)
            if priorities is None:
                # This edge comes from _parent_atoms and was not added to
                # the graph, and _parent_atoms does not contain priorities.
                for k in Package._dep_keys:
                    dep_strings.add(node._metadata[k])
            else:
                # Only inspect the dep variables implied by the
                # recorded edge priorities.
                for priority in priorities:
                    if priority.buildtime:
                        for k in Package._buildtime_keys:
                            dep_strings.add(node._metadata[k])
                    if priority.runtime:
                        dep_strings.add(node._metadata["RDEPEND"])
                    if priority.runtime_post:
                        dep_strings.add(node._metadata["PDEPEND"])

            affecting_use = set()
            for dep_str in dep_strings:
                try:
                    affecting_use.update(extract_affecting_use(
                        dep_str, atom, eapi=node.eapi))
                except InvalidDependString:
                    if not node.installed:
                        raise

            #Don't show flags as 'affecting' if the user can't change them,
            affecting_use.difference_update(node.use.mask, \
                node.use.force)

            pkg_name = format_pkg(node)
            if affecting_use:
                usedep = []
                for flag in affecting_use:
                    if flag in self._pkg_use_enabled(node):
                        usedep.append(flag)
                    else:
                        usedep.append("-"+flag)
                pkg_name += "[%s]" % ",".join(usedep)

            dep_chain.append((pkg_name, node.type_name))

        # When traversing to parents, prefer arguments over packages
        # since arguments are root nodes. Never traverse the same
        # package twice, in order to prevent an infinite loop.
        child = node
        selected_parent = None
        parent_arg = None
        parent_merge = None
        parent_unsatisfied = None

        for parent in self._dynamic_config.digraph.parent_nodes(node):
            if parent in traversed_nodes:
                continue
            if isinstance(parent, DependencyArg):
                parent_arg = parent
            else:
                if isinstance(parent, Package) and \
                    parent.operation == "merge":
                    parent_merge = parent
                if unsatisfied_dependency and node is start_node:
                    # Make sure that pkg doesn't satisfy parent's dependency.
                    # This ensures that we select the correct parent for use
                    # flag changes.
                    for ppkg, atom in all_parents[start_node]:
                        if parent is ppkg:
                            if not atom.match(start_node):
                                parent_unsatisfied = parent
                            break
                else:
                    selected_parent = parent

        if parent_unsatisfied is not None:
            selected_parent = parent_unsatisfied
        elif parent_merge is not None:
            # Prefer parent in the merge list (bug #354747).
            selected_parent = parent_merge
        elif parent_arg is not None:
            if self._dynamic_config.digraph.parent_nodes(parent_arg):
                selected_parent = parent_arg
            else:
                # A root argument terminates the chain.
                dep_chain.append(("%s" % (parent_arg,), "argument"))
                selected_parent = None

        node = selected_parent
    return dep_chain
def _get_dep_chain_as_comment(self, pkg, unsatisfied_dependency=False):
dep_chain = self._get_dep_chain(pkg, unsatisfied_dependency=unsatisfied_dependency)
display_list = []
for node, node_type in dep_chain:
if node_type == "argument":
display_list.append("required by %s (argument)" % node)
else:
display_list.append("required by %s" % node)
msg = "# " + "\n# ".join(display_list) + "\n"
return msg
def _show_unsatisfied_dep(self, root, atom, myparent=None, arg=None,
    check_backtrack=False, check_autounmask_breakage=False, show_req_use=None,
    collect_use_changes=False):
    """
    When check_backtrack=True, no output is produced and
    the method either returns or raises _backtrack_mask if
    a matching package has been masked by backtracking.

    Otherwise, analyzes why the given atom could not be satisfied
    (masking, missing/mismatched USE flags, unsatisfied REQUIRED_USE,
    or no matching ebuilds at all) and writes a diagnostic message.
    When collect_use_changes=True, instead records autounmask USE
    changes on the parent and returns True/False accordingly.
    """
    backtrack_mask = False
    autounmask_broke_use_dep = False
    if atom.package:
        xinfo = '"%s"' % atom.unevaluated_atom
        atom_without_use = atom.without_use
    else:
        xinfo = '"%s"' % atom
        atom_without_use = None

    if arg:
        xinfo='"%s"' % arg
    if isinstance(myparent, AtomArg):
        xinfo = '"%s"' % (myparent,)
    # Discard null/ from failed cpv_expand category expansion.
    xinfo = xinfo.replace("null/", "")
    if root != self._frozen_config._running_root.root:
        xinfo = "%s for %s" % (xinfo, root)
    masked_packages = []
    missing_use = []
    missing_use_adjustable = set()
    required_use_unsatisfied = []
    masked_pkg_instances = set()
    have_eapi_mask = False
    pkgsettings = self._frozen_config.pkgsettings[root]
    root_config = self._frozen_config.roots[root]
    portdb = self._frozen_config.roots[root].trees["porttree"].dbapi
    vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
    bindb = self._frozen_config.roots[root].trees["bintree"].dbapi
    dbs = self._dynamic_config._filtered_trees[root]["dbs"]
    use_ebuild_visibility = self._frozen_config.myopts.get(
        '--use-ebuild-visibility', 'n') != 'n'

    # Pass 1: collect candidate packages from every non-installed db
    # and classify each one (masked, missing USE, REQUIRED_USE, ...).
    for db, pkg_type, built, installed, db_keys in dbs:
        if installed:
            continue
        if atom.soname:
            if not isinstance(db, DbapiProvidesIndex):
                continue
            cpv_list = db.match(atom)
        elif hasattr(db, "xmatch"):
            cpv_list = db.xmatch("match-all-cpv-only", atom.without_use)
        else:
            cpv_list = db.match(atom.without_use)

        if atom.soname:
            repo_list = [None]
        elif atom.repo is None and hasattr(db, "getRepositories"):
            repo_list = db.getRepositories()
        else:
            repo_list = [atom.repo]

        # descending order
        cpv_list.reverse()
        for cpv in cpv_list:
            for repo in repo_list:
                if not db.cpv_exists(cpv, myrepo=repo):
                    continue

                metadata, mreasons = get_mask_info(root_config, cpv, pkgsettings, db, pkg_type, \
                    built, installed, db_keys, myrepo=repo, _pkg_use_enabled=self._pkg_use_enabled)
                if metadata is not None and \
                    portage.eapi_is_supported(metadata["EAPI"]):
                    if not repo:
                        repo = metadata.get('repository')
                    pkg = self._pkg(cpv, pkg_type, root_config,
                        installed=installed, myrepo=repo)
                    # pkg._metadata contains calculated USE for ebuilds,
                    # required later for getMissingLicenses.
                    metadata = pkg._metadata
                    if pkg.invalid:
                        # Avoid doing any operations with packages that
                        # have invalid metadata. It would be unsafe at
                        # least because it could trigger unhandled
                        # exceptions in places like check_required_use().
                        masked_packages.append(
                            (root_config, pkgsettings, cpv, repo, metadata, mreasons))
                        continue
                    if atom.soname and not atom.match(pkg):
                        continue
                    if (atom_without_use is not None and
                        not atom_without_use.match(pkg)):
                        continue
                    if pkg in self._dynamic_config._runtime_pkg_mask:
                        backtrack_reasons = \
                            self._dynamic_config._runtime_pkg_mask[pkg]
                        mreasons.append('backtracking: %s' % \
                            ', '.join(sorted(backtrack_reasons)))
                        backtrack_mask = True
                    if not mreasons and self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
                        modified_use=self._pkg_use_enabled(pkg)):
                        mreasons = ["exclude option"]
                    if mreasons:
                        masked_pkg_instances.add(pkg)
                    if atom.package and atom.unevaluated_atom.use:
                        try:
                            # Classify packages whose USE state
                            # violates the atom's USE deps.
                            if not pkg.iuse.is_valid_flag(atom.unevaluated_atom.use.required) \
                                or atom.violated_conditionals(self._pkg_use_enabled(pkg), pkg.iuse.is_valid_flag).use:
                                missing_use.append(pkg)
                                if atom.match(pkg):
                                    autounmask_broke_use_dep = True
                                if not mreasons:
                                    continue
                        except InvalidAtom:
                            writemsg("violated_conditionals raised " + \
                                "InvalidAtom: '%s' parent: %s" % \
                                (atom, myparent), noiselevel=-1)
                            raise
                    if not mreasons and \
                        not pkg.built and \
                        pkg._metadata.get("REQUIRED_USE") and \
                        eapi_has_required_use(pkg.eapi):
                        if not check_required_use(
                            pkg._metadata["REQUIRED_USE"],
                            self._pkg_use_enabled(pkg),
                            pkg.iuse.is_valid_flag,
                            eapi=pkg.eapi):
                            required_use_unsatisfied.append(pkg)
                            continue

                    # Built packages may be rejected for reasons that
                    # are independent of masking.
                    root_slot = (pkg.root, pkg.slot_atom)
                    if pkg.built and root_slot in self._rebuild.rebuild_list:
                        mreasons = ["need to rebuild from source"]
                    elif pkg.installed and root_slot in self._rebuild.reinstall_list:
                        mreasons = ["need to rebuild from source"]
                    elif (pkg.built and not mreasons and
                        self._dynamic_config.ignored_binaries.get(
                        pkg, {}).get("respect_use")):
                        mreasons = ["use flag configuration mismatch"]
                    elif (pkg.built and not mreasons and
                        self._dynamic_config.ignored_binaries.get(
                        pkg, {}).get("changed_deps")):
                        mreasons = ["changed deps"]
                    elif (pkg.built and use_ebuild_visibility and
                        not self._equiv_ebuild_visible(pkg)):
                        equiv_ebuild = self._equiv_ebuild(pkg)
                        if equiv_ebuild is None:
                            if portdb.cpv_exists(pkg.cpv):
                                mreasons = ["ebuild corrupt"]
                            else:
                                mreasons = ["ebuild not available"]
                        elif not mreasons:
                            mreasons = get_masking_status(
                                equiv_ebuild, pkgsettings, root_config,
                                use=self._pkg_use_enabled(equiv_ebuild))
                            if mreasons:
                                metadata = equiv_ebuild._metadata

                masked_packages.append(
                    (root_config, pkgsettings, cpv, repo, metadata, mreasons))

    # Probe-only modes: raise or return without producing output.
    if check_backtrack:
        if backtrack_mask:
            raise self._backtrack_mask()
        else:
            return

    if check_autounmask_breakage:
        if autounmask_broke_use_dep:
            raise self._autounmask_breakage()
        else:
            return

    # Pass 2: turn the missing-USE candidates into human-readable
    # reasons, suggesting USE changes on the package or its parent.
    missing_use_reasons = []
    missing_iuse_reasons = []
    for pkg in missing_use:
        use = self._pkg_use_enabled(pkg)
        missing_iuse = []
        #Use the unevaluated atom here, because some flags might have gone
        #lost during evaluation.
        required_flags = atom.unevaluated_atom.use.required
        missing_iuse = pkg.iuse.get_missing_iuse(required_flags)

        mreasons = []
        if missing_iuse:
            mreasons.append("Missing IUSE: %s" % " ".join(missing_iuse))
            missing_iuse_reasons.append((pkg, mreasons))
        else:
            need_enable = sorted(atom.use.enabled.difference(use).intersection(pkg.iuse.all))
            need_disable = sorted(atom.use.disabled.intersection(use).intersection(pkg.iuse.all))

            untouchable_flags = \
                frozenset(chain(pkg.use.mask, pkg.use.force))
            if any(x in untouchable_flags for x in
                chain(need_enable, need_disable)):
                continue

            missing_use_adjustable.add(pkg)
            required_use = pkg._metadata.get("REQUIRED_USE")
            required_use_warning = ""
            if required_use:
                # Warn if the suggested change would break a currently
                # satisfied REQUIRED_USE constraint.
                old_use = self._pkg_use_enabled(pkg)
                new_use = set(self._pkg_use_enabled(pkg))
                for flag in need_enable:
                    new_use.add(flag)
                for flag in need_disable:
                    new_use.discard(flag)
                if check_required_use(required_use, old_use,
                    pkg.iuse.is_valid_flag, eapi=pkg.eapi) \
                    and not check_required_use(required_use, new_use,
                    pkg.iuse.is_valid_flag, eapi=pkg.eapi):
                    required_use_warning = ", this change violates use flag constraints " + \
                        "defined by %s: '%s'" % (pkg.cpv, human_readable_required_use(required_use))

            if need_enable or need_disable:
                changes = []
                changes.extend(colorize("red", "+" + x) \
                    for x in need_enable)
                changes.extend(colorize("blue", "-" + x) \
                    for x in need_disable)
                mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
                missing_use_reasons.append((pkg, mreasons))

        if not missing_iuse and myparent and atom.unevaluated_atom.use.conditional:
            # Lets see if the violated use deps are conditional.
            # If so, suggest to change them on the parent.

            # If the child package is masked then a change to
            # parent USE is not a valid solution (a normal mask
            # message should be displayed instead).
            if pkg in masked_pkg_instances:
                continue

            mreasons = []
            violated_atom = atom.unevaluated_atom.violated_conditionals(self._pkg_use_enabled(pkg), \
                pkg.iuse.is_valid_flag, self._pkg_use_enabled(myparent))
            if not (violated_atom.use.enabled or violated_atom.use.disabled):
                #all violated use deps are conditional
                changes = []
                conditional = violated_atom.use.conditional
                involved_flags = set(chain(conditional.equal, conditional.not_equal, \
                    conditional.enabled, conditional.disabled))

                untouchable_flags = \
                    frozenset(chain(myparent.use.mask, myparent.use.force))
                if any(x in untouchable_flags for x in involved_flags):
                    continue

                required_use = myparent._metadata.get("REQUIRED_USE")
                required_use_warning = ""
                if required_use:
                    # Same REQUIRED_USE break check, but against the
                    # parent's flags this time.
                    old_use = self._pkg_use_enabled(myparent)
                    new_use = set(self._pkg_use_enabled(myparent))
                    for flag in involved_flags:
                        if flag in old_use:
                            new_use.discard(flag)
                        else:
                            new_use.add(flag)
                    if check_required_use(required_use, old_use,
                        myparent.iuse.is_valid_flag,
                        eapi=myparent.eapi) and \
                        not check_required_use(required_use, new_use,
                        myparent.iuse.is_valid_flag,
                        eapi=myparent.eapi):
                        required_use_warning = ", this change violates use flag constraints " + \
                            "defined by %s: '%s'" % (myparent.cpv, \
                            human_readable_required_use(required_use))

                target_use = {}
                for flag in involved_flags:
                    if flag in self._pkg_use_enabled(myparent):
                        target_use[flag] = False
                        changes.append(colorize("blue", "-" + flag))
                    else:
                        target_use[flag] = True
                        changes.append(colorize("red", "+" + flag))

                if collect_use_changes and not required_use_warning:
                    # Record the change via autounmask; success is
                    # detected by the needed-changes map changing.
                    previous_changes = self._dynamic_config._needed_use_config_changes.get(myparent)
                    self._pkg_use_enabled(myparent, target_use=target_use)
                    if previous_changes is not self._dynamic_config._needed_use_config_changes.get(myparent):
                        return True

                mreasons.append("Change USE: %s" % " ".join(changes) + required_use_warning)
                if (myparent, mreasons) not in missing_use_reasons:
                    missing_use_reasons.append((myparent, mreasons))

    if collect_use_changes:
        return False

    unmasked_use_reasons = [(pkg, mreasons) for (pkg, mreasons) \
        in missing_use_reasons if pkg not in masked_pkg_instances]

    unmasked_iuse_reasons = [(pkg, mreasons) for (pkg, mreasons) \
        in missing_iuse_reasons if pkg not in masked_pkg_instances]

    show_missing_use = False
    if unmasked_use_reasons:
        # Only show the latest version.
        show_missing_use = []
        pkg_reason = None
        parent_reason = None
        for pkg, mreasons in unmasked_use_reasons:
            if pkg is myparent:
                if parent_reason is None:
                    #This happens if a use change on the parent
                    #leads to a satisfied conditional use dep.
                    parent_reason = (pkg, mreasons)
            elif pkg_reason is None:
                #Don't rely on the first pkg in unmasked_use_reasons,
                #being the highest version of the dependency.
                pkg_reason = (pkg, mreasons)
        if pkg_reason:
            show_missing_use.append(pkg_reason)
        if parent_reason:
            show_missing_use.append(parent_reason)

    elif unmasked_iuse_reasons:
        masked_with_iuse = False
        for pkg in masked_pkg_instances:
            #Use atom.unevaluated here, because some flags might have gone
            #lost during evaluation.
            if not pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
                # Package(s) with required IUSE are masked,
                # so display a normal masking message.
                masked_with_iuse = True
                break
        if not masked_with_iuse:
            show_missing_use = unmasked_iuse_reasons

    if required_use_unsatisfied:
        # If there's a higher unmasked version in missing_use_adjustable
        # then we want to show that instead.
        for pkg in missing_use_adjustable:
            if pkg not in masked_pkg_instances and \
                pkg > required_use_unsatisfied[0]:
                required_use_unsatisfied = False
                break

    mask_docs = False

    if show_req_use is None and required_use_unsatisfied:
        # We have an unmasked package that only requires USE adjustment
        # in order to satisfy REQUIRED_USE, and nothing more. We assume
        # that the user wants the latest version, so only the first
        # instance is displayed.
        show_req_use = required_use_unsatisfied[0]

    # Pass 3: pick exactly one message style, in priority order:
    # REQUIRED_USE, missing USE, masked packages, nothing matched.
    if show_req_use is not None:
        pkg = show_req_use
        output_cpv = pkg.cpv + _repo_separator + pkg.repo
        writemsg("\n!!! " + \
            colorize("BAD", "The ebuild selected to satisfy ") + \
            colorize("INFORM", xinfo) + \
            colorize("BAD", " has unmet requirements.") + "\n",
            noiselevel=-1)
        use_display = pkg_use_display(pkg, self._frozen_config.myopts)
        writemsg("- %s %s\n" % (output_cpv, use_display),
            noiselevel=-1)
        writemsg("\n The following REQUIRED_USE flag constraints " + \
            "are unsatisfied:\n", noiselevel=-1)
        reduced_noise = check_required_use(
            pkg._metadata["REQUIRED_USE"],
            self._pkg_use_enabled(pkg),
            pkg.iuse.is_valid_flag,
            eapi=pkg.eapi).tounicode()
        writemsg(" %s\n" % \
            human_readable_required_use(reduced_noise),
            noiselevel=-1)
        normalized_required_use = \
            " ".join(pkg._metadata["REQUIRED_USE"].split())
        if reduced_noise != normalized_required_use:
            writemsg("\n The above constraints " + \
                "are a subset of the following complete expression:\n",
                noiselevel=-1)
            writemsg(" %s\n" % \
                human_readable_required_use(normalized_required_use),
                noiselevel=-1)
        writemsg("\n", noiselevel=-1)

    elif show_missing_use:
        writemsg("\nemerge: there are no ebuilds built with USE flags to satisfy "+green(xinfo)+".\n", noiselevel=-1)
        writemsg("!!! One of the following packages is required to complete your request:\n", noiselevel=-1)
        for pkg, mreasons in show_missing_use:
            writemsg("- "+pkg.cpv+_repo_separator+pkg.repo+" ("+", ".join(mreasons)+")\n", noiselevel=-1)

    elif masked_packages:
        writemsg("\n!!! " + \
            colorize("BAD", "All ebuilds that could satisfy ") + \
            colorize("INFORM", xinfo) + \
            colorize("BAD", " have been masked.") + "\n", noiselevel=-1)
        writemsg("!!! One of the following masked packages is required to complete your request:\n", noiselevel=-1)
        have_eapi_mask = show_masked_packages(masked_packages)
        if have_eapi_mask:
            writemsg("\n", noiselevel=-1)
            msg = ("The current version of portage supports " + \
                "EAPI '%s'. You must upgrade to a newer version" + \
                " of portage before EAPI masked packages can" + \
                " be installed.") % portage.const.EAPI
            writemsg("\n".join(textwrap.wrap(msg, 75)), noiselevel=-1)
        writemsg("\n", noiselevel=-1)
        mask_docs = True
    else:
        cp_exists = False
        if atom.package and not atom.cp.startswith("null/"):
            for pkg in self._iter_match_pkgs_any(
                root_config, Atom(atom.cp)):
                cp_exists = True
                break

        # NOTE(review): comparing the myopts value with "== True" looks
        # intentional here — the option maps to True when set, while
        # the "y" default deliberately fails the comparison.
        writemsg("\nemerge: there are no %s to satisfy " %
            ("binary packages" if
            self._frozen_config.myopts.get("--usepkgonly", "y") == True
            else "ebuilds") + green(xinfo) + ".\n", noiselevel=-1)

        if isinstance(myparent, AtomArg) and \
            not cp_exists and \
            self._frozen_config.myopts.get(
            "--misspell-suggestions", "y") != "n":

            writemsg("\nemerge: searching for similar names..."
                , noiselevel=-1)

            search_index = self._frozen_config.myopts.get("--search-index", "y") != "n"
            # fakedbapi is indexed
            dbs = [vardb]
            if "--usepkgonly" not in self._frozen_config.myopts:
                dbs.append(IndexedPortdb(portdb) if search_index else portdb)
            if "--usepkg" in self._frozen_config.myopts:
                # bindbapi is indexed
                dbs.append(bindb)

            matches = similar_name_search(dbs, atom)

            if len(matches) == 1:
                writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
                    , noiselevel=-1)
            elif len(matches) > 1:
                writemsg(
                    "\nemerge: Maybe you meant any of these: %s?\n" % \
                    (", ".join(matches),), noiselevel=-1)
            else:
                # Generally, this would only happen if
                # all dbapis are empty.
                writemsg(" nothing similar found.\n"
                    , noiselevel=-1)

    msg = []
    if not isinstance(myparent, AtomArg):
        # It's redundant to show parent for AtomArg since
        # it's the same as 'xinfo' displayed above.
        dep_chain = self._get_dep_chain(myparent, atom)
        for node, node_type in dep_chain:
            msg.append('(dependency required by "%s" [%s])' % \
                (colorize('INFORM', "%s" % (node)), node_type))

    if msg:
        writemsg("\n".join(msg), noiselevel=-1)
        writemsg("\n", noiselevel=-1)

    if mask_docs:
        show_mask_docs()
        writemsg("\n", noiselevel=-1)
def _iter_match_pkgs_any(self, root_config, atom, onlydeps=False):
for db, pkg_type, built, installed, db_keys in \
self._dynamic_config._filtered_trees[root_config.root]["dbs"]:
for pkg in self._iter_match_pkgs(root_config,
pkg_type, atom, onlydeps=onlydeps):
yield pkg
def _iter_match_pkgs(self, root_config, pkg_type, atom,
onlydeps=False):
if atom.package:
return self._iter_match_pkgs_atom(root_config, pkg_type,
atom, onlydeps=onlydeps)
else:
return self._iter_match_pkgs_soname(root_config, pkg_type,
atom, onlydeps=onlydeps)
def _iter_match_pkgs_soname(self, root_config, pkg_type, atom,
    onlydeps=False):
    """
    Yield packages of pkg_type whose provided sonames match atom,
    highest version first. Yields nothing for dbapis that do not
    index soname provides.
    """
    dbapi = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
    is_installed = (pkg_type == 'installed')
    if not isinstance(dbapi, DbapiProvidesIndex):
        # Only soname-indexed dbapis can answer soname queries.
        return
    matches = dbapi.match(atom)
    # Walk in descending (best-version-first) order.
    for index in range(len(matches) - 1, -1, -1):
        yield self._pkg(matches[index], pkg_type, root_config,
            installed=is_installed, onlydeps=onlydeps)
def _iter_match_pkgs_atom(self, root_config, pkg_type, atom,
    onlydeps=False):
    """
    Iterate over Package instances of pkg_type matching the given atom.
    This does not check visibility and it also does not match USE for
    unbuilt ebuilds since USE are lazily calculated after visibility
    checks (to avoid the expense when possible).
    """
    db = root_config.trees[self.pkg_tree_map[pkg_type]].dbapi
    atom_exp = dep_expand(atom, mydb=db, settings=root_config.settings)
    cp_list = db.cp_list(atom_exp.cp)
    matched_something = False
    installed = pkg_type == 'installed'

    if cp_list:
        atom_set = InternalPackageSet(initial_atoms=(atom,),
            allow_repo=True)

        if atom.repo is None and hasattr(db, "getRepositories"):
            repo_list = db.getRepositories()
        else:
            repo_list = [atom.repo]

        # descending order
        cp_list.reverse()
        for cpv in cp_list:
            # Call match_from_list on one cpv at a time, in order
            # to avoid unnecessary match_from_list comparisons on
            # versions that are never yielded from this method.
            if not match_from_list(atom_exp, [cpv]):
                continue
            for repo in repo_list:

                try:
                    pkg = self._pkg(cpv, pkg_type, root_config,
                        installed=installed, onlydeps=onlydeps, myrepo=repo)
                except portage.exception.PackageNotFound:
                    pass
                else:
                    # A cpv can be returned from dbapi.match() as an
                    # old-style virtual match even in cases when the
                    # package does not actually PROVIDE the virtual.
                    # Filter out any such false matches here.

                    # Make sure that cpv from the current repo satisfies the atom.
                    # This might not be the case if there are several repos with
                    # the same cpv, but different metadata keys, like SLOT.
                    # Also, parts of the match that require metadata access
                    # are deferred until we have cached the metadata in a
                    # Package instance.
                    if not atom_set.findAtomForPackage(pkg,
                        modified_use=self._pkg_use_enabled(pkg)):
                        continue
                    matched_something = True
                    yield pkg

    # USE=multislot can make an installed package appear as if
    # it doesn't satisfy a slot dependency. Rebuilding the ebuild
    # won't do any good as long as USE=multislot is enabled since
    # the newly built package still won't have the expected slot.
    # Therefore, assume that such SLOT dependencies are already
    # satisfied rather than forcing a rebuild.
    if not matched_something and installed and \
        atom.slot is not None and not atom.slot_operator_built:

        if "remove" in self._dynamic_config.myparams:
            # We need to search the portdbapi, which is not in our
            # normal dbs list, in order to find the real SLOT.
            portdb = self._frozen_config.trees[root_config.root]["porttree"].dbapi
            db_keys = list(portdb._aux_cache_keys)
            dbs = [(portdb, "ebuild", False, False, db_keys)]
        else:
            dbs = self._dynamic_config._filtered_trees[root_config.root]["dbs"]

        cp_list = db.cp_list(atom_exp.cp)
        if cp_list:
            atom_set = InternalPackageSet(
                initial_atoms=(atom.without_slot,), allow_repo=True)
            atom_exp_without_slot = atom_exp.without_slot
            cp_list.reverse()
            for cpv in cp_list:
                if not match_from_list(atom_exp_without_slot, [cpv]):
                    continue
                slot_available = False
                # The requested slot must exist in at least one of
                # the available dbs for the fallback to apply.
                for other_db, other_type, other_built, \
                    other_installed, other_keys in dbs:
                    try:
                        if portage.dep._match_slot(atom,
                            other_db._pkg_str(_unicode(cpv), None)):
                            slot_available = True
                            break
                    except (KeyError, InvalidData):
                        pass
                if not slot_available:
                    continue
                inst_pkg = self._pkg(cpv, "installed",
                    root_config, installed=installed, myrepo=atom.repo)
                # Remove the slot from the atom and verify that
                # the package matches the resulting atom.
                if atom_set.findAtomForPackage(inst_pkg):
                    yield inst_pkg
                    return
def _select_pkg_highest_available(self, root, atom, onlydeps=False, parent=None):
    """
    Memoizing wrapper around _select_pkg_highest_available_imp.

    Returns the (pkg, existing_node) tuple produced by the
    implementation, caching it keyed on (root, atom, ...) and the
    current autounmask state. Cache keys are also indexed per
    (root, cp) / (root, atom) so _prune_highest_pkg_cache can
    invalidate them later.
    """
    if atom.package:
        cache_key = (root, atom, atom.unevaluated_atom, onlydeps,
            self._dynamic_config._autounmask)
        self._dynamic_config._highest_pkg_cache_cp_map.\
            setdefault((root, atom.cp), []).append(cache_key)
    else:
        # Soname atoms have no cp; index by the atom itself.
        cache_key = (root, atom, onlydeps,
            self._dynamic_config._autounmask)
        self._dynamic_config._highest_pkg_cache_cp_map.\
            setdefault((root, atom), []).append(cache_key)
    ret = self._dynamic_config._highest_pkg_cache.get(cache_key)
    if ret is not None:
        return ret
    ret = self._select_pkg_highest_available_imp(root, atom, onlydeps=onlydeps, parent=parent)
    self._dynamic_config._highest_pkg_cache[cache_key] = ret
    pkg, existing = ret
    if pkg is not None:
        # Track visible packages for later display/match purposes.
        if self._pkg_visibility_check(pkg) and \
            not (pkg.installed and pkg.masks):
            self._dynamic_config._visible_pkgs[pkg.root].cpv_inject(pkg)
    return ret
def _is_argument(self, pkg):
for arg, atom in self._iter_atoms_for_pkg(pkg):
if isinstance(arg, (AtomArg, PackageArg)):
return True
return False
def _prune_highest_pkg_cache(self, pkg):
cache = self._dynamic_config._highest_pkg_cache
key_map = self._dynamic_config._highest_pkg_cache_cp_map
for cp in pkg.provided_cps:
for cache_key in key_map.pop((pkg.root, cp), []):
cache.pop(cache_key, None)
if pkg.provides is not None:
for atom in pkg.provides:
for cache_key in key_map.pop((pkg.root, atom), []):
cache.pop(cache_key, None)
def _want_installed_pkg(self, pkg):
"""
Given an installed package returned from select_pkg, return
True if the user has not explicitly requested for this package
to be replaced (typically via an atom on the command line).
"""
if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
return True
arg = False
try:
for arg, atom in self._iter_atoms_for_pkg(pkg):
if arg.force_reinstall:
return False
except InvalidDependString:
pass
if "selective" in self._dynamic_config.myparams:
return True
return not arg
def _want_update_pkg(self, parent, pkg):
if self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
modified_use=self._pkg_use_enabled(pkg)):
return False
arg_atoms = None
try:
arg_atoms = list(self._iter_atoms_for_pkg(pkg))
except InvalidDependString:
if not pkg.installed:
# should have been masked before it was selected
raise
depth = parent.depth or 0
if isinstance(depth, int):
depth += 1
if arg_atoms:
for arg, atom in arg_atoms:
if arg.reset_depth:
depth = 0
break
update = "--update" in self._frozen_config.myopts
return (not self._dynamic_config._complete_mode and
(arg_atoms or update) and
not self._too_deep(depth))
def _too_deep(self, depth):
"""
Check if a package depth is deeper than the max allowed depth.
@param depth: the depth of a particular package
@type depth: int or _UNREACHABLE_DEPTH
@rtype: bool
@return: True if the package is deeper than the max allowed depth
"""
deep = self._dynamic_config.myparams.get("deep", 0)
if depth is self._UNREACHABLE_DEPTH:
return True
elif deep is True:
return False
else:
# All non-integer cases are handled above,
# so both values must be int type.
return depth > deep
def _depth_increment(self, depth, n=1):
"""
Return depth + n if depth is an int, otherwise return depth.
@param depth: the depth of a particular package
@type depth: int or _UNREACHABLE_DEPTH
@param n: number to add (default is 1)
@type n: int
@rtype: int or _UNREACHABLE_DEPTH
@return: depth + 1 or _UNREACHABLE_DEPTH
"""
return depth + n if isinstance(depth, int) else depth
def _equiv_ebuild(self, pkg):
try:
return self._pkg(
pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
except portage.exception.PackageNotFound:
return next(self._iter_match_pkgs(pkg.root_config,
"ebuild", Atom("=%s" % (pkg.cpv,))), None)
def _equiv_ebuild_visible(self, pkg, autounmask_level=None):
try:
pkg_eb = self._pkg(
pkg.cpv, "ebuild", pkg.root_config, myrepo=pkg.repo)
except portage.exception.PackageNotFound:
pkg_eb_visible = False
for pkg_eb in self._iter_match_pkgs(pkg.root_config,
"ebuild", Atom("=%s" % (pkg.cpv,))):
if self._pkg_visibility_check(pkg_eb, autounmask_level):
pkg_eb_visible = True
break
if not pkg_eb_visible:
return False
else:
if not self._pkg_visibility_check(pkg_eb, autounmask_level):
return False
return True
def _equiv_binary_installed(self, pkg):
build_time = pkg.build_time
if not build_time:
return False
try:
inst_pkg = self._pkg(pkg.cpv, "installed",
pkg.root_config, installed=True)
except PackageNotFound:
return False
return build_time == inst_pkg.build_time
class _AutounmaskLevel(object):
__slots__ = ("allow_use_changes", "allow_unstable_keywords", "allow_license_changes", \
"allow_missing_keywords", "allow_unmasks")
def __init__(self):
self.allow_use_changes = False
self.allow_license_changes = False
self.allow_unstable_keywords = False
self.allow_missing_keywords = False
self.allow_unmasks = False
def _autounmask_levels(self):
    """
    Iterate over the different allowed things to unmask.

    0. USE
    1. USE + license
    2. USE + ~arch + license
    3. USE + ~arch + license + missing keywords
    4. USE + license + masks
    5. USE + ~arch + license + masks
    6. USE + ~arch + license + missing keywords + masks

    Some thoughts:
            * Do least invasive changes first.
            * Try unmasking alone before unmasking + missing keywords
              to avoid -9999 versions if possible

    NOTE: a single _AutounmaskLevel instance is mutated in place and
    re-yielded at every step, so consumers must not retain references
    across iterations.
    """

    if self._dynamic_config._autounmask is not True:
        return

    autounmask_keep_masks = self._frozen_config.myopts.get("--autounmask-keep-masks", "n") != "n"
    autounmask_level = self._AutounmaskLevel()

    # Level 0: USE changes only.
    autounmask_level.allow_use_changes = True
    yield autounmask_level

    # Level 1: also allow license changes.
    autounmask_level.allow_license_changes = True
    yield autounmask_level

    # Level 2: also allow ~arch keywords.
    autounmask_level.allow_unstable_keywords = True
    yield autounmask_level

    if not autounmask_keep_masks:

        # Level 3: also allow missing keywords (**).
        autounmask_level.allow_missing_keywords = True
        yield autounmask_level

        # 4. USE + license + masks
        # Try to respect keywords while discarding
        # package.mask (see bug #463394).
        autounmask_level.allow_unstable_keywords = False
        autounmask_level.allow_missing_keywords = False
        autounmask_level.allow_unmasks = True
        yield autounmask_level

        # Levels 5 and 6: re-enable ~arch, then missing keywords,
        # while keeping unmasks allowed.
        autounmask_level.allow_unstable_keywords = True

        for missing_keyword, unmask in ((False, True), (True, True)):

            autounmask_level.allow_missing_keywords = missing_keyword
            autounmask_level.allow_unmasks = unmask

            yield autounmask_level
def _select_pkg_highest_available_imp(self, root, atom, onlydeps=False, parent=None):
    """
    Cache-miss path of _select_pkg_highest_available: first try a
    normal selection, then (when autounmask is enabled) retry with
    progressively more permissive autounmask levels if nothing
    acceptable was found, falling back to the original selection if
    autounmask cannot do better.

    @rtype: tuple
    @return: (pkg, existing_node), both possibly None
    """
    pkg, existing = self._wrapped_select_pkg_highest_available_imp(
        root, atom, onlydeps=onlydeps, parent=parent)

    # Remember the plain selection so it can be restored below.
    default_selection = (pkg, existing)

    if self._dynamic_config._autounmask is True:
        # An installed package the user wants replaced does not count
        # as a successful selection.
        if pkg is not None and \
            pkg.installed and \
            not self._want_installed_pkg(pkg):
            pkg = None

        # Temporarily reset _need_restart state, in order to
        # avoid interference as reported in bug #459832.
        earlier_need_restart = self._dynamic_config._need_restart
        self._dynamic_config._need_restart = False
        try:
            for autounmask_level in self._autounmask_levels():
                if pkg is not None:
                    break

                pkg, existing = \
                    self._wrapped_select_pkg_highest_available_imp(
                        root, atom, onlydeps=onlydeps,
                        autounmask_level=autounmask_level, parent=parent)

                if pkg is not None and \
                    pkg.installed and \
                    not self._want_installed_pkg(pkg):
                    pkg = None

            if self._dynamic_config._need_restart:
                return None, None
        finally:
            # Restore the earlier restart flag unconditionally.
            if earlier_need_restart:
                self._dynamic_config._need_restart = True

    if pkg is None:
        # This ensures that we can fall back to an installed package
        # that may have been rejected in the autounmask path above.
        return default_selection

    return pkg, existing
def _pkg_visibility_check(self, pkg, autounmask_level=None, trust_graph=True):
    """
    Check whether pkg is visible and, when autounmask is active,
    schedule the keyword / package.mask / license changes that would
    make it visible (recorded in _dynamic_config and _backtrack_infos).

    @param pkg: the package to check
    @param autounmask_level: which kinds of unmasking are currently
        permitted; None disables autounmask handling entirely
    @param trust_graph: treat packages already in the dep graph as
        visible, for overall resolution consistency
    @rtype: bool
    @return: True if the package is visible or can be made visible
        within the given autounmask level
    """

    if pkg.visible:
        return True

    if trust_graph and pkg in self._dynamic_config.digraph:
        # Sometimes we need to temporarily disable
        # dynamic_config._autounmask, but for overall
        # consistency in dependency resolution, in most
        # cases we want to treat packages in the graph
        # as though they are visible.
        return True

    if not self._dynamic_config._autounmask or autounmask_level is None:
        return False

    pkgsettings = self._frozen_config.pkgsettings[pkg.root]
    root_config = self._frozen_config.roots[pkg.root]
    mreasons = _get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))

    # Classify every masking reason by the kind of change that
    # could lift it.
    masked_by_unstable_keywords = False
    masked_by_missing_keywords = False
    missing_licenses = None
    masked_by_something_else = False
    masked_by_p_mask = False

    for reason in mreasons:
        hint = reason.unmask_hint

        if hint is None:
            masked_by_something_else = True
        elif hint.key == "unstable keyword":
            masked_by_unstable_keywords = True
            if hint.value == "**":
                masked_by_missing_keywords = True
        elif hint.key == "p_mask":
            masked_by_p_mask = True
        elif hint.key == "license":
            missing_licenses = hint.value
        else:
            masked_by_something_else = True

    if masked_by_something_else:
        # No autounmask change can lift this mask.
        return False

    if pkg in self._dynamic_config._needed_unstable_keywords:
        #If the package is already keyworded, remove the mask.
        masked_by_unstable_keywords = False
        masked_by_missing_keywords = False

    if pkg in self._dynamic_config._needed_p_mask_changes:
        #If a package.mask change is already scheduled, remove the mask.
        masked_by_p_mask = False

    if missing_licenses:
        #If the needed licenses are already unmasked, remove the mask.
        missing_licenses.difference_update(self._dynamic_config._needed_license_changes.get(pkg, set()))

    if not (masked_by_unstable_keywords or masked_by_p_mask or missing_licenses):
        #Package has already been unmasked.
        return True

    if (masked_by_unstable_keywords and not autounmask_level.allow_unstable_keywords) or \
        (masked_by_missing_keywords and not autounmask_level.allow_missing_keywords) or \
        (masked_by_p_mask and not autounmask_level.allow_unmasks) or \
        (missing_licenses and not autounmask_level.allow_license_changes):
        #We are not allowed to do the needed changes.
        return False

    # Record each needed change both in the dynamic config and in
    # _backtrack_infos so that backtracking can replay them.
    if masked_by_unstable_keywords:
        self._dynamic_config._needed_unstable_keywords.add(pkg)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_unstable_keywords", set())
        backtrack_infos["config"]["needed_unstable_keywords"].add(pkg)

    if masked_by_p_mask:
        self._dynamic_config._needed_p_mask_changes.add(pkg)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_p_mask_changes", set())
        backtrack_infos["config"]["needed_p_mask_changes"].add(pkg)

    if missing_licenses:
        self._dynamic_config._needed_license_changes.setdefault(pkg, set()).update(missing_licenses)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_license_changes", set())
        backtrack_infos["config"]["needed_license_changes"].add((pkg, frozenset(missing_licenses)))

    return True
def _pkg_use_enabled(self, pkg, target_use=None):
    """
    If target_use is None, returns pkg.use.enabled + changes in _needed_use_config_changes.
    If target_use is given, the need changes are computed to make the package useable.
    Example: target_use = { "foo": True, "bar": False }
    The flags target_use must be in the pkg's IUSE.

    @rtype: frozenset
    @return: set of effectively enabled USE flags, including changes
        made by autounmask
    """
    if pkg.built:
        # USE of built packages is fixed at build time.
        return pkg.use.enabled
    needed_use_config_change = self._dynamic_config._needed_use_config_changes.get(pkg)

    if target_use is None:
        if needed_use_config_change is None:
            return pkg.use.enabled
        else:
            return needed_use_config_change[0]

    # Start from any previously scheduled changes so that new
    # requirements accumulate instead of replacing them.
    if needed_use_config_change is not None:
        old_use = needed_use_config_change[0]
        new_use = set()
        old_changes = needed_use_config_change[1]
        new_changes = old_changes.copy()
    else:
        old_use = pkg.use.enabled
        new_use = set()
        old_changes = {}
        new_changes = {}

    for flag, state in target_use.items():
        real_flag = pkg.iuse.get_real_flag(flag)

        if real_flag is None:
            # Triggered by use-dep defaults.
            continue

        if state:
            if real_flag not in old_use:
                if new_changes.get(real_flag) == False:
                    # Conflicting requirement: bail out unchanged.
                    return old_use
                new_changes[real_flag] = True
            new_use.add(flag)
        else:
            if real_flag in old_use:
                if new_changes.get(real_flag) == True:
                    # Conflicting requirement: bail out unchanged.
                    return old_use
                new_changes[real_flag] = False
    # Flags not mentioned in target_use keep their old state.
    new_use.update(old_use.difference(target_use))

    def want_restart_for_use_change(pkg, new_use):
        # A restart is needed if the USE change alters any dep string
        # or affects a parent's USE dependency.
        if pkg not in self._dynamic_config.digraph.nodes:
            return False

        for key in Package._dep_keys + ("LICENSE",):
            dep = pkg._metadata[key]
            old_val = set(portage.dep.use_reduce(dep, pkg.use.enabled, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))
            new_val = set(portage.dep.use_reduce(dep, new_use, is_valid_flag=pkg.iuse.is_valid_flag, flat=True))

            if old_val != new_val:
                return True

        parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
        if not parent_atoms:
            return False

        # NOTE: relies on the entry stored for pkg just before this
        # closure is invoked below.
        new_use, changes = self._dynamic_config._needed_use_config_changes.get(pkg)
        for ppkg, atom in parent_atoms:
            if not atom.use or \
                not any(x in atom.use.required for x in changes):
                continue
            else:
                return True

        return False

    # Always return frozenset since the result needs to be
    # hashable (see bug #531112).
    new_use = frozenset(new_use)

    if new_changes != old_changes:
        #Don't do the change if it violates REQUIRED_USE.
        required_use = pkg._metadata.get("REQUIRED_USE")
        if required_use and check_required_use(required_use, old_use,
            pkg.iuse.is_valid_flag, eapi=pkg.eapi) and \
            not check_required_use(required_use, new_use,
            pkg.iuse.is_valid_flag, eapi=pkg.eapi):
            return old_use

        # Never override profile-forced or profile-masked flags.
        if any(x in pkg.use.mask for x in new_changes) or \
            any(x in pkg.use.force for x in new_changes):
            return old_use

        self._dynamic_config._needed_use_config_changes[pkg] = (new_use, new_changes)
        backtrack_infos = self._dynamic_config._backtrack_infos
        backtrack_infos.setdefault("config", {})
        backtrack_infos["config"].setdefault("needed_use_config_changes", [])
        backtrack_infos["config"]["needed_use_config_changes"].append((pkg, (new_use, new_changes)))
        if want_restart_for_use_change(pkg, new_use):
            self._dynamic_config._need_restart = True
    return new_use
def _wrapped_select_pkg_highest_available_imp(self, root, atom, onlydeps=False, autounmask_level=None, parent=None):
    """
    Core selection loop: scan the configured dbs (graph, installed,
    binary, ebuild) for the highest acceptable match of atom, honoring
    visibility, USE dependencies, --usepkg/--usepkgonly, exclusion
    lists, rebuilt-binaries logic and reinstall triggers.

    @rtype: tuple
    @return: (pkg, existing_node) where existing_node is a matching
        package already in the graph, or None
    """
    root_config = self._frozen_config.roots[root]
    pkgsettings = self._frozen_config.pkgsettings[root]
    dbs = self._dynamic_config._filtered_trees[root]["dbs"]
    vardb = self._frozen_config.roots[root].trees["vartree"].dbapi
    # List of acceptable packages, ordered by type preference.
    matched_packages = []
    highest_version = None
    atom_cp = None
    have_new_virt = None
    if atom.package:
        atom_cp = atom.cp
        have_new_virt = (atom_cp.startswith("virtual/") and
            self._have_new_virt(root, atom_cp))

    existing_node = None
    myeb = None
    rebuilt_binaries = 'rebuilt_binaries' in self._dynamic_config.myparams
    usepkg = "--usepkg" in self._frozen_config.myopts
    usepkgonly = "--usepkgonly" in self._frozen_config.myopts
    empty = "empty" in self._dynamic_config.myparams
    selective = "selective" in self._dynamic_config.myparams
    reinstall = False
    avoid_update = "--update" not in self._frozen_config.myopts
    dont_miss_updates = "--update" in self._frozen_config.myopts
    use_ebuild_visibility = self._frozen_config.myopts.get(
        '--use-ebuild-visibility', 'n') != 'n'
    reinstall_atoms = self._frozen_config.reinstall_atoms
    usepkg_exclude = self._frozen_config.usepkg_exclude
    useoldpkg_atoms = self._frozen_config.useoldpkg_atoms
    matched_oldpkg = []
    # Behavior of the "selective" parameter depends on
    # whether or not a package matches an argument atom.
    # If an installed package provides an old-style
    # virtual that is no longer provided by an available
    # package, the installed package may match an argument
    # atom even though none of the available packages do.
    # Therefore, "selective" logic does not consider
    # whether or not an installed package matches an
    # argument atom. It only considers whether or not
    # available packages match argument atoms, which is
    # represented by the found_available_arg flag.
    found_available_arg = False
    packages_with_invalid_use_config = []
    # First pass (find_existing_node=True) prefers a match already in
    # the graph; second pass considers all dbs normally.
    for find_existing_node in True, False:
        if existing_node:
            break
        for db, pkg_type, built, installed, db_keys in dbs:
            if existing_node:
                break
            if installed and not find_existing_node:
                want_reinstall = reinstall or empty or \
                    (found_available_arg and not selective)
                if want_reinstall and matched_packages:
                    continue

            # For unbuilt ebuilds, ignore USE deps for the initial
            # match since we want to ensure that updates aren't
            # missed solely due to the user's USE configuration.
            for pkg in self._iter_match_pkgs(root_config, pkg_type,
                atom.without_use if (atom.package and not built) else atom,
                onlydeps=onlydeps):
                if have_new_virt is True and pkg.cp != atom_cp:
                    # pull in a new-style virtual instead
                    continue
                if pkg in self._dynamic_config._runtime_pkg_mask:
                    # The package has been masked by the backtracking logic
                    continue
                root_slot = (pkg.root, pkg.slot_atom)
                if pkg.built and root_slot in self._rebuild.rebuild_list:
                    continue
                if (pkg.installed and
                    root_slot in self._rebuild.reinstall_list):
                    continue

                if not pkg.installed and \
                    self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
                    modified_use=self._pkg_use_enabled(pkg)):
                    continue

                if built and not installed and usepkg_exclude.findAtomForPackage(pkg, \
                    modified_use=self._pkg_use_enabled(pkg)):
                    break

                useoldpkg = useoldpkg_atoms.findAtomForPackage(pkg, \
                    modified_use=self._pkg_use_enabled(pkg))

                if packages_with_invalid_use_config and (not built or not useoldpkg) and \
                    (not pkg.installed or dont_miss_updates):
                    # Check if a higher version was rejected due to user
                    # USE configuration. The packages_with_invalid_use_config
                    # list only contains unbuilt ebuilds since USE can't
                    # be changed for built packages.
                    higher_version_rejected = False
                    repo_priority = pkg.repo_priority
                    for rejected in packages_with_invalid_use_config:
                        if rejected.cp != pkg.cp:
                            continue
                        if rejected > pkg:
                            higher_version_rejected = True
                            break
                        if portage.dep.cpvequal(rejected.cpv, pkg.cpv):
                            # If version is identical then compare
                            # repo priority (see bug #350254).
                            rej_repo_priority = rejected.repo_priority
                            if rej_repo_priority is not None and \
                                (repo_priority is None or
                                rej_repo_priority > repo_priority):
                                higher_version_rejected = True
                                break
                    if higher_version_rejected:
                        continue

                cpv = pkg.cpv
                reinstall_for_flags = None

                if not pkg.installed or \
                    (matched_packages and not avoid_update):
                    # Only enforce visibility on installed packages
                    # if there is at least one other visible package
                    # available. By filtering installed masked packages
                    # here, packages that have been masked since they
                    # were installed can be automatically downgraded
                    # to an unmasked version. NOTE: This code needs to
                    # be consistent with masking behavior inside
                    # _dep_check_composite_db, in order to prevent
                    # incorrect choices in || deps like bug #351828.
                    if not self._pkg_visibility_check(pkg, autounmask_level):
                        continue

                    # Enable upgrade or downgrade to a version
                    # with visible KEYWORDS when the installed
                    # version is masked by KEYWORDS, but never
                    # reinstall the same exact version only due
                    # to a KEYWORDS mask. See bug #252167.

                    identical_binary = False
                    if pkg.type_name != "ebuild" and matched_packages:
                        # Don't re-install a binary package that is
                        # identical to the currently installed package
                        # (see bug #354441).
                        if usepkg and pkg.installed:
                            for selected_pkg in matched_packages:
                                if selected_pkg.type_name == "binary" and \
                                    selected_pkg.cpv == pkg.cpv and \
                                    selected_pkg.build_time == \
                                    pkg.build_time:
                                    identical_binary = True
                                    break

                    if (not identical_binary and pkg.built and
                        (use_ebuild_visibility or matched_packages)):
                        # If the ebuild no longer exists or it's
                        # keywords have been dropped, reject built
                        # instances (installed or binary).
                        # If --usepkgonly is enabled, assume that
                        # the ebuild status should be ignored unless
                        # --use-ebuild-visibility has been specified.
                        if not use_ebuild_visibility and (usepkgonly or useoldpkg):
                            if pkg.installed and pkg.masks:
                                continue
                        elif not self._equiv_ebuild_visible(pkg,
                            autounmask_level=autounmask_level):
                            continue

                # Calculation of USE for unbuilt ebuilds is relatively
                # expensive, so it is only performed lazily, after the
                # above visibility checks are complete.

                myarg = None
                try:
                    for myarg, myarg_atom in self._iter_atoms_for_pkg(pkg):
                        if myarg.force_reinstall:
                            reinstall = True
                            break
                except InvalidDependString:
                    if not installed:
                        # masked by corruption
                        continue
                if not installed and myarg:
                    found_available_arg = True

                if atom.package and atom.unevaluated_atom.use:
                    #Make sure we don't miss a 'missing IUSE'.
                    if pkg.iuse.get_missing_iuse(atom.unevaluated_atom.use.required):
                        # Don't add this to packages_with_invalid_use_config
                        # since IUSE cannot be adjusted by the user.
                        continue

                if atom.package and atom.use is not None:

                    if autounmask_level and autounmask_level.allow_use_changes and not pkg.built:
                        # Compute the USE changes needed to satisfy
                        # this atom's USE dependency.
                        target_use = {}
                        for flag in atom.use.enabled:
                            target_use[flag] = True
                        for flag in atom.use.disabled:
                            target_use[flag] = False
                        use = self._pkg_use_enabled(pkg, target_use)
                    else:
                        use = self._pkg_use_enabled(pkg)

                    use_match = True
                    can_adjust_use = not pkg.built
                    is_valid_flag = pkg.iuse.is_valid_flag
                    missing_enabled = frozenset(x for x in
                        atom.use.missing_enabled if not is_valid_flag(x))
                    missing_disabled = frozenset(x for x in
                        atom.use.missing_disabled if not is_valid_flag(x))

                    if atom.use.enabled:
                        if any(x in atom.use.enabled for x in missing_disabled):
                            use_match = False
                            can_adjust_use = False
                        need_enabled = atom.use.enabled.difference(use)
                        if need_enabled:
                            need_enabled = need_enabled.difference(missing_enabled)
                            if need_enabled:
                                use_match = False
                                if can_adjust_use:
                                    if any(x in pkg.use.mask for x in need_enabled):
                                        can_adjust_use = False

                    if atom.use.disabled:
                        if any(x in atom.use.disabled for x in missing_enabled):
                            use_match = False
                            can_adjust_use = False
                        need_disabled = atom.use.disabled.intersection(use)
                        if need_disabled:
                            need_disabled = need_disabled.difference(missing_disabled)
                            if need_disabled:
                                use_match = False
                                if can_adjust_use:
                                    if any(x in pkg.use.force and x not in
                                        pkg.use.mask for x in need_disabled):
                                        can_adjust_use = False

                    if not use_match:
                        if can_adjust_use:
                            # Above we must ensure that this package has
                            # absolutely no use.force, use.mask, or IUSE
                            # issues that the user typically can't make
                            # adjustments to solve (see bug #345979).
                            # FIXME: Conditional USE deps complicate
                            # issues. This code currently excludes cases
                            # in which the user can adjust the parent
                            # package's USE in order to satisfy the dep.
                            packages_with_invalid_use_config.append(pkg)
                        continue

                if atom_cp is None or pkg.cp == atom_cp:
                    if highest_version is None:
                        highest_version = pkg
                    elif pkg > highest_version:
                        highest_version = pkg

                # At this point, we've found the highest visible
                # match from the current repo. Any lower versions
                # from this repo are ignored, so this so the loop
                # will always end with a break statement below
                # this point.
                if find_existing_node:
                    # Use reversed iteration in order to get
                    # descending order here, so that the highest
                    # version involved in a slot conflict is
                    # selected. This is needed for correct operation
                    # of conflict_downgrade logic in the dep_zapdeps
                    # function (see bug 554070).
                    e_pkg = next(reversed(list(
                        self._dynamic_config._package_tracker.match(
                        root, pkg.slot_atom, installed=False))), None)

                    if not e_pkg:
                        break

                    # Use PackageSet.findAtomForPackage()
                    # for PROVIDE support.
                    if atom.match(e_pkg.with_use(
                        self._pkg_use_enabled(e_pkg))):
                        if highest_version and \
                            (atom_cp is None or
                            e_pkg.cp == atom_cp) and \
                            e_pkg < highest_version and \
                            e_pkg.slot_atom != highest_version.slot_atom:
                            # There is a higher version available in a
                            # different slot, so this existing node is
                            # irrelevant.
                            pass
                        else:
                            matched_packages.append(e_pkg)
                            existing_node = e_pkg
                    break

                # Compare built package to current config and
                # reject the built package if necessary.
                reinstall_use = ("--newuse" in self._frozen_config.myopts or \
                    "--reinstall" in self._frozen_config.myopts)
                changed_deps = (
                    self._dynamic_config.myparams.get(
                    "changed_deps", "n") != "n")
                binpkg_changed_deps = (
                    self._dynamic_config.myparams.get(
                    "binpkg_changed_deps", "n") != "n")
                respect_use = self._dynamic_config.myparams.get("binpkg_respect_use") in ("y", "auto")
                if built and not useoldpkg and \
                    (not installed or matched_packages) and \
                    not (installed and
                    self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
                    modified_use=self._pkg_use_enabled(pkg))):
                    if myeb and "--newrepo" in self._frozen_config.myopts and myeb.repo != pkg.repo:
                        break
                    elif reinstall_use or (not installed and respect_use):
                        iuses = pkg.iuse.all
                        old_use = self._pkg_use_enabled(pkg)
                        if myeb:
                            now_use = self._pkg_use_enabled(myeb)
                            forced_flags = set(chain(
                                myeb.use.force, myeb.use.mask))
                        else:
                            pkgsettings.setcpv(pkg)
                            now_use = pkgsettings["PORTAGE_USE"].split()
                            forced_flags = set(chain(
                                pkgsettings.useforce, pkgsettings.usemask))
                        cur_iuse = iuses
                        if myeb and not usepkgonly and not useoldpkg:
                            cur_iuse = myeb.iuse.all
                        reinstall_for_flags = self._reinstall_for_flags(pkg,
                            forced_flags, old_use, iuses, now_use, cur_iuse)
                        if reinstall_for_flags:
                            if not pkg.installed:
                                self._dynamic_config.\
                                    ignored_binaries.setdefault(
                                    pkg, {}).setdefault(
                                    "respect_use", set()).update(
                                    reinstall_for_flags)

                                # Continue searching for a binary
                                # package instance built with the
                                # desired USE settings.
                                continue

                            break

                    if (((installed and changed_deps) or
                        (not installed and binpkg_changed_deps)) and
                        self._changed_deps(pkg)):
                        if not installed:
                            self._dynamic_config.\
                                ignored_binaries.setdefault(
                                pkg, {})["changed_deps"] = True

                            # Continue searching for a binary
                            # package instance built with the
                            # desired USE settings.
                            continue

                        break

                # Compare current config to installed package
                # and do not reinstall if possible.
                if not installed and not useoldpkg and cpv in vardb.match(atom):
                    inst_pkg = vardb.match_pkgs(
                        Atom('=' + pkg.cpv))[0]
                    if "--newrepo" in self._frozen_config.myopts and pkg.repo != inst_pkg.repo:
                        reinstall = True
                    elif reinstall_use:
                        forced_flags = set()
                        forced_flags.update(pkg.use.force)
                        forced_flags.update(pkg.use.mask)
                        old_use = inst_pkg.use.enabled
                        old_iuse = inst_pkg.iuse.all
                        cur_use = self._pkg_use_enabled(pkg)
                        cur_iuse = pkg.iuse.all
                        reinstall_for_flags = \
                            self._reinstall_for_flags(pkg,
                            forced_flags, old_use, old_iuse,
                            cur_use, cur_iuse)
                        if reinstall_for_flags:
                            reinstall = True
                if reinstall_atoms.findAtomForPackage(pkg, \
                    modified_use=self._pkg_use_enabled(pkg)):
                    reinstall = True
                if not built:
                    myeb = pkg
                elif useoldpkg:
                    matched_oldpkg.append(pkg)
                matched_packages.append(pkg)
                if reinstall_for_flags:
                    self._dynamic_config._reinstall_nodes[pkg] = \
                        reinstall_for_flags
                # Only the highest acceptable match per db is kept.
                break

    if not matched_packages:
        return None, None

    if "--debug" in self._frozen_config.myopts:
        for pkg in matched_packages:
            portage.writemsg("%s %s%s%s\n" % \
                ((pkg.type_name + ":").rjust(10),
                pkg.cpv, _repo_separator, pkg.repo), noiselevel=-1)

    # Filter out any old-style virtual matches if they are
    # mixed with new-style virtual matches.
    cp = atom_cp
    if len(matched_packages) > 1 and \
        cp is not None and \
        "virtual" == portage.catsplit(cp)[0]:
        for pkg in matched_packages:
            if pkg.cp != cp:
                continue
            # Got a new-style virtual, so filter
            # out any old-style virtuals.
            matched_packages = [pkg for pkg in matched_packages \
                if pkg.cp == cp]
            break

    if existing_node is not None and \
        existing_node in matched_packages:
        return existing_node, existing_node

    if len(matched_packages) > 1:
        if parent is not None and \
            (parent.root, parent.slot_atom) in self._dynamic_config._slot_operator_replace_installed:
            # We're forcing a rebuild of the parent because we missed some
            # update because of a slot operator dep.
            if atom.slot_operator == "=" and atom.sub_slot is None:
                # This one is a slot operator dep. Exclude the installed packages if a newer non-installed
                # pkg exists.
                highest_installed = None
                for pkg in matched_packages:
                    if pkg.installed:
                        if highest_installed is None or pkg.version > highest_installed.version:
                            highest_installed = pkg

                if highest_installed and self._want_update_pkg(parent, highest_installed):
                    non_installed = [pkg for pkg in matched_packages \
                        if not pkg.installed and pkg.version > highest_installed.version]

                    if non_installed:
                        matched_packages = non_installed

        if rebuilt_binaries:
            inst_pkg = None
            built_pkg = None
            unbuilt_pkg = None
            for pkg in matched_packages:
                if pkg.installed:
                    inst_pkg = pkg
                elif pkg.built:
                    built_pkg = pkg
                else:
                    if unbuilt_pkg is None or pkg > unbuilt_pkg:
                        unbuilt_pkg = pkg
            if built_pkg is not None and inst_pkg is not None:
                # Only reinstall if binary package BUILD_TIME is
                # non-empty, in order to avoid cases like to
                # bug #306659 where BUILD_TIME fields are missing
                # in local and/or remote Packages file.
                built_timestamp = built_pkg.build_time
                installed_timestamp = inst_pkg.build_time

                if unbuilt_pkg is not None and unbuilt_pkg > built_pkg:
                    pass
                elif "--rebuilt-binaries-timestamp" in self._frozen_config.myopts:
                    minimal_timestamp = self._frozen_config.myopts["--rebuilt-binaries-timestamp"]
                    if built_timestamp and \
                        built_timestamp > installed_timestamp and \
                        built_timestamp >= minimal_timestamp:
                        return built_pkg, existing_node
                else:
                    #Don't care if the binary has an older BUILD_TIME than the installed
                    #package. This is for closely tracking a binhost.
                    #Use --rebuilt-binaries-timestamp 0 if you want only newer binaries
                    #pulled in here.
                    if built_timestamp and \
                        built_timestamp != installed_timestamp:
                        return built_pkg, existing_node

        inst_pkg = None
        for pkg in matched_packages:
            if pkg.installed:
                inst_pkg = pkg
            if pkg.installed and pkg.invalid:
                # Drop invalid installed instances from consideration.
                matched_packages = [x for x in \
                    matched_packages if x is not pkg]

        if (inst_pkg is not None and parent is not None and
            not self._want_update_pkg(parent, inst_pkg)):
            return inst_pkg, existing_node

        if avoid_update:
            for pkg in matched_packages:
                if pkg.installed and self._pkg_visibility_check(pkg, autounmask_level):
                    return pkg, existing_node

        visible_matches = []
        if matched_oldpkg:
            visible_matches = [pkg.cpv for pkg in matched_oldpkg \
                if self._pkg_visibility_check(pkg, autounmask_level)]
        if not visible_matches:
            visible_matches = [pkg.cpv for pkg in matched_packages \
                if self._pkg_visibility_check(pkg, autounmask_level)]
        if visible_matches:
            bestmatch = portage.best(visible_matches)
        else:
            # all are masked, so ignore visibility
            bestmatch = portage.best([pkg.cpv for pkg in matched_packages])
        matched_packages = [pkg for pkg in matched_packages \
            if portage.dep.cpvequal(pkg.cpv, bestmatch)]

    # ordered by type preference ("ebuild" type is the last resort)
    return matched_packages[-1], existing_node
def _select_pkg_from_graph(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that have already been added to the graph or
those that are installed and have not been scheduled for
replacement.
"""
graph_db = self._dynamic_config._graph_trees[root]["porttree"].dbapi
matches = graph_db.match_pkgs(atom)
if not matches:
return None, None
# There may be multiple matches, and they may
# conflict with eachother, so choose the highest
# version that has already been added to the graph.
for pkg in reversed(matches):
if pkg in self._dynamic_config.digraph:
return pkg, pkg
# Fall back to installed packages
return self._select_pkg_from_installed(root, atom, onlydeps=onlydeps, parent=parent)
def _select_pkg_from_installed(self, root, atom, onlydeps=False, parent=None):
"""
Select packages that are installed.
"""
matches = list(self._iter_match_pkgs(self._frozen_config.roots[root],
"installed", atom))
if not matches:
return None, None
if len(matches) > 1:
matches.reverse() # ascending order
unmasked = [pkg for pkg in matches if \
self._pkg_visibility_check(pkg)]
if unmasked:
if len(unmasked) == 1:
matches = unmasked
else:
# Account for packages with masks (like KEYWORDS masks)
# that are usually ignored in visibility checks for
# installed packages, in order to handle cases like
# bug #350285.
unmasked = [pkg for pkg in matches if not pkg.masks]
if unmasked:
matches = unmasked
if len(matches) > 1:
# Now account for packages for which existing
# ebuilds are masked or unavailable (bug #445506).
unmasked = [pkg for pkg in matches if
self._equiv_ebuild_visible(pkg)]
if unmasked:
matches = unmasked
pkg = matches[-1] # highest match
in_graph = next(self._dynamic_config._package_tracker.match(
root, pkg.slot_atom, installed=False), None)
return pkg, in_graph
def _complete_graph(self, required_sets=None):
"""
Add any deep dependencies of required sets (args, system, world) that
have not been pulled into the graph yet. This ensures that the graph
is consistent such that initially satisfied deep dependencies are not
broken in the new graph. Initially unsatisfied dependencies are
irrelevant since we only want to avoid breaking dependencies that are
initially satisfied.
Since this method can consume enough time to disturb users, it is
currently only enabled by the --complete-graph option.
@param required_sets: contains required sets (currently only used
for depclean and prune removal operations)
@type required_sets: dict
"""
if "--buildpkgonly" in self._frozen_config.myopts or \
"recurse" not in self._dynamic_config.myparams:
return 1
complete_if_new_use = self._dynamic_config.myparams.get(
"complete_if_new_use", "y") == "y"
complete_if_new_ver = self._dynamic_config.myparams.get(
"complete_if_new_ver", "y") == "y"
rebuild_if_new_slot = self._dynamic_config.myparams.get(
"rebuild_if_new_slot", "y") == "y"
complete_if_new_slot = rebuild_if_new_slot
if "complete" not in self._dynamic_config.myparams and \
(complete_if_new_use or
complete_if_new_ver or complete_if_new_slot):
# Enable complete mode if an installed package will change somehow.
use_change = False
version_change = False
for node in self._dynamic_config.digraph:
if not isinstance(node, Package) or \
node.operation != "merge":
continue
vardb = self._frozen_config.roots[
node.root].trees["vartree"].dbapi
if complete_if_new_use or complete_if_new_ver:
inst_pkg = vardb.match_pkgs(node.slot_atom)
if inst_pkg and inst_pkg[0].cp == node.cp:
inst_pkg = inst_pkg[0]
if complete_if_new_ver:
if inst_pkg < node or node < inst_pkg:
version_change = True
break
elif not (inst_pkg.slot == node.slot and
inst_pkg.sub_slot == node.sub_slot):
# slot/sub-slot change without revbump gets
# similar treatment to a version change
version_change = True
break
# Intersect enabled USE with IUSE, in order to
# ignore forced USE from implicit IUSE flags, since
# they're probably irrelevant and they are sensitive
# to use.mask/force changes in the profile.
if complete_if_new_use and \
(node.iuse.all != inst_pkg.iuse.all or
self._pkg_use_enabled(node).intersection(node.iuse.all) !=
self._pkg_use_enabled(inst_pkg).intersection(inst_pkg.iuse.all)):
use_change = True
break
if complete_if_new_slot:
cp_list = vardb.match_pkgs(Atom(node.cp))
if (cp_list and cp_list[0].cp == node.cp and
not any(node.slot == pkg.slot and
node.sub_slot == pkg.sub_slot for pkg in cp_list)):
version_change = True
break
if use_change or version_change:
self._dynamic_config.myparams["complete"] = True
if "complete" not in self._dynamic_config.myparams:
return 1
self._load_vdb()
# Put the depgraph into a mode that causes it to only
# select packages that have already been added to the
# graph or those that are installed and have not been
# scheduled for replacement. Also, toggle the "deep"
# parameter so that all dependencies are traversed and
# accounted for.
self._dynamic_config._complete_mode = True
self._select_atoms = self._select_atoms_from_graph
if "remove" in self._dynamic_config.myparams:
self._select_package = self._select_pkg_from_installed
else:
self._select_package = self._select_pkg_from_graph
self._dynamic_config._traverse_ignored_deps = True
already_deep = self._dynamic_config.myparams.get("deep") is True
if not already_deep:
self._dynamic_config.myparams["deep"] = True
# Invalidate the package selection cache, since
# _select_package has just changed implementations.
for trees in self._dynamic_config._filtered_trees.values():
trees["porttree"].dbapi._clear_cache()
args = self._dynamic_config._initial_arg_list[:]
for root in self._frozen_config.roots:
if root != self._frozen_config.target_root and \
("remove" in self._dynamic_config.myparams or
self._frozen_config.myopts.get("--root-deps") is not None):
# Only pull in deps for the relevant root.
continue
depgraph_sets = self._dynamic_config.sets[root]
required_set_names = self._frozen_config._required_set_names.copy()
remaining_args = required_set_names.copy()
if required_sets is None or root not in required_sets:
pass
else:
# Removal actions may override sets with temporary
# replacements that have had atoms removed in order
# to implement --deselect behavior.
required_set_names = set(required_sets[root])
depgraph_sets.sets.clear()
depgraph_sets.sets.update(required_sets[root])
if "remove" not in self._dynamic_config.myparams and \
root == self._frozen_config.target_root and \
already_deep:
remaining_args.difference_update(depgraph_sets.sets)
if not remaining_args and \
not self._dynamic_config._ignored_deps and \
not self._dynamic_config._dep_stack:
continue
root_config = self._frozen_config.roots[root]
for s in required_set_names:
pset = depgraph_sets.sets.get(s)
if pset is None:
pset = root_config.sets[s]
atom = SETPREFIX + s
args.append(SetArg(arg=atom, pset=pset,
reset_depth=False, root_config=root_config))
self._set_args(args)
for arg in self._expand_set_args(args, add_to_digraph=True):
for atom in arg.pset.getAtoms():
self._dynamic_config._dep_stack.append(
Dependency(atom=atom, root=arg.root_config.root,
parent=arg, depth=self._UNREACHABLE_DEPTH))
if True:
if self._dynamic_config._ignored_deps:
self._dynamic_config._dep_stack.extend(self._dynamic_config._ignored_deps)
self._dynamic_config._ignored_deps = []
if not self._create_graph(allow_unsatisfied=True):
return 0
# Check the unsatisfied deps to see if any initially satisfied deps
# will become unsatisfied due to an upgrade. Initially unsatisfied
# deps are irrelevant since we only want to avoid breaking deps
# that are initially satisfied.
while self._dynamic_config._unsatisfied_deps:
dep = self._dynamic_config._unsatisfied_deps.pop()
vardb = self._frozen_config.roots[
dep.root].trees["vartree"].dbapi
matches = vardb.match_pkgs(dep.atom)
if not matches:
self._dynamic_config._initially_unsatisfied_deps.append(dep)
continue
# An scheduled installation broke a deep dependency.
# Add the installed package to the graph so that it
# will be appropriately reported as a slot collision
# (possibly solvable via backtracking).
pkg = matches[-1] # highest match
if (self._dynamic_config._allow_backtracking and
not self._want_installed_pkg(pkg) and (dep.atom.soname or (
dep.atom.package and dep.atom.slot_operator_built))):
# If pkg was already scheduled for rebuild by the previous
# calculation, then pulling in the installed instance will
# trigger a slot conflict that may go unsolved. Therefore,
# trigger a rebuild of the parent if appropriate.
dep.child = pkg
new_dep = self._slot_operator_update_probe(dep)
if new_dep is not None:
self._slot_operator_update_backtrack(
dep, new_dep=new_dep)
continue
if not self._add_pkg(pkg, dep):
return 0
if not self._create_graph(allow_unsatisfied=True):
return 0
return 1
def _pkg(self, cpv, type_name, root_config, installed=False,
onlydeps=False, myrepo = None):
"""
Get a package instance from the cache, or create a new
one if necessary. Raises PackageNotFound from aux_get if it
failures for some reason (package does not exist or is
corrupt).
"""
# Ensure that we use the specially optimized RootConfig instance
# that refers to FakeVartree instead of the real vartree.
root_config = self._frozen_config.roots[root_config.root]
pkg = self._frozen_config._pkg_cache.get(
Package._gen_hash_key(cpv=cpv, type_name=type_name,
repo_name=myrepo, root_config=root_config,
installed=installed, onlydeps=onlydeps))
if pkg is None and onlydeps and not installed:
# Maybe it already got pulled in as a "merge" node.
for candidate in self._dynamic_config._package_tracker.match(
root_config.root, Atom("="+cpv)):
if candidate.type_name == type_name and \
candidate.repo_name == myrepo and \
candidate.root_config is root_config and \
candidate.installed == installed and \
not candidate.onlydeps:
pkg = candidate
if pkg is None:
tree_type = self.pkg_tree_map[type_name]
db = root_config.trees[tree_type].dbapi
db_keys = list(self._frozen_config._trees_orig[root_config.root][
tree_type].dbapi._aux_cache_keys)
try:
metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
except KeyError:
raise portage.exception.PackageNotFound(cpv)
pkg = Package(built=(type_name != "ebuild"), cpv=cpv,
installed=installed, metadata=metadata, onlydeps=onlydeps,
root_config=root_config, type_name=type_name)
self._frozen_config._pkg_cache[pkg] = pkg
if not self._pkg_visibility_check(pkg) and \
'LICENSE' in pkg.masks and len(pkg.masks) == 1:
slot_key = (pkg.root, pkg.slot_atom)
other_pkg = self._frozen_config._highest_license_masked.get(slot_key)
if other_pkg is None or pkg > other_pkg:
self._frozen_config._highest_license_masked[slot_key] = pkg
return pkg
	def _validate_blockers(self):
		"""Remove any blockers from the digraph that do not match any of the
		packages within the graph. If necessary, create hard deps to ensure
		correct merge order such that mutually blocking packages are never
		installed simultaneously. Also add runtime blockers from all installed
		packages if any of them haven't been added already (bug 128809).
		Normally, this method is called only after the graph is complete, and
		after _solve_non_slot_operator_slot_conflicts has had an opportunity
		to solve slot conflicts (possibly removing some blockers). It can also
		be called earlier, in order to get a preview of the blocker data, but
		then it needs to be called again after the graph is complete.
		Returns True on success, or False when an invalid dependency string
		is encountered (a notice is displayed in that case).
		"""
		# The _in_blocker_conflict method needs to assert that this method
		# has been called before it, by checking that it is not None.
		self._dynamic_config._blocked_pkgs = digraph()
		if "--buildpkgonly" in self._frozen_config.myopts or \
			"--nodeps" in self._frozen_config.myopts:
			return True
		# NOTE(review): the "if True:" below is vestigial (it always runs);
		# apparently kept to preserve the indentation of the large body.
		if True:
			# Pull in blockers from all installed packages that haven't already
			# been pulled into the depgraph, in order to ensure that they are
			# respected (bug 128809). Due to the performance penalty that is
			# incurred by all the additional dep_check calls that are required,
			# blockers returned from dep_check are cached on disk by the
			# BlockerCache class.
			# For installed packages, always ignore blockers from DEPEND since
			# only runtime dependencies should be relevant for packages that
			# are already built.
			dep_keys = Package._runtime_keys
			for myroot in self._frozen_config.trees:
				if self._frozen_config.myopts.get("--root-deps") is not None and \
					myroot != self._frozen_config.target_root:
					continue
				vardb = self._frozen_config.trees[myroot]["vartree"].dbapi
				pkgsettings = self._frozen_config.pkgsettings[myroot]
				root_config = self._frozen_config.roots[myroot]
				final_db = PackageTrackerDbapiWrapper(
					myroot, self._dynamic_config._package_tracker)
				blocker_cache = BlockerCache(myroot, vardb)
				# Any cpv left in stale_cache after the loop no longer has a
				# corresponding installed package, so its entry is purged.
				stale_cache = set(blocker_cache)
				for pkg in vardb:
					cpv = pkg.cpv
					stale_cache.discard(cpv)
					pkg_in_graph = self._dynamic_config.digraph.contains(pkg)
					pkg_deps_added = \
						pkg in self._dynamic_config._traversed_pkg_deps
					# Check for masked installed packages. Only warn about
					# packages that are in the graph in order to avoid warning
					# about those that will be automatically uninstalled during
					# the merge process or by --depclean. Always warn about
					# packages masked by license, since the user likely wants
					# to adjust ACCEPT_LICENSE.
					if pkg in self._dynamic_config._package_tracker:
						if not self._pkg_visibility_check(pkg,
							trust_graph=False) and \
							(pkg_in_graph or 'LICENSE' in pkg.masks):
							self._dynamic_config._masked_installed.add(pkg)
						else:
							self._check_masks(pkg)
					blocker_atoms = None
					blockers = None
					if pkg_deps_added:
						# Collect blockers already discovered for this package
						# during graph traversal (both relevant and irrelevant).
						blockers = []
						try:
							blockers.extend(
								self._dynamic_config._blocker_parents.child_nodes(pkg))
						except KeyError:
							pass
						try:
							blockers.extend(
								self._dynamic_config._irrelevant_blockers.child_nodes(pkg))
						except KeyError:
							pass
						if blockers:
							# Select just the runtime blockers.
							blockers = [blocker for blocker in blockers \
								if blocker.priority.runtime or \
								blocker.priority.runtime_post]
					if blockers is not None:
						# Reduce to a set of atoms for cache comparison below.
						blockers = set(blocker.atom for blocker in blockers)
					# If this node has any blockers, create a "nomerge"
					# node for it so that they can be enforced.
					self._spinner_update()
					blocker_data = blocker_cache.get(cpv)
					# A counter mismatch means the installed instance changed
					# since the cache entry was written, so discard it.
					if blocker_data is not None and \
						blocker_data.counter != pkg.counter:
						blocker_data = None
					# If blocker data from the graph is available, use
					# it to validate the cache and update the cache if
					# it seems invalid.
					if blocker_data is not None and \
						blockers is not None:
						if not blockers.symmetric_difference(
							blocker_data.atoms):
							continue
						blocker_data = None
					if blocker_data is None and \
						blockers is not None:
						# Re-use the blockers from the graph.
						blocker_atoms = sorted(blockers)
						blocker_data = \
							blocker_cache.BlockerData(pkg.counter, blocker_atoms)
						blocker_cache[pkg.cpv] = blocker_data
						continue
					if blocker_data:
						blocker_atoms = [Atom(atom) for atom in blocker_data.atoms]
					else:
						# Use aux_get() to trigger FakeVartree global
						# updates on *DEPEND when appropriate.
						depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
						# It is crucial to pass in final_db here in order to
						# optimize dep_check calls by eliminating atoms via
						# dep_wordreduce and dep_eval calls.
						try:
							success, atoms = portage.dep_check(depstr,
								final_db, pkgsettings, myuse=self._pkg_use_enabled(pkg),
								trees=self._dynamic_config._graph_trees, myroot=myroot)
						except SystemExit:
							raise
						except Exception as e:
							# This is helpful, for example, if a ValueError
							# is thrown from cpv_expand due to multiple
							# matches (this can happen if an atom lacks a
							# category).
							show_invalid_depstring_notice(
								pkg, depstr, "%s" % (e,))
							del e
							raise
						if not success:
							replacement_pkgs = self._dynamic_config._package_tracker.match(
								myroot, pkg.slot_atom)
							if any(replacement_pkg.operation == "merge" for
								replacement_pkg in replacement_pkgs):
								# This package is being replaced anyway, so
								# ignore invalid dependencies so as not to
								# annoy the user too much (otherwise they'd be
								# forced to manually unmerge it first).
								continue
							show_invalid_depstring_notice(pkg, depstr, atoms)
							return False
						blocker_atoms = [myatom for myatom in atoms \
							if myatom.blocker]
						blocker_atoms.sort()
						blocker_cache[cpv] = \
							blocker_cache.BlockerData(pkg.counter, blocker_atoms)
					if blocker_atoms:
						try:
							for atom in blocker_atoms:
								blocker = Blocker(atom=atom,
									eapi=pkg.eapi,
									priority=self._priority(runtime=True),
									root=myroot)
								self._dynamic_config._blocker_parents.add(blocker, pkg)
						except portage.exception.InvalidAtom as e:
							depstr = " ".join(vardb.aux_get(pkg.cpv, dep_keys))
							show_invalid_depstring_notice(
								pkg, depstr, "Invalid Atom: %s" % (e,))
							return False
				# Purge cache entries for packages that are no longer installed.
				for cpv in stale_cache:
					del blocker_cache[cpv]
				blocker_cache.flush()
				del blocker_cache
		# Discard any "uninstall" tasks scheduled by previous calls
		# to this method, since those tasks may not make sense given
		# the current graph state.
		previous_uninstall_tasks = self._dynamic_config._blocker_uninstalls.leaf_nodes()
		if previous_uninstall_tasks:
			self._dynamic_config._blocker_uninstalls = digraph()
			self._dynamic_config.digraph.difference_update(previous_uninstall_tasks)
		# Revert state from previous calls.
		self._dynamic_config._blocker_parents.update(
			self._dynamic_config._irrelevant_blockers)
		self._dynamic_config._irrelevant_blockers.clear()
		self._dynamic_config._unsolvable_blockers.clear()
		for blocker in self._dynamic_config._blocker_parents.leaf_nodes():
			self._spinner_update()
			root_config = self._frozen_config.roots[blocker.root]
			virtuals = root_config.settings.getvirtuals()
			myroot = blocker.root
			initial_db = self._frozen_config.trees[myroot]["vartree"].dbapi
			provider_virtual = False
			if blocker.cp in virtuals and \
				not self._have_new_virt(blocker.root, blocker.cp):
				provider_virtual = True
			# Use this to check PROVIDE for each matched package
			# when necessary.
			atom_set = InternalPackageSet(
				initial_atoms=[blocker.atom])
			if provider_virtual:
				# Expand the virtual into one atom per provider entry.
				atoms = []
				for provider_entry in virtuals[blocker.cp]:
					atoms.append(Atom(blocker.atom.replace(
						blocker.cp, provider_entry.cp, 1)))
			else:
				atoms = [blocker.atom]
			# Packages matched by the blocker among currently installed ones.
			blocked_initial = set()
			for atom in atoms:
				for pkg in initial_db.match_pkgs(atom):
					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
						blocked_initial.add(pkg)
			# Packages matched by the blocker in the final (planned) graph state.
			blocked_final = set()
			for atom in atoms:
				for pkg in self._dynamic_config._package_tracker.match(myroot, atom):
					if atom_set.findAtomForPackage(pkg, modified_use=self._pkg_use_enabled(pkg)):
						blocked_final.add(pkg)
			if not blocked_initial and not blocked_final:
				# Nothing matches this blocker; mark it irrelevant.
				parent_pkgs = self._dynamic_config._blocker_parents.parent_nodes(blocker)
				self._dynamic_config._blocker_parents.remove(blocker)
				# Discard any parents that don't have any more blockers.
				for pkg in parent_pkgs:
					self._dynamic_config._irrelevant_blockers.add(blocker, pkg)
					if not self._dynamic_config._blocker_parents.child_nodes(pkg):
						self._dynamic_config._blocker_parents.remove(pkg)
				continue
			for parent in self._dynamic_config._blocker_parents.parent_nodes(blocker):
				unresolved_blocks = False
				# Tuples of (installed package to unmerge, node whose merge
				# order is constrained by that unmerge) — see addnode below.
				depends_on_order = set()
				for pkg in blocked_initial:
					if pkg.slot_atom == parent.slot_atom and \
						not blocker.atom.blocker.overlap.forbid:
						# New !!atom blockers do not allow temporary
						# simulaneous installation, so unlike !atom
						# blockers, !!atom blockers aren't ignored
						# when they match other packages occupying
						# the same slot.
						continue
					if parent.installed:
						# Two currently installed packages conflict with
						# eachother. Ignore this case since the damage
						# is already done and this would be likely to
						# confuse users if displayed like a normal blocker.
						continue
					self._dynamic_config._blocked_pkgs.add(pkg, blocker)
					if parent.operation == "merge":
						# Maybe the blocked package can be replaced or simply
						# unmerged to resolve this block.
						depends_on_order.add((pkg, parent))
						continue
					# None of the above blocker resolutions techniques apply,
					# so apparently this one is unresolvable.
					unresolved_blocks = True
				for pkg in blocked_final:
					if pkg.slot_atom == parent.slot_atom and \
						not blocker.atom.blocker.overlap.forbid:
						# New !!atom blockers do not allow temporary
						# simulaneous installation, so unlike !atom
						# blockers, !!atom blockers aren't ignored
						# when they match other packages occupying
						# the same slot.
						continue
					if parent.operation == "nomerge" and \
						pkg.operation == "nomerge":
						# This blocker will be handled the next time that a
						# merge of either package is triggered.
						continue
					self._dynamic_config._blocked_pkgs.add(pkg, blocker)
					# Maybe the blocking package can be
					# unmerged to resolve this block.
					if parent.operation == "merge" and pkg.installed:
						depends_on_order.add((pkg, parent))
						continue
					elif parent.operation == "nomerge":
						depends_on_order.add((parent, pkg))
						continue
					# None of the above blocker resolutions techniques apply,
					# so apparently this one is unresolvable.
					unresolved_blocks = True
				# Make sure we don't unmerge any package that have been pulled
				# into the graph.
				if not unresolved_blocks and depends_on_order:
					for inst_pkg, inst_task in depends_on_order:
						if self._dynamic_config.digraph.contains(inst_pkg) and \
							self._dynamic_config.digraph.parent_nodes(inst_pkg):
							unresolved_blocks = True
							break
				if not unresolved_blocks and depends_on_order:
					for inst_pkg, inst_task in depends_on_order:
						uninst_task = Package(built=inst_pkg.built,
							cpv=inst_pkg.cpv, installed=inst_pkg.installed,
							metadata=inst_pkg._metadata,
							operation="uninstall",
							root_config=inst_pkg.root_config,
							type_name=inst_pkg.type_name)
						# Enforce correct merge order with a hard dep.
						self._dynamic_config.digraph.addnode(uninst_task, inst_task,
							priority=BlockerDepPriority.instance)
						# Count references to this blocker so that it can be
						# invalidated after nodes referencing it have been
						# merged.
						self._dynamic_config._blocker_uninstalls.addnode(uninst_task, blocker)
				if not unresolved_blocks and not depends_on_order:
					self._dynamic_config._irrelevant_blockers.add(blocker, parent)
					self._dynamic_config._blocker_parents.remove_edge(blocker, parent)
					if not self._dynamic_config._blocker_parents.parent_nodes(blocker):
						self._dynamic_config._blocker_parents.remove(blocker)
					if not self._dynamic_config._blocker_parents.child_nodes(parent):
						self._dynamic_config._blocker_parents.remove(parent)
				if unresolved_blocks:
					self._dynamic_config._unsolvable_blockers.add(blocker, parent)
		return True
def _accept_blocker_conflicts(self):
acceptable = False
for x in ("--buildpkgonly", "--fetchonly",
"--fetch-all-uri", "--nodeps"):
if x in self._frozen_config.myopts:
acceptable = True
break
return acceptable
def _merge_order_bias(self, mygraph):
"""
For optimal leaf node selection, promote deep system runtime deps and
order nodes from highest to lowest overall reference count.
"""
node_info = {}
for node in mygraph.order:
node_info[node] = len(mygraph.parent_nodes(node))
deep_system_deps = _find_deep_system_runtime_deps(mygraph)
def cmp_merge_preference(node1, node2):
if node1.operation == 'uninstall':
if node2.operation == 'uninstall':
return 0
return 1
if node2.operation == 'uninstall':
if node1.operation == 'uninstall':
return 0
return -1
node1_sys = node1 in deep_system_deps
node2_sys = node2 in deep_system_deps
if node1_sys != node2_sys:
if node1_sys:
return -1
return 1
return node_info[node2] - node_info[node1]
mygraph.order.sort(key=cmp_sort_key(cmp_merge_preference))
def altlist(self, reversed=DeprecationWarning):
if reversed is not DeprecationWarning:
warnings.warn("The reversed parameter of "
"_emerge.depgraph.depgraph.altlist() is deprecated",
DeprecationWarning, stacklevel=2)
while self._dynamic_config._serialized_tasks_cache is None:
self._resolve_conflicts()
try:
self._dynamic_config._serialized_tasks_cache, self._dynamic_config._scheduler_graph = \
self._serialize_tasks()
except self._serialize_tasks_retry:
pass
retlist = self._dynamic_config._serialized_tasks_cache
if reversed is not DeprecationWarning and reversed:
# TODO: remove the "reversed" parameter (builtin name collision)
retlist = list(retlist)
retlist.reverse()
retlist = tuple(retlist)
return retlist
def _implicit_libc_deps(self, mergelist, graph):
"""
Create implicit dependencies on libc, in order to ensure that libc
is installed as early as possible (see bug #303567).
"""
libc_pkgs = {}
implicit_libc_roots = (self._frozen_config._running_root.root,)
for root in implicit_libc_roots:
vardb = self._frozen_config.trees[root]["vartree"].dbapi
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
for pkg in self._dynamic_config._package_tracker.match(root, atom):
if pkg.operation == "merge" and \
not vardb.cpv_exists(pkg.cpv):
libc_pkgs.setdefault(pkg.root, set()).add(pkg)
if not libc_pkgs:
return
earlier_libc_pkgs = set()
for pkg in mergelist:
if not isinstance(pkg, Package):
# a satisfied blocker
continue
root_libc_pkgs = libc_pkgs.get(pkg.root)
if root_libc_pkgs is not None and \
pkg.operation == "merge":
if pkg in root_libc_pkgs:
earlier_libc_pkgs.add(pkg)
else:
for libc_pkg in root_libc_pkgs:
if libc_pkg in earlier_libc_pkgs:
graph.add(libc_pkg, pkg,
priority=DepPriority(buildtime=True))
	def schedulerGraph(self):
		"""
		The scheduler graph is identical to the normal one except that
		uninstall edges are reversed in specific cases that require
		conflicting packages to be temporarily installed simultaneously.
		This is intended for use by the Scheduler in its parallelization
		logic. It ensures that temporary simultaneous installation of
		conflicting packages is avoided when appropriate (especially for
		!!atom blockers), but allowed in specific cases that require it.

		Note that this method calls break_refs() which alters the state of
		internal Package instances such that this depgraph instance should
		not be used to perform any more calculations.

		@return: a _scheduler_graph_config bundling the trees, the pruned
			package cache, the scheduler graph and the merge list
		"""
		# NOTE: altlist initializes self._dynamic_config._scheduler_graph
		mergelist = self.altlist()
		self._implicit_libc_deps(mergelist,
			self._dynamic_config._scheduler_graph)
		# Break DepPriority.satisfied attributes which reference
		# installed Package instances.
		for parents, children, node in \
			self._dynamic_config._scheduler_graph.nodes.values():
			for priorities in chain(parents.values(), children.values()):
				for priority in priorities:
					if priority.satisfied:
						# Normalize any truthy value (which may be a Package
						# reference) to a plain True, dropping the reference.
						priority.satisfied = True
		pkg_cache = self._frozen_config._pkg_cache
		graph = self._dynamic_config._scheduler_graph
		trees = self._frozen_config.trees
		pruned_pkg_cache = {}
		# Keep only packages still reachable by the scheduler: graph members
		# plus installed packages that are present in the vartree.
		for key, pkg in pkg_cache.items():
			if pkg in graph or \
				(pkg.installed and pkg in trees[pkg.root]['vartree'].dbapi):
				pruned_pkg_cache[key] = pkg
		for root in trees:
			trees[root]['vartree']._pkg_cache = pruned_pkg_cache
		# After this call, this depgraph must not be used for calculations.
		self.break_refs()
		sched_config = \
			_scheduler_graph_config(trees, pruned_pkg_cache, graph, mergelist)
		return sched_config
def break_refs(self):
"""
Break any references in Package instances that lead back to the depgraph.
This is useful if you want to hold references to packages without also
holding the depgraph on the heap. It should only be called after the
depgraph and _frozen_config will not be used for any more calculations.
"""
for root_config in self._frozen_config.roots.values():
root_config.update(self._frozen_config._trees_orig[
root_config.root]["root_config"])
# Both instances are now identical, so discard the
# original which should have no other references.
self._frozen_config._trees_orig[
root_config.root]["root_config"] = root_config
def _resolve_conflicts(self):
if "complete" not in self._dynamic_config.myparams and \
self._dynamic_config._allow_backtracking and \
any(self._dynamic_config._package_tracker.slot_conflicts()) and \
not self._accept_blocker_conflicts():
self._dynamic_config.myparams["complete"] = True
if not self._complete_graph():
raise self._unknown_internal_error()
self._process_slot_conflicts()
def _serialize_tasks(self):
debug = "--debug" in self._frozen_config.myopts
if debug:
writemsg("\ndigraph:\n\n", noiselevel=-1)
self._dynamic_config.digraph.debug_print()
writemsg("\n", noiselevel=-1)
scheduler_graph = self._dynamic_config.digraph.copy()
if '--nodeps' in self._frozen_config.myopts:
# Preserve the package order given on the command line.
return ([node for node in scheduler_graph \
if isinstance(node, Package) \
and node.operation == 'merge'], scheduler_graph)
mygraph=self._dynamic_config.digraph.copy()
removed_nodes = set()
# Prune off all DependencyArg instances since they aren't
# needed, and because of nested sets this is faster than doing
# it with multiple digraph.root_nodes() calls below. This also
# takes care of nested sets that have circular references,
# which wouldn't be matched by digraph.root_nodes().
for node in mygraph:
if isinstance(node, DependencyArg):
removed_nodes.add(node)
if removed_nodes:
mygraph.difference_update(removed_nodes)
removed_nodes.clear()
# Prune "nomerge" root nodes if nothing depends on them, since
# otherwise they slow down merge order calculation. Don't remove
# non-root nodes since they help optimize merge order in some cases
# such as revdep-rebuild.
while True:
for node in mygraph.root_nodes():
if not isinstance(node, Package) or \
node.installed or node.onlydeps:
removed_nodes.add(node)
if removed_nodes:
self._spinner_update()
mygraph.difference_update(removed_nodes)
if not removed_nodes:
break
removed_nodes.clear()
self._merge_order_bias(mygraph)
def cmp_circular_bias(n1, n2):
"""
RDEPEND is stronger than PDEPEND and this function
measures such a strength bias within a circular
dependency relationship.
"""
n1_n2_medium = n2 in mygraph.child_nodes(n1,
ignore_priority=priority_range.ignore_medium_soft)
n2_n1_medium = n1 in mygraph.child_nodes(n2,
ignore_priority=priority_range.ignore_medium_soft)
if n1_n2_medium == n2_n1_medium:
return 0
elif n1_n2_medium:
return 1
return -1
myblocker_uninstalls = self._dynamic_config._blocker_uninstalls.copy()
retlist=[]
# Contains uninstall tasks that have been scheduled to
# occur after overlapping blockers have been installed.
scheduled_uninstalls = set()
# Contains any Uninstall tasks that have been ignored
# in order to avoid the circular deps code path. These
# correspond to blocker conflicts that could not be
# resolved.
ignored_uninstall_tasks = set()
have_uninstall_task = False
complete = "complete" in self._dynamic_config.myparams
asap_nodes = []
def get_nodes(**kwargs):
"""
Returns leaf nodes excluding Uninstall instances
since those should be executed as late as possible.
"""
return [node for node in mygraph.leaf_nodes(**kwargs) \
if isinstance(node, Package) and \
(node.operation != "uninstall" or \
node in scheduled_uninstalls)]
# sys-apps/portage needs special treatment if ROOT="/"
running_root = self._frozen_config._running_root.root
runtime_deps = InternalPackageSet(
initial_atoms=[PORTAGE_PACKAGE_ATOM])
running_portage = self._frozen_config.trees[running_root]["vartree"].dbapi.match_pkgs(
Atom(PORTAGE_PACKAGE_ATOM))
replacement_portage = list(self._dynamic_config._package_tracker.match(
running_root, Atom(PORTAGE_PACKAGE_ATOM)))
if running_portage:
running_portage = running_portage[0]
else:
running_portage = None
if replacement_portage:
replacement_portage = replacement_portage[0]
else:
replacement_portage = None
if replacement_portage == running_portage:
replacement_portage = None
if running_portage is not None:
try:
portage_rdepend = self._select_atoms_highest_available(
running_root, running_portage._metadata["RDEPEND"],
myuse=self._pkg_use_enabled(running_portage),
parent=running_portage, strict=False)
except portage.exception.InvalidDependString as e:
portage.writemsg("!!! Invalid RDEPEND in " + \
"'%svar/db/pkg/%s/RDEPEND': %s\n" % \
(running_root, running_portage.cpv, e), noiselevel=-1)
del e
portage_rdepend = {running_portage : []}
for atoms in portage_rdepend.values():
runtime_deps.update(atom for atom in atoms \
if not atom.blocker)
# Merge libc asap, in order to account for implicit
# dependencies. See bug #303567.
implicit_libc_roots = (running_root,)
for root in implicit_libc_roots:
libc_pkgs = set()
vardb = self._frozen_config.trees[root]["vartree"].dbapi
for atom in self._expand_virt_from_graph(root,
portage.const.LIBC_PACKAGE_ATOM):
if atom.blocker:
continue
for pkg in self._dynamic_config._package_tracker.match(root, atom):
if pkg.operation == "merge" and \
not vardb.cpv_exists(pkg.cpv):
libc_pkgs.add(pkg)
if libc_pkgs:
# If there's also an os-headers upgrade, we need to
# pull that in first. See bug #328317.
for atom in self._expand_virt_from_graph(root,
portage.const.OS_HEADERS_PACKAGE_ATOM):
if atom.blocker:
continue
for pkg in self._dynamic_config._package_tracker.match(root, atom):
if pkg.operation == "merge" and \
not vardb.cpv_exists(pkg.cpv):
asap_nodes.append(pkg)
asap_nodes.extend(libc_pkgs)
def gather_deps(ignore_priority, mergeable_nodes,
selected_nodes, node):
"""
Recursively gather a group of nodes that RDEPEND on
eachother. This ensures that they are merged as a group
and get their RDEPENDs satisfied as soon as possible.
"""
if node in selected_nodes:
return True
if node not in mergeable_nodes:
return False
if node == replacement_portage and \
mygraph.child_nodes(node,
ignore_priority=priority_range.ignore_medium_soft):
# Make sure that portage always has all of it's
# RDEPENDs installed first.
return False
selected_nodes.add(node)
for child in mygraph.child_nodes(node,
ignore_priority=ignore_priority):
if not gather_deps(ignore_priority,
mergeable_nodes, selected_nodes, child):
return False
return True
def ignore_uninst_or_med(priority):
if priority is BlockerDepPriority.instance:
return True
return priority_range.ignore_medium(priority)
def ignore_uninst_or_med_soft(priority):
    # Same as ignore_uninst_or_med, but with the stricter
    # medium_soft filter for non-blocker priorities.
    if priority is not BlockerDepPriority.instance:
        return priority_range.ignore_medium_soft(priority)
    return True
tree_mode = "--tree" in self._frozen_config.myopts
# Tracks whether or not the current iteration should prefer asap_nodes
# if available. This is set to False when the previous iteration
# failed to select any nodes. It is reset whenever nodes are
# successfully selected.
prefer_asap = True
# Controls whether or not the current iteration should drop edges that
# are "satisfied" by installed packages, in order to solve circular
# dependencies. The deep runtime dependencies of installed packages are
# not checked in this case (bug #199856), so it must be avoided
# whenever possible.
drop_satisfied = False
# State of variables for successive iterations that loosen the
# criteria for node selection.
#
# iteration prefer_asap drop_satisfied
# 1 True False
# 2 False False
# 3 False True
#
# If no nodes are selected on the last iteration, it is due to
# unresolved blockers or circular dependencies.
while mygraph:
self._spinner_update()
selected_nodes = None
ignore_priority = None
if drop_satisfied or (prefer_asap and asap_nodes):
priority_range = DepPrioritySatisfiedRange
else:
priority_range = DepPriorityNormalRange
if prefer_asap and asap_nodes:
# ASAP nodes are merged before their soft deps. Go ahead and
# select root nodes here if necessary, since it's typical for
# the parent to have been removed from the graph already.
asap_nodes = [node for node in asap_nodes \
if mygraph.contains(node)]
for i in range(priority_range.SOFT,
priority_range.MEDIUM_SOFT + 1):
ignore_priority = priority_range.ignore_priority[i]
for node in asap_nodes:
if not mygraph.child_nodes(node,
ignore_priority=ignore_priority):
selected_nodes = [node]
asap_nodes.remove(node)
break
if selected_nodes:
break
if not selected_nodes and \
not (prefer_asap and asap_nodes):
for i in range(priority_range.NONE,
priority_range.MEDIUM_SOFT + 1):
ignore_priority = priority_range.ignore_priority[i]
nodes = get_nodes(ignore_priority=ignore_priority)
if nodes:
# If there is a mixture of merges and uninstalls,
# do the uninstalls first.
good_uninstalls = None
if len(nodes) > 1:
good_uninstalls = []
for node in nodes:
if node.operation == "uninstall":
good_uninstalls.append(node)
if good_uninstalls:
nodes = good_uninstalls
else:
nodes = nodes
if good_uninstalls or len(nodes) == 1 or \
(ignore_priority is None and \
not asap_nodes and not tree_mode):
# Greedily pop all of these nodes since no
# relationship has been ignored. This optimization
# destroys --tree output, so it's disabled in tree
# mode.
selected_nodes = nodes
else:
# For optimal merge order:
# * Only pop one node.
# * Removing a root node (node without a parent)
# will not produce a leaf node, so avoid it.
# * It's normal for a selected uninstall to be a
# root node, so don't check them for parents.
if asap_nodes:
prefer_asap_parents = (True, False)
else:
prefer_asap_parents = (False,)
for check_asap_parent in prefer_asap_parents:
if check_asap_parent:
for node in nodes:
parents = mygraph.parent_nodes(node,
ignore_priority=DepPrioritySatisfiedRange.ignore_soft)
if any(x in asap_nodes for x in parents):
selected_nodes = [node]
break
else:
for node in nodes:
if mygraph.parent_nodes(node):
selected_nodes = [node]
break
if selected_nodes:
break
if selected_nodes:
break
if not selected_nodes:
nodes = get_nodes(ignore_priority=priority_range.ignore_medium)
if nodes:
mergeable_nodes = set(nodes)
if prefer_asap and asap_nodes:
nodes = asap_nodes
# When gathering the nodes belonging to a runtime cycle,
# we want to minimize the number of nodes gathered, since
# this tends to produce a more optimal merge order.
# Ignoring all medium_soft deps serves this purpose.
# In the case of multiple runtime cycles, where some cycles
# may depend on smaller independent cycles, it's optimal
# to merge smaller independent cycles before other cycles
# that depend on them. Therefore, we search for the
# smallest cycle in order to try and identify and prefer
# these smaller independent cycles.
ignore_priority = priority_range.ignore_medium_soft
smallest_cycle = None
for node in nodes:
if not mygraph.parent_nodes(node):
continue
selected_nodes = set()
if gather_deps(ignore_priority,
mergeable_nodes, selected_nodes, node):
if smallest_cycle is None or \
len(selected_nodes) < len(smallest_cycle):
smallest_cycle = selected_nodes
selected_nodes = smallest_cycle
if selected_nodes is not None:
cycle_digraph = mygraph.copy()
cycle_digraph.difference_update([x for x in
cycle_digraph if x not in selected_nodes])
leaves = cycle_digraph.leaf_nodes()
if leaves:
# NOTE: This case should only be triggered when
# prefer_asap is True, since otherwise these
# leaves would have been selected to merge
# before this point. Since these "leaves" may
# actually have some low-priority dependencies
# that we have intentionally ignored, select
# only one node here, so that merge order
# accounts for as many dependencies as possible.
selected_nodes = [leaves[0]]
if debug:
writemsg("\nruntime cycle digraph (%s nodes):\n\n" %
(len(selected_nodes),), noiselevel=-1)
cycle_digraph.debug_print()
writemsg("\n", noiselevel=-1)
if leaves:
writemsg("runtime cycle leaf: %s\n\n" %
(selected_nodes[0],), noiselevel=-1)
if prefer_asap and asap_nodes and not selected_nodes:
# We failed to find any asap nodes to merge, so ignore
# them for the next iteration.
prefer_asap = False
continue
if selected_nodes and ignore_priority is not None:
# Try to merge ignored medium_soft deps as soon as possible
# if they're not satisfied by installed packages.
for node in selected_nodes:
children = set(mygraph.child_nodes(node))
soft = children.difference(
mygraph.child_nodes(node,
ignore_priority=DepPrioritySatisfiedRange.ignore_soft))
medium_soft = children.difference(
mygraph.child_nodes(node,
ignore_priority = \
DepPrioritySatisfiedRange.ignore_medium_soft))
medium_soft.difference_update(soft)
for child in medium_soft:
if child in selected_nodes:
continue
if child in asap_nodes:
continue
# Merge PDEPEND asap for bug #180045.
asap_nodes.append(child)
if selected_nodes and len(selected_nodes) > 1:
if not isinstance(selected_nodes, list):
selected_nodes = list(selected_nodes)
selected_nodes.sort(key=cmp_sort_key(cmp_circular_bias))
if not selected_nodes and myblocker_uninstalls:
# An Uninstall task needs to be executed in order to
# avoid conflict if possible.
if drop_satisfied:
priority_range = DepPrioritySatisfiedRange
else:
priority_range = DepPriorityNormalRange
mergeable_nodes = get_nodes(
ignore_priority=ignore_uninst_or_med)
min_parent_deps = None
uninst_task = None
for task in myblocker_uninstalls.leaf_nodes():
# Do some sanity checks so that system or world packages
# don't get uninstalled inappropriately here (only really
# necessary when --complete-graph has not been enabled).
if task in ignored_uninstall_tasks:
continue
if task in scheduled_uninstalls:
# It's been scheduled but it hasn't
# been executed yet due to dependence
# on installation of blocking packages.
continue
root_config = self._frozen_config.roots[task.root]
inst_pkg = self._pkg(task.cpv, "installed", root_config,
installed=True)
if self._dynamic_config.digraph.contains(inst_pkg):
continue
forbid_overlap = False
heuristic_overlap = False
for blocker in myblocker_uninstalls.parent_nodes(task):
if not eapi_has_strong_blocks(blocker.eapi):
heuristic_overlap = True
elif blocker.atom.blocker.overlap.forbid:
forbid_overlap = True
break
if forbid_overlap and running_root == task.root:
continue
if heuristic_overlap and running_root == task.root:
# Never uninstall sys-apps/portage or it's essential
# dependencies, except through replacement.
try:
runtime_dep_atoms = \
list(runtime_deps.iterAtomsForPackage(task))
except portage.exception.InvalidDependString as e:
portage.writemsg("!!! Invalid PROVIDE in " + \
"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
(task.root, task.cpv, e), noiselevel=-1)
del e
continue
# Don't uninstall a runtime dep if it appears
# to be the only suitable one installed.
skip = False
vardb = root_config.trees["vartree"].dbapi
for atom in runtime_dep_atoms:
other_version = None
for pkg in vardb.match_pkgs(atom):
if pkg.cpv == task.cpv and \
pkg.counter == task.counter:
continue
other_version = pkg
break
if other_version is None:
skip = True
break
if skip:
continue
# For packages in the system set, don't take
# any chances. If the conflict can't be resolved
# by a normal replacement operation then abort.
skip = False
try:
for atom in root_config.sets[
"system"].iterAtomsForPackage(task):
skip = True
break
except portage.exception.InvalidDependString as e:
portage.writemsg("!!! Invalid PROVIDE in " + \
"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
(task.root, task.cpv, e), noiselevel=-1)
del e
skip = True
if skip:
continue
# Note that the world check isn't always
# necessary since self._complete_graph() will
# add all packages from the system and world sets to the
# graph. This just allows unresolved conflicts to be
# detected as early as possible, which makes it possible
# to avoid calling self._complete_graph() when it is
# unnecessary due to blockers triggering an abortion.
if not complete:
# For packages in the world set, go ahead an uninstall
# when necessary, as long as the atom will be satisfied
# in the final state.
skip = False
try:
for atom in root_config.sets[
"selected"].iterAtomsForPackage(task):
satisfied = False
for pkg in self._dynamic_config._package_tracker.match(task.root, atom):
if pkg == inst_pkg:
continue
satisfied = True
break
if not satisfied:
skip = True
self._dynamic_config._blocked_world_pkgs[inst_pkg] = atom
break
except portage.exception.InvalidDependString as e:
portage.writemsg("!!! Invalid PROVIDE in " + \
"'%svar/db/pkg/%s/PROVIDE': %s\n" % \
(task.root, task.cpv, e), noiselevel=-1)
del e
skip = True
if skip:
continue
# Check the deps of parent nodes to ensure that
# the chosen task produces a leaf node. Maybe
# this can be optimized some more to make the
# best possible choice, but the current algorithm
# is simple and should be near optimal for most
# common cases.
self._spinner_update()
mergeable_parent = False
parent_deps = set()
parent_deps.add(task)
for parent in mygraph.parent_nodes(task):
parent_deps.update(mygraph.child_nodes(parent,
ignore_priority=priority_range.ignore_medium_soft))
if min_parent_deps is not None and \
len(parent_deps) >= min_parent_deps:
# This task is no better than a previously selected
# task, so abort search now in order to avoid wasting
# any more cpu time on this task. This increases
# performance dramatically in cases when there are
# hundreds of blockers to solve, like when
# upgrading to a new slot of kde-meta.
mergeable_parent = None
break
if parent in mergeable_nodes and \
gather_deps(ignore_uninst_or_med_soft,
mergeable_nodes, set(), parent):
mergeable_parent = True
if not mergeable_parent:
continue
if min_parent_deps is None or \
len(parent_deps) < min_parent_deps:
min_parent_deps = len(parent_deps)
uninst_task = task
if uninst_task is not None and min_parent_deps == 1:
# This is the best possible result, so so abort search
# now in order to avoid wasting any more cpu time.
break
if uninst_task is not None:
# The uninstall is performed only after blocking
# packages have been merged on top of it. File
# collisions between blocking packages are detected
# and removed from the list of files to be uninstalled.
scheduled_uninstalls.add(uninst_task)
parent_nodes = mygraph.parent_nodes(uninst_task)
# Reverse the parent -> uninstall edges since we want
# to do the uninstall after blocking packages have
# been merged on top of it.
mygraph.remove(uninst_task)
for blocked_pkg in parent_nodes:
mygraph.add(blocked_pkg, uninst_task,
priority=BlockerDepPriority.instance)
scheduler_graph.remove_edge(uninst_task, blocked_pkg)
scheduler_graph.add(blocked_pkg, uninst_task,
priority=BlockerDepPriority.instance)
# Sometimes a merge node will render an uninstall
# node unnecessary (due to occupying the same SLOT),
# and we want to avoid executing a separate uninstall
# task in that case.
for slot_node in self._dynamic_config._package_tracker.match(
uninst_task.root, uninst_task.slot_atom):
if slot_node.operation == "merge":
mygraph.add(slot_node, uninst_task,
priority=BlockerDepPriority.instance)
# Reset the state variables for leaf node selection and
# continue trying to select leaf nodes.
prefer_asap = True
drop_satisfied = False
continue
if not selected_nodes:
# Only select root nodes as a last resort. This case should
# only trigger when the graph is nearly empty and the only
# remaining nodes are isolated (no parents or children). Since
# the nodes must be isolated, ignore_priority is not needed.
selected_nodes = get_nodes()
if not selected_nodes and not drop_satisfied:
drop_satisfied = True
continue
if not selected_nodes and myblocker_uninstalls:
# If possible, drop an uninstall task here in order to avoid
# the circular deps code path. The corresponding blocker will
# still be counted as an unresolved conflict.
uninst_task = None
for node in myblocker_uninstalls.leaf_nodes():
try:
mygraph.remove(node)
except KeyError:
pass
else:
uninst_task = node
ignored_uninstall_tasks.add(node)
break
if uninst_task is not None:
# Reset the state variables for leaf node selection and
# continue trying to select leaf nodes.
prefer_asap = True
drop_satisfied = False
continue
if not selected_nodes:
self._dynamic_config._circular_deps_for_display = mygraph
self._dynamic_config._skip_restart = True
raise self._unknown_internal_error()
# At this point, we've succeeded in selecting one or more nodes, so
# reset state variables for leaf node selection.
prefer_asap = True
drop_satisfied = False
mygraph.difference_update(selected_nodes)
for node in selected_nodes:
if isinstance(node, Package) and \
node.operation == "nomerge":
continue
# Handle interactions between blockers
# and uninstallation tasks.
solved_blockers = set()
uninst_task = None
if isinstance(node, Package) and \
"uninstall" == node.operation:
have_uninstall_task = True
uninst_task = node
else:
vardb = self._frozen_config.trees[node.root]["vartree"].dbapi
inst_pkg = vardb.match_pkgs(node.slot_atom)
if inst_pkg:
# The package will be replaced by this one, so remove
# the corresponding Uninstall task if necessary.
inst_pkg = inst_pkg[0]
uninst_task = Package(built=inst_pkg.built,
cpv=inst_pkg.cpv, installed=inst_pkg.installed,
metadata=inst_pkg._metadata,
operation="uninstall",
root_config=inst_pkg.root_config,
type_name=inst_pkg.type_name)
try:
mygraph.remove(uninst_task)
except KeyError:
pass
if uninst_task is not None and \
uninst_task not in ignored_uninstall_tasks and \
myblocker_uninstalls.contains(uninst_task):
blocker_nodes = myblocker_uninstalls.parent_nodes(uninst_task)
myblocker_uninstalls.remove(uninst_task)
# Discard any blockers that this Uninstall solves.
for blocker in blocker_nodes:
if not myblocker_uninstalls.child_nodes(blocker):
myblocker_uninstalls.remove(blocker)
if blocker not in \
self._dynamic_config._unsolvable_blockers:
solved_blockers.add(blocker)
retlist.append(node)
if (isinstance(node, Package) and \
"uninstall" == node.operation) or \
(uninst_task is not None and \
uninst_task in scheduled_uninstalls):
# Include satisfied blockers in the merge list
# since the user might be interested and also
# it serves as an indicator that blocking packages
# will be temporarily installed simultaneously.
for blocker in solved_blockers:
retlist.append(blocker)
unsolvable_blockers = set(self._dynamic_config._unsolvable_blockers.leaf_nodes())
for node in myblocker_uninstalls.root_nodes():
unsolvable_blockers.add(node)
# If any Uninstall tasks need to be executed in order
# to avoid a conflict, complete the graph with any
# dependencies that may have been initially
# neglected (to ensure that unsafe Uninstall tasks
# are properly identified and blocked from execution).
if have_uninstall_task and \
not complete and \
not unsolvable_blockers:
self._dynamic_config.myparams["complete"] = True
if '--debug' in self._frozen_config.myopts:
msg = []
msg.append("enabling 'complete' depgraph mode " + \
"due to uninstall task(s):")
msg.append("")
for node in retlist:
if isinstance(node, Package) and \
node.operation == 'uninstall':
msg.append("\t%s" % (node,))
writemsg_level("\n%s\n" % \
"".join("%s\n" % line for line in msg),
level=logging.DEBUG, noiselevel=-1)
raise self._serialize_tasks_retry("")
# Set satisfied state on blockers, but not before the
# above retry path, since we don't want to modify the
# state in that case.
for node in retlist:
if isinstance(node, Blocker):
node.satisfied = True
for blocker in unsolvable_blockers:
retlist.append(blocker)
retlist = tuple(retlist)
if unsolvable_blockers and \
not self._accept_blocker_conflicts():
self._dynamic_config._unsatisfied_blockers_for_display = unsolvable_blockers
self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
# Blockers don't trigger the _skip_restart flag, since
# backtracking may solve blockers when it solves slot
# conflicts (or by blind luck).
raise self._unknown_internal_error()
have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
if have_slot_conflict and \
not self._accept_blocker_conflicts():
self._dynamic_config._serialized_tasks_cache = retlist
self._dynamic_config._scheduler_graph = scheduler_graph
raise self._unknown_internal_error()
return retlist, scheduler_graph
def _show_circular_deps(self, mygraph):
    """
    Display a circular-dependency error to the user, including the
    merge list involved, the cycle itself, and (when the handler can
    compute them) USE-flag change suggestions that would break the
    cycle.

    @param mygraph: digraph containing the unresolved circular
        dependencies
    """
    self._dynamic_config._circular_dependency_handler = \
        circular_dependency_handler(self, mygraph)
    handler = self._dynamic_config._circular_dependency_handler

    # Force verbose tree output for this display, since the cycle is
    # much easier to understand with full dependency context.
    self._frozen_config.myopts.pop("--quiet", None)
    self._frozen_config.myopts["--verbose"] = True
    self._frozen_config.myopts["--tree"] = True
    portage.writemsg("\n\n", noiselevel=-1)
    self.display(handler.merge_list)
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    portage.writemsg(prefix + "Error: circular dependencies:\n",
        noiselevel=-1)
    portage.writemsg("\n", noiselevel=-1)

    # Fall back to a raw debug dump when the handler produced no
    # human-readable cycle message.
    if handler.circular_dep_message is None:
        handler.debug_print()
        portage.writemsg("\n", noiselevel=-1)

    if handler.circular_dep_message is not None:
        portage.writemsg(handler.circular_dep_message, noiselevel=-1)

    suggestions = handler.suggestions
    if suggestions:
        writemsg("\n\nIt might be possible to break this cycle\n", noiselevel=-1)
        if len(suggestions) == 1:
            writemsg("by applying the following change:\n", noiselevel=-1)
        else:
            writemsg("by applying " + colorize("bold", "any of") + \
                " the following changes:\n", noiselevel=-1)
        writemsg("".join(suggestions), noiselevel=-1)
        writemsg("\nNote that this change can be reverted, once the package has" + \
            " been installed.\n", noiselevel=-1)
        if handler.large_cycle_count:
            writemsg("\nNote that the dependency graph contains a lot of cycles.\n" + \
                "Several changes might be required to resolve all cycles.\n" + \
                "Temporarily changing some use flag for all packages might be the better option.\n", noiselevel=-1)
    else:
        # No automatic suggestion available; give generic advice.
        writemsg("\n\n", noiselevel=-1)
        writemsg(prefix + "Note that circular dependencies " + \
            "can often be avoided by temporarily\n", noiselevel=-1)
        writemsg(prefix + "disabling USE flags that trigger " + \
            "optional dependencies.\n", noiselevel=-1)
def _show_merge_list(self):
    """
    Display the cached serialized task list, unless that exact
    list object has already been displayed (display() records the
    last list it showed in _displayed_list).
    """
    task_list = self._dynamic_config._serialized_tasks_cache
    if task_list is None:
        return
    if self._dynamic_config._displayed_list is task_list:
        # Identical object was already shown; avoid a redundant
        # second display.
        return
    self.display(task_list)
def _show_unsatisfied_blockers(self, blockers):
    """
    Display an error for blockers that could not be resolved,
    listing each conflicting package together with the parents
    that pulled it in (to aid troubleshooting).

    @param blockers: iterable of unsatisfied Blocker nodes
    """
    self._show_merge_list()
    msg = "Error: The above package list contains " + \
        "packages which cannot be installed " + \
        "at the same time on the same system."
    prefix = colorize("BAD", " * ")
    portage.writemsg("\n", noiselevel=-1)
    for line in textwrap.wrap(msg, 70):
        portage.writemsg(prefix + line + "\n", noiselevel=-1)

    # Display the conflicting packages along with the packages
    # that pulled them in. This is helpful for troubleshooting
    # cases in which blockers don't solve automatically and
    # the reasons are not apparent from the normal merge list
    # display.
    conflict_pkgs = {}
    for blocker in blockers:
        for pkg in chain(self._dynamic_config._blocked_pkgs.child_nodes(blocker), \
            self._dynamic_config._blocker_parents.parent_nodes(blocker)):

            is_slot_conflict_pkg = False
            for conflict in self._dynamic_config._package_tracker.slot_conflicts():
                if conflict.root == pkg.root and conflict.atom == pkg.slot_atom:
                    is_slot_conflict_pkg = True
                    break
            if is_slot_conflict_pkg:
                # The slot conflict display has better noise reduction
                # than the unsatisfied blockers display, so skip
                # unsatisfied blockers display for packages involved
                # directly in slot conflicts (see bug #385391).
                continue
            parent_atoms = self._dynamic_config._parent_atoms.get(pkg)
            if not parent_atoms:
                # No recorded parents; fall back to the world-set atom
                # that blocked this package's uninstall, if any.
                atom = self._dynamic_config._blocked_world_pkgs.get(pkg)
                if atom is not None:
                    parent_atoms = set([("@selected", atom)])
            if parent_atoms:
                conflict_pkgs[pkg] = parent_atoms

    if conflict_pkgs:
        # Reduce noise by pruning packages that are only
        # pulled in by other conflict packages.
        pruned_pkgs = set()
        for pkg, parent_atoms in conflict_pkgs.items():
            relevant_parent = False
            for parent, atom in parent_atoms:
                if parent not in conflict_pkgs:
                    relevant_parent = True
                    break
            if not relevant_parent:
                pruned_pkgs.add(pkg)
        for pkg in pruned_pkgs:
            del conflict_pkgs[pkg]

    if conflict_pkgs:
        msg = []
        msg.append("\n")
        indent = " "
        for pkg, parent_atoms in conflict_pkgs.items():

            # Prefer packages that are not directly involved in a conflict.
            # It can be essential to see all the packages here, so don't
            # omit any. If the list is long, people can simply use a pager.
            preferred_parents = set()
            for parent_atom in parent_atoms:
                parent, atom = parent_atom
                if parent not in conflict_pkgs:
                    preferred_parents.add(parent_atom)

            ordered_list = list(preferred_parents)
            if len(parent_atoms) > len(ordered_list):
                # Append the conflict-involved parents after the
                # preferred ones, preserving completeness.
                for parent_atom in parent_atoms:
                    if parent_atom not in preferred_parents:
                        ordered_list.append(parent_atom)

            msg.append(indent + "%s pulled in by\n" % pkg)

            for parent_atom in ordered_list:
                parent, atom = parent_atom
                msg.append(2*indent)
                if isinstance(parent,
                    (PackageArg, AtomArg)):
                    # For PackageArg and AtomArg types, it's
                    # redundant to display the atom attribute.
                    msg.append(str(parent))
                else:
                    # Display the specific atom from SetArg or
                    # Package types.
                    if atom != atom.unevaluated_atom:
                        # Show the unevaluated atom, since it can reveal
                        # issues with conditional use-flags missing
                        # from IUSE.
                        msg.append("%s (%s) required by %s" %
                            (atom.unevaluated_atom, atom, parent))
                    else:
                        msg.append("%s required by %s" % (atom, parent))
                msg.append("\n")

            msg.append("\n")

        writemsg("".join(msg), noiselevel=-1)

    if "--quiet" not in self._frozen_config.myopts:
        show_blocker_docs_link()
def display(self, mylist, favorites=None, verbosity=None):
    """
    Display a merge list to the user via the Display formatter.

    @param mylist: sequence of nodes (packages/blockers) to display
    @param favorites: optional list of favorite atoms forwarded to
        the Display formatter (defaults to an empty list)
    @param verbosity: optional verbosity override forwarded to the
        Display formatter
    @return: whatever the Display formatter returns
    """
    # Use None as the default instead of a shared mutable list
    # (favorites=[]), so the default can never be mutated across
    # calls by downstream code. Passing [] explicitly still works.
    if favorites is None:
        favorites = []

    # This is used to prevent display_problems() from
    # redundantly displaying this exact same merge list
    # again via _show_merge_list().
    self._dynamic_config._displayed_list = mylist

    if "--tree" in self._frozen_config.myopts:
        mylist = tuple(reversed(mylist))

    # Name the formatter instance distinctly so it doesn't shadow
    # this method's own name within the body.
    displayer = Display()
    return displayer(self, mylist, favorites, verbosity)
def _display_autounmask(self, autounmask_continue=False):
"""
Display --autounmask message and optionally write it to config files
(using CONFIG_PROTECT). The message includes the comments and the changes.
"""
if self._dynamic_config._displayed_autounmask:
return
self._dynamic_config._displayed_autounmask = True
ask = "--ask" in self._frozen_config.myopts
autounmask_write = autounmask_continue or \
self._frozen_config.myopts.get("--autounmask-write",
ask) is True
autounmask_unrestricted_atoms = \
self._frozen_config.myopts.get("--autounmask-unrestricted-atoms", "n") == True
quiet = "--quiet" in self._frozen_config.myopts
pretend = "--pretend" in self._frozen_config.myopts
enter_invalid = '--ask-enter-invalid' in self._frozen_config.myopts
def check_if_latest(pkg, check_visibility=False):
    """
    Decide whether pkg is the highest available version for its
    category/package name, both overall and within its slot, across
    all configured package databases for pkg's root.

    @param pkg: the package being checked
    @param check_visibility: if True, skip packages that fail the
        visibility check (the caller uses this for USE changes,
        where a >= atom is preferred; see bug #536392 there)
    @rtype: tuple
    @return: (is_latest, is_latest_in_slot) booleans
    """
    is_latest = True
    is_latest_in_slot = True
    dbs = self._dynamic_config._filtered_trees[pkg.root]["dbs"]
    root_config = self._frozen_config.roots[pkg.root]

    for db, pkg_type, built, installed, db_keys in dbs:
        for other_pkg in self._iter_match_pkgs(root_config, pkg_type, Atom(pkg.cp)):
            if (check_visibility and
                not self._pkg_visibility_check(other_pkg)):
                continue
            if other_pkg.cp != pkg.cp:
                # old-style PROVIDE virtual means there are no
                # normal matches for this pkg_type
                break
            if other_pkg > pkg:
                is_latest = False
                if other_pkg.slot_atom == pkg.slot_atom:
                    is_latest_in_slot = False
                    break
            else:
                # iter_match_pkgs yields highest version first, so
                # there's no need to search this pkg_type any further
                break

        # A higher version in the same slot settles both answers;
        # no need to consult the remaining databases.
        if not is_latest_in_slot:
            break

    return is_latest, is_latest_in_slot
#Set of roots we have autounmask changes for.
roots = set()
masked_by_missing_keywords = False
unstable_keyword_msg = {}
for pkg in self._dynamic_config._needed_unstable_keywords:
self._show_merge_list()
if pkg in self._dynamic_config.digraph:
root = pkg.root
roots.add(root)
unstable_keyword_msg.setdefault(root, [])
is_latest, is_latest_in_slot = check_if_latest(pkg)
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
use=self._pkg_use_enabled(pkg))
for reason in mreasons:
if reason.unmask_hint and \
reason.unmask_hint.key == 'unstable keyword':
keyword = reason.unmask_hint.value
if keyword == "**":
masked_by_missing_keywords = True
unstable_keyword_msg[root].append(self._get_dep_chain_as_comment(pkg))
if autounmask_unrestricted_atoms:
if is_latest:
unstable_keyword_msg[root].append(">=%s %s\n" % (pkg.cpv, keyword))
elif is_latest_in_slot:
unstable_keyword_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
else:
unstable_keyword_msg[root].append("=%s %s\n" % (pkg.cpv, keyword))
p_mask_change_msg = {}
for pkg in self._dynamic_config._needed_p_mask_changes:
self._show_merge_list()
if pkg in self._dynamic_config.digraph:
root = pkg.root
roots.add(root)
p_mask_change_msg.setdefault(root, [])
is_latest, is_latest_in_slot = check_if_latest(pkg)
pkgsettings = self._frozen_config.pkgsettings[pkg.root]
mreasons = _get_masking_status(pkg, pkgsettings, pkg.root_config,
use=self._pkg_use_enabled(pkg))
for reason in mreasons:
if reason.unmask_hint and \
reason.unmask_hint.key == 'p_mask':
keyword = reason.unmask_hint.value
comment, filename = portage.getmaskingreason(
pkg.cpv, metadata=pkg._metadata,
settings=pkgsettings,
portdb=pkg.root_config.trees["porttree"].dbapi,
return_location=True)
p_mask_change_msg[root].append(self._get_dep_chain_as_comment(pkg))
if filename:
p_mask_change_msg[root].append("# %s:\n" % filename)
if comment:
comment = [line for line in
comment.splitlines() if line]
for line in comment:
p_mask_change_msg[root].append("%s\n" % line)
if autounmask_unrestricted_atoms:
if is_latest:
p_mask_change_msg[root].append(">=%s\n" % pkg.cpv)
elif is_latest_in_slot:
p_mask_change_msg[root].append(">=%s:%s\n" % (pkg.cpv, pkg.slot))
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
else:
p_mask_change_msg[root].append("=%s\n" % pkg.cpv)
use_changes_msg = {}
for pkg, needed_use_config_change in self._dynamic_config._needed_use_config_changes.items():
self._show_merge_list()
if pkg in self._dynamic_config.digraph:
root = pkg.root
roots.add(root)
use_changes_msg.setdefault(root, [])
# NOTE: For USE changes, call check_if_latest with
# check_visibility=True, since we want to generate
# a >= atom if possible. Don't do this for keyword
# or mask changes, since that may cause undesired
# versions to be unmasked! See bug #536392.
is_latest, is_latest_in_slot = check_if_latest(
pkg, check_visibility=True)
changes = needed_use_config_change[1]
adjustments = []
for flag, state in changes.items():
if state:
adjustments.append(flag)
else:
adjustments.append("-" + flag)
use_changes_msg[root].append(self._get_dep_chain_as_comment(pkg, unsatisfied_dependency=True))
if is_latest:
use_changes_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
elif is_latest_in_slot:
use_changes_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(adjustments)))
else:
use_changes_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(adjustments)))
license_msg = {}
for pkg, missing_licenses in self._dynamic_config._needed_license_changes.items():
self._show_merge_list()
if pkg in self._dynamic_config.digraph:
root = pkg.root
roots.add(root)
license_msg.setdefault(root, [])
is_latest, is_latest_in_slot = check_if_latest(pkg)
license_msg[root].append(self._get_dep_chain_as_comment(pkg))
if is_latest:
license_msg[root].append(">=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
elif is_latest_in_slot:
license_msg[root].append(">=%s:%s %s\n" % (pkg.cpv, pkg.slot, " ".join(sorted(missing_licenses))))
else:
license_msg[root].append("=%s %s\n" % (pkg.cpv, " ".join(sorted(missing_licenses))))
def find_config_file(abs_user_config, file_name):
"""
Searches /etc/portage for an appropriate file to append changes to.
If the file_name is a file it is returned, if it is a directory, the
last file in it is returned. Order of traversal is the identical to
portage.util.grablines(recursive=True).
file_name - String containing a file name like "package.use"
return value - String. Absolute path of file to write to. None if
no suitable file exists.
"""
file_path = os.path.join(abs_user_config, file_name)
try:
os.lstat(file_path)
except OSError as e:
if e.errno == errno.ENOENT:
# The file doesn't exist, so we'll
# simply create it.
return file_path
# Disk or file system trouble?
return None
last_file_path = None
stack = [file_path]
while stack:
p = stack.pop()
try:
st = os.stat(p)
except OSError:
pass
else:
if stat.S_ISREG(st.st_mode):
last_file_path = p
elif stat.S_ISDIR(st.st_mode):
if os.path.basename(p) in VCS_DIRS:
continue
try:
contents = os.listdir(p)
except OSError:
pass
else:
contents.sort(reverse=True)
for child in contents:
if child.startswith(".") or \
child.endswith("~"):
continue
stack.append(os.path.join(p, child))
# If the directory is empty add a file with name
# pattern file_name.default
if last_file_path is None:
last_file_path = os.path.join(file_path, file_path, "zz-autounmask")
with open(last_file_path, "a+") as default:
default.write("# " + file_name)
return last_file_path
write_to_file = autounmask_write and not pretend
#Make sure we have a file to write to before doing any write.
file_to_write_to = {}
problems = []
if write_to_file:
for root in roots:
settings = self._frozen_config.roots[root].settings
abs_user_config = os.path.join(
settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
if root in unstable_keyword_msg:
if not os.path.exists(os.path.join(abs_user_config,
"package.keywords")):
filename = "package.accept_keywords"
else:
filename = "package.keywords"
file_to_write_to[(abs_user_config, "package.keywords")] = \
find_config_file(abs_user_config, filename)
if root in p_mask_change_msg:
file_to_write_to[(abs_user_config, "package.unmask")] = \
find_config_file(abs_user_config, "package.unmask")
if root in use_changes_msg:
file_to_write_to[(abs_user_config, "package.use")] = \
find_config_file(abs_user_config, "package.use")
if root in license_msg:
file_to_write_to[(abs_user_config, "package.license")] = \
find_config_file(abs_user_config, "package.license")
for (abs_user_config, f), path in file_to_write_to.items():
if path is None:
problems.append("!!! No file to write for '%s'\n" % os.path.join(abs_user_config, f))
write_to_file = not problems
def format_msg(lines):
    # Colorize every non-comment line for terminal display;
    # lines starting with "#" pass through untouched.
    formatted = []
    for line in lines:
        if line.startswith("#"):
            formatted.append(line)
        else:
            formatted.append(colorize("INFORM", line.rstrip()) + "\n")
    return "".join(formatted)
for root in roots:
settings = self._frozen_config.roots[root].settings
abs_user_config = os.path.join(
settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
if len(roots) > 1:
writemsg("\nFor %s:\n" % abs_user_config, noiselevel=-1)
def _writemsg(reason, file):
writemsg(('\nThe following %s are necessary to proceed:\n'
' (see "%s" in the portage(5) man page for more details)\n')
% (colorize('BAD', reason), file), noiselevel=-1)
if root in unstable_keyword_msg:
_writemsg('keyword changes', 'package.accept_keywords')
writemsg(format_msg(unstable_keyword_msg[root]), noiselevel=-1)
if root in p_mask_change_msg:
_writemsg('mask changes', 'package.unmask')
writemsg(format_msg(p_mask_change_msg[root]), noiselevel=-1)
if root in use_changes_msg:
_writemsg('USE changes', 'package.use')
writemsg(format_msg(use_changes_msg[root]), noiselevel=-1)
if root in license_msg:
_writemsg('license changes', 'package.license')
writemsg(format_msg(license_msg[root]), noiselevel=-1)
protect_obj = {}
if write_to_file and not autounmask_continue:
for root in roots:
settings = self._frozen_config.roots[root].settings
protect_obj[root] = ConfigProtect(
settings["PORTAGE_CONFIGROOT"],
shlex_split(settings.get("CONFIG_PROTECT", "")),
shlex_split(settings.get("CONFIG_PROTECT_MASK", "")),
case_insensitive=("case-insensitive-fs"
in settings.features))
def write_changes(root, changes, file_to_write_to):
file_contents = None
try:
with io.open(
_unicode_encode(file_to_write_to,
encoding=_encodings['fs'], errors='strict'),
mode='r', encoding=_encodings['content'],
errors='replace') as f:
file_contents = f.readlines()
except IOError as e:
if e.errno == errno.ENOENT:
file_contents = []
else:
problems.append("!!! Failed to read '%s': %s\n" % \
(file_to_write_to, e))
if file_contents is not None:
file_contents.extend(changes)
if (not autounmask_continue and
protect_obj[root].isprotected(file_to_write_to)):
# We want to force new_protect_filename to ensure
# that the user will see all our changes via
# dispatch-conf, even if file_to_write_to doesn't
# exist yet, so we specify force=True.
file_to_write_to = new_protect_filename(file_to_write_to,
force=True)
try:
write_atomic(file_to_write_to, "".join(file_contents))
except PortageException:
problems.append("!!! Failed to write '%s'\n" % file_to_write_to)
if not quiet and (p_mask_change_msg or masked_by_missing_keywords):
msg = [
"",
"NOTE: The --autounmask-keep-masks option will prevent emerge",
" from creating package.unmask or ** keyword changes."
]
for line in msg:
if line:
line = colorize("INFORM", line)
writemsg(line + "\n", noiselevel=-1)
if ask and write_to_file and file_to_write_to:
prompt = "\nWould you like to add these " + \
"changes to your config files?"
if self.query(prompt, enter_invalid) == 'No':
write_to_file = False
if write_to_file and file_to_write_to:
for root in roots:
settings = self._frozen_config.roots[root].settings
abs_user_config = os.path.join(
settings["PORTAGE_CONFIGROOT"], USER_CONFIG_PATH)
ensure_dirs(abs_user_config)
if root in unstable_keyword_msg:
write_changes(root, unstable_keyword_msg[root],
file_to_write_to.get((abs_user_config, "package.keywords")))
if root in p_mask_change_msg:
write_changes(root, p_mask_change_msg[root],
file_to_write_to.get((abs_user_config, "package.unmask")))
if root in use_changes_msg:
write_changes(root, use_changes_msg[root],
file_to_write_to.get((abs_user_config, "package.use")))
if root in license_msg:
write_changes(root, license_msg[root],
file_to_write_to.get((abs_user_config, "package.license")))
if problems:
writemsg("\nThe following problems occurred while writing autounmask changes:\n", \
noiselevel=-1)
writemsg("".join(problems), noiselevel=-1)
elif write_to_file and roots:
writemsg("\nAutounmask changes successfully written.\n",
noiselevel=-1)
if autounmask_continue:
return True
for root in roots:
chk_updated_cfg_files(root,
[os.path.join(os.sep, USER_CONFIG_PATH)])
elif not pretend and not autounmask_write and roots:
writemsg("\nUse --autounmask-write to write changes to config files (honoring\n"
"CONFIG_PROTECT). Carefully examine the list of proposed changes,\n"
"paying special attention to mask or keyword changes that may expose\n"
"experimental or unstable packages.\n",
noiselevel=-1)
if self._dynamic_config._autounmask_backtrack_disabled:
msg = [
"In order to avoid wasting time, backtracking has terminated early",
"due to the above autounmask change(s). The --autounmask-backtrack=y",
"option can be used to force further backtracking, but there is no",
"guarantee that it will produce a solution.",
]
writemsg("\n", noiselevel=-1)
for line in msg:
writemsg(" %s %s\n" % (colorize("WARN", "*"), line),
noiselevel=-1)
def display_problems(self):
	"""
	Display problems with the dependency graph such as slot collisions.
	This is called internally by display() to show the problems _after_
	the merge list where it is most likely to be seen, but if display()
	is not going to be called then this method should be called explicitly
	to ensure that the user is notified of problems with the graph.

	All output goes through writemsg()/writemsg_level() with
	noiselevel=-1 so it is shown even in quiet mode.
	"""

	if self._dynamic_config._circular_deps_for_display is not None:
		self._show_circular_deps(
			self._dynamic_config._circular_deps_for_display)

	unresolved_conflicts = False
	have_slot_conflict = any(self._dynamic_config._package_tracker.slot_conflicts())
	if have_slot_conflict:
		unresolved_conflicts = True
		self._show_slot_collision_notice()
	if self._dynamic_config._unsatisfied_blockers_for_display is not None:
		unresolved_conflicts = True
		self._show_unsatisfied_blockers(
			self._dynamic_config._unsatisfied_blockers_for_display)

	# Only show missed updates if there are no unresolved conflicts,
	# since they may be irrelevant after the conflicts are solved.
	if not unresolved_conflicts:
		self._show_missed_update()

	if self._frozen_config.myopts.get("--verbose-slot-rebuilds", 'y') != 'n':
		self._compute_abi_rebuild_info()
		self._show_abi_rebuild_info()

	self._show_ignored_binaries()

	self._display_autounmask()

	for depgraph_sets in self._dynamic_config.sets.values():
		for pset in depgraph_sets.sets.values():
			for error_msg in pset.errors:
				writemsg_level("%s\n" % (error_msg,),
					level=logging.ERROR, noiselevel=-1)

	# TODO: Add generic support for "set problem" handlers so that
	# the below warnings aren't special cases for world only.

	if self._dynamic_config._missing_args:
		world_problems = False
		if "world" in self._dynamic_config.sets[
			self._frozen_config.target_root].sets:
			# Filter out indirect members of world (from nested sets)
			# since only direct members of world are desired here.
			world_set = self._frozen_config.roots[self._frozen_config.target_root].sets["selected"]
			for arg, atom in self._dynamic_config._missing_args:
				if arg.name in ("selected", "world") and atom in world_set:
					world_problems = True
					break

		if world_problems:
			writemsg("\n!!! Problems have been " + \
				"detected with your world file\n",
				noiselevel=-1)
			writemsg("!!! Please run " + \
				green("emaint --check world")+"\n\n",
				noiselevel=-1)

	if self._dynamic_config._missing_args:
		writemsg("\n" + colorize("BAD", "!!!") + \
			" Ebuilds for the following packages are either all\n",
			noiselevel=-1)
		writemsg(colorize("BAD", "!!!") + \
			" masked or don't exist:\n",
			noiselevel=-1)
		writemsg(" ".join(str(atom) for arg, atom in \
			self._dynamic_config._missing_args) + "\n",
			noiselevel=-1)

	if self._dynamic_config._pprovided_args:
		# Group offending arguments by the set (or plain "args") that
		# pulled them in, so each line can show its provenance.
		arg_refs = {}
		for arg, atom in self._dynamic_config._pprovided_args:
			if isinstance(arg, SetArg):
				parent = arg.name
				arg_atom = (atom, atom)
			else:
				parent = "args"
				arg_atom = (arg.arg, atom)
			refs = arg_refs.setdefault(arg_atom, [])
			if parent not in refs:
				refs.append(parent)
		msg = []
		msg.append(bad("\nWARNING: "))
		if len(self._dynamic_config._pprovided_args) > 1:
			msg.append("Requested packages will not be " + \
				"merged because they are listed in\n")
		else:
			msg.append("A requested package will not be " + \
				"merged because it is listed in\n")
		msg.append("package.provided:\n\n")
		problems_sets = set()
		for (arg, atom), refs in arg_refs.items():
			ref_string = ""
			if refs:
				problems_sets.update(refs)
				refs.sort()
				ref_string = ", ".join(["'%s'" % name for name in refs])
				ref_string = " pulled in by " + ref_string
			msg.append("  %s%s\n" % (colorize("INFORM", str(arg)), ref_string))
		msg.append("\n")
		if "selected" in problems_sets or "world" in problems_sets:
			msg.append("This problem can be solved in one of the following ways:\n\n")
			msg.append("  A) Use emaint to clean offending packages from world (if not installed).\n")
			msg.append("  B) Uninstall offending packages (cleans them from world).\n")
			msg.append("  C) Remove offending entries from package.provided.\n\n")
			msg.append("The best course of action depends on the reason that an offending\n")
			msg.append("package.provided entry exists.\n\n")
		writemsg("".join(msg), noiselevel=-1)

	masked_packages = []
	for pkg in self._dynamic_config._masked_license_updates:
		root_config = pkg.root_config
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
		mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
		masked_packages.append((root_config, pkgsettings,
			pkg.cpv, pkg.repo, pkg._metadata, mreasons))
	if masked_packages:
		writemsg("\n" + colorize("BAD", "!!!") + \
			" The following updates are masked by LICENSE changes:\n",
			noiselevel=-1)
		show_masked_packages(masked_packages)
		show_mask_docs()
		writemsg("\n", noiselevel=-1)

	masked_packages = []
	for pkg in self._dynamic_config._masked_installed:
		root_config = pkg.root_config
		pkgsettings = self._frozen_config.pkgsettings[pkg.root]
		# NOTE(review): this previously passed the bound method
		# self._pkg_use_enabled itself as use=, unlike the license
		# loop above which passes self._pkg_use_enabled(pkg). Call
		# it here for consistency so the enabled USE flags (not a
		# callable) are handed to get_masking_status.
		mreasons = get_masking_status(pkg, pkgsettings, root_config, use=self._pkg_use_enabled(pkg))
		masked_packages.append((root_config, pkgsettings,
			pkg.cpv, pkg.repo, pkg._metadata, mreasons))
	if masked_packages:
		writemsg("\n" + colorize("BAD", "!!!") + \
			" The following installed packages are masked:\n",
			noiselevel=-1)
		show_masked_packages(masked_packages)
		show_mask_docs()
		writemsg("\n", noiselevel=-1)

	for pargs, kwargs in self._dynamic_config._unsatisfied_deps_for_display:
		self._show_unsatisfied_dep(*pargs, **kwargs)

	if self._dynamic_config._buildpkgonly_deps_unsatisfied:
		self._show_merge_list()
		writemsg("\n!!! --buildpkgonly requires all "
			"dependencies to be merged.\n", noiselevel=-1)
		writemsg("!!! Cannot merge requested packages. "
			"Merge deps and try again.\n\n", noiselevel=-1)
def saveNomergeFavorites(self):
	"""Find atoms in favorites that are not in the mergelist and add them
	to the world file if necessary.

	Skipped entirely for ephemeral operations (--pretend, --oneshot,
	fetch-only modes, etc.) that must not modify the world file.
	"""
	for x in ("--buildpkgonly", "--fetchonly", "--fetch-all-uri",
		"--oneshot", "--onlydeps", "--pretend"):
		if x in self._frozen_config.myopts:
			return
	root_config = self._frozen_config.roots[self._frozen_config.target_root]
	world_set = root_config.sets["selected"]

	# Lock the world set while it is read and updated, if the set
	# implementation supports locking.
	world_locked = False
	if hasattr(world_set, "lock"):
		world_set.lock()
		world_locked = True

	if hasattr(world_set, "load"):
		world_set.load() # maybe it's changed on disk

	args_set = self._dynamic_config.sets[
		self._frozen_config.target_root].sets['__non_set_args__']
	added_favorites = set()
	# Collect world-file candidates from argument nodes that ended up
	# as "nomerge" (already satisfied), restricted to the target root.
	for x in self._dynamic_config._set_nodes:
		if x.operation != "nomerge":
			continue

		if x.root != root_config.root:
			continue

		try:
			myfavkey = create_world_atom(x, args_set, root_config)
			if myfavkey:
				if myfavkey in added_favorites:
					continue
				added_favorites.add(myfavkey)
		except portage.exception.InvalidDependString as e:
			writemsg("\n\n!!! '%s' has invalid PROVIDE: %s\n" % \
				(x.cpv, e), noiselevel=-1)
			writemsg("!!! see '%s'\n\n" % os.path.join(
				x.root, portage.VDB_PATH, x.cpv, "PROVIDE"), noiselevel=-1)
			del e
	all_added = []
	# Also record package sets given as arguments (prefixed with
	# SETPREFIX) unless they are internal, non-world-candidates, or
	# already present in the world set.
	for arg in self._dynamic_config._initial_arg_list:
		if not isinstance(arg, SetArg):
			continue
		if arg.root_config.root != root_config.root:
			continue
		if arg.internal:
			# __auto_* sets
			continue
		k = arg.name
		if k in ("selected", "world") or \
			not root_config.sets[k].world_candidate:
			continue
		s = SETPREFIX + k
		if s in world_set:
			continue
		all_added.append(SETPREFIX + k)
	all_added.extend(added_favorites)
	all_added.sort()
	if all_added:
		skip = False
		# With --ask, show the candidates and let the user veto the
		# world-file update before anything is written.
		if "--ask" in self._frozen_config.myopts:
			writemsg_stdout("\n", noiselevel=-1)
			for a in all_added:
				writemsg_stdout(" %s %s\n" % (colorize("GOOD", "*"), a),
					noiselevel=-1)
			writemsg_stdout("\n", noiselevel=-1)
			prompt = "Would you like to add these packages to your world " \
				"favorites?"
			enter_invalid = '--ask-enter-invalid' in \
				self._frozen_config.myopts
			if self.query(prompt, enter_invalid) == "No":
				skip = True

		if not skip:
			for a in all_added:
				if a.startswith(SETPREFIX):
					filename = "world_sets"
				else:
					filename = "world"
				writemsg_stdout(
					">>> Recording %s in \"%s\" favorites file...\n" %
					(colorize("INFORM", _unicode(a)), filename), noiselevel=-1)
			world_set.update(all_added)

	if world_locked:
		world_set.unlock()
def _loadResumeCommand(self, resume_data, skip_masked=True,
	skip_missing=True):
	"""
	Add a resume command to the graph and validate it in the process. This
	will raise a PackageNotFound exception if a package is not available.

	@param resume_data: the saved resume state; expected to be a dict
		with "mergelist" (list of [pkg_type, root, cpv, action]
		entries) and optional "favorites" keys
	@param skip_masked: collect masked merge entries as masked_tasks
		instead of displaying them as unsatisfied deps
	@param skip_missing: silently skip mergelist entries whose package
		no longer exists, instead of raising PackageNotFound
	@rtype: bool
	@return: True if the resume list was loaded successfully
	"""

	self._load_vdb()

	if not isinstance(resume_data, dict):
		return False

	mergelist = resume_data.get("mergelist")
	if not isinstance(mergelist, list):
		mergelist = []

	favorites = resume_data.get("favorites")
	if isinstance(favorites, list):
		args = self._load_favorites(favorites)
	else:
		args = []

	serialized_tasks = []
	masked_tasks = []
	for x in mergelist:
		# Ignore malformed entries and anything other than a pending
		# "merge" action for a known package type.
		if not (isinstance(x, list) and len(x) == 4):
			continue
		pkg_type, myroot, pkg_key, action = x
		if pkg_type not in self.pkg_tree_map:
			continue
		if action != "merge":
			continue
		root_config = self._frozen_config.roots[myroot]

		# Use the resume "favorites" list to see if a repo was specified
		# for this package.
		depgraph_sets = self._dynamic_config.sets[root_config.root]
		repo = None
		for atom in depgraph_sets.atoms.getAtoms():
			if atom.repo and portage.dep.match_from_list(atom, [pkg_key]):
				repo = atom.repo
				break

		atom = "=" + pkg_key
		if repo:
			atom = atom + _repo_separator + repo

		try:
			atom = Atom(atom, allow_repo=True)
		except InvalidAtom:
			continue

		# Pick the first visible, non-excluded match; fall back to the
		# last candidate iterated if none qualifies.
		pkg = None
		for pkg in self._iter_match_pkgs(root_config, pkg_type, atom):
			if not self._pkg_visibility_check(pkg) or \
				self._frozen_config.excluded_pkgs.findAtomForPackage(pkg,
					modified_use=self._pkg_use_enabled(pkg)):
				continue
			break

		if pkg is None:
			# It does not exist or it is corrupt.
			if skip_missing:
				# TODO: log these somewhere
				continue
			raise portage.exception.PackageNotFound(pkg_key)

		if "merge" == pkg.operation and \
			self._frozen_config.excluded_pkgs.findAtomForPackage(pkg, \
			modified_use=self._pkg_use_enabled(pkg)):
			continue

		if "merge" == pkg.operation and not self._pkg_visibility_check(pkg):
			if skip_masked:
				masked_tasks.append(Dependency(root=pkg.root, parent=pkg))
			else:
				self._dynamic_config._unsatisfied_deps_for_display.append(
					((pkg.root, "="+pkg.cpv), {"myparent":None}))

		self._dynamic_config._package_tracker.add_pkg(pkg)
		serialized_tasks.append(pkg)
		self._spinner_update()

	if self._dynamic_config._unsatisfied_deps_for_display:
		return False

	if not serialized_tasks or "--nodeps" in self._frozen_config.myopts:
		self._dynamic_config._serialized_tasks_cache = serialized_tasks
		self._dynamic_config._scheduler_graph = self._dynamic_config.digraph
	else:
		self._select_package = self._select_pkg_from_graph
		self._dynamic_config.myparams["selective"] = True
		# Always traverse deep dependencies in order to account for
		# potentially unsatisfied dependencies of installed packages.
		# This is necessary for correct --keep-going or --resume operation
		# in case a package from a group of circularly dependent packages
		# fails. In this case, a package which has recently been installed
		# may have an unsatisfied circular dependency (pulled in by
		# PDEPEND, for example). So, even though a package is already
		# installed, it may not have all of its dependencies satisfied, so
		# it may not be usable. If such a package is in the subgraph of
		# deep dependencies of a scheduled build, that build needs to
		# be cancelled. In order for this type of situation to be
		# recognized, deep traversal of dependencies is required.
		self._dynamic_config.myparams["deep"] = True

		for task in serialized_tasks:
			if isinstance(task, Package) and \
				task.operation == "merge":
				if not self._add_pkg(task, None):
					return False

		# Packages for argument atoms need to be explicitly
		# added via _add_pkg() so that they are included in the
		# digraph (needed at least for --tree display).
		for arg in self._expand_set_args(args, add_to_digraph=True):
			for atom in arg.pset.getAtoms():
				pkg, existing_node = self._select_package(
					arg.root_config.root, atom)
				if existing_node is None and \
					pkg is not None:
					if not self._add_pkg(pkg, Dependency(atom=atom,
						root=pkg.root, parent=arg)):
						return False

		# Allow unsatisfied deps here to avoid showing a masking
		# message for an unsatisfied dep that isn't necessarily
		# masked.
		if not self._create_graph(allow_unsatisfied=True):
			return False

		unsatisfied_deps = []
		for dep in self._dynamic_config._unsatisfied_deps:
			if not isinstance(dep.parent, Package):
				continue
			if dep.parent.operation == "merge":
				unsatisfied_deps.append(dep)
				continue

			# For unsatisfied deps of installed packages, only account for
			# them if they are in the subgraph of dependencies of a package
			# which is scheduled to be installed.
			unsatisfied_install = False
			traversed = set()
			dep_stack = self._dynamic_config.digraph.parent_nodes(dep.parent)
			while dep_stack:
				node = dep_stack.pop()
				if not isinstance(node, Package):
					continue
				if node.operation == "merge":
					unsatisfied_install = True
					break
				if node in traversed:
					continue
				traversed.add(node)
				dep_stack.extend(self._dynamic_config.digraph.parent_nodes(node))

			if unsatisfied_install:
				unsatisfied_deps.append(dep)

		if masked_tasks or unsatisfied_deps:
			# This probably means that a required package
			# was dropped via --skipfirst. It makes the
			# resume list invalid, so convert it to a
			# UnsatisfiedResumeDep exception.
			raise self.UnsatisfiedResumeDep(self,
				masked_tasks + unsatisfied_deps)
		self._dynamic_config._serialized_tasks_cache = None
		try:
			self.altlist()
		except self._unknown_internal_error:
			return False

	return True
def _load_favorites(self, favorites):
	"""
	Use a list of favorites to resume state from a
	previous select_files() call. This creates similar
	DependencyArg instances to those that would have
	been created by the original select_files() call.
	This allows Package instances to be matched with
	DependencyArg instances during graph creation.
	"""
	root_config = self._frozen_config.roots[self._frozen_config.target_root]
	sets = root_config.sets
	depgraph_sets = self._dynamic_config.sets[root_config.root]
	args = []
	for favorite in favorites:
		# Malformed (non-string) entries are silently dropped.
		if not isinstance(favorite, basestring):
			continue
		# Bare "system"/"world" are shorthand for the prefixed set names.
		if favorite in ("system", "world"):
			favorite = SETPREFIX + favorite
		if not favorite.startswith(SETPREFIX):
			# A plain package atom; skip anything that fails to parse.
			try:
				atom = Atom(favorite, allow_repo=True)
			except portage.exception.InvalidAtom:
				continue
			args.append(AtomArg(arg=atom, atom=atom,
				root_config=root_config))
			continue
		set_name = favorite[len(SETPREFIX):]
		# Skip unknown sets and sets already registered for this graph.
		if set_name not in sets or set_name in depgraph_sets.sets:
			continue
		pset = sets[set_name]
		depgraph_sets.sets[set_name] = pset
		args.append(SetArg(arg=favorite, pset=pset,
			root_config=root_config))

	self._set_args(args)
	return args
class UnsatisfiedResumeDep(portage.exception.PortageException):
	"""
	A dependency of a resume list is not installed. This
	can occur when a required package is dropped from the
	merge list via --skipfirst.
	"""
	def __init__(self, depgraph, value):
		portage.exception.PortageException.__init__(self, value)
		# Keep a reference to the depgraph so callers can inspect the
		# graph state that produced this failure.
		self.depgraph = depgraph
class _internal_exception(portage.exception.PortageException):
	"""Base class for exceptions used internally by the depgraph."""
	def __init__(self, value=""):
		portage.exception.PortageException.__init__(self, value)
class _unknown_internal_error(_internal_exception):
	"""
	Used by the depgraph internally to terminate graph creation.
	The specific reason for the failure should have been dumped
	to stderr; unfortunately, the exact reason for the failure
	may not be known.
	"""
class _serialize_tasks_retry(_internal_exception):
	"""
	This is raised by the _serialize_tasks() method when it needs to
	be called again for some reason. The only case that it's currently
	used for is when neglected dependencies need to be added to the
	graph in order to avoid making a potentially unsafe decision.
	"""
class _backtrack_mask(_internal_exception):
	"""
	This is raised by _show_unsatisfied_dep() when it's called with
	check_backtrack=True and a matching package has been masked by
	backtracking.
	"""
class _autounmask_breakage(_internal_exception):
	"""
	This is raised by _show_unsatisfied_dep() when it's called with
	check_autounmask_breakage=True and a matching package has
	been disqualified due to autounmask changes.
	"""
def need_restart(self):
	"""
	Return a truthy value when backtracking has flagged that graph
	creation must be restarted and the restart has not been
	explicitly suppressed via _skip_restart.
	"""
	dynamic_config = self._dynamic_config
	restart_requested = dynamic_config._need_restart
	restart_suppressed = dynamic_config._skip_restart
	return restart_requested and not restart_suppressed
def need_config_change(self):
	"""
	Returns true if backtracking should terminate due to a needed
	configuration change.

	NOTE: this method has a side effect — it lazily constructs
	self._dynamic_config._slot_conflict_handler when slot conflicts
	exist and blocker conflicts are not accepted.
	"""
	if (self._dynamic_config._success_without_autounmask or
		self._dynamic_config._required_use_unsatisfied):
		return True

	if (self._dynamic_config._slot_conflict_handler is None and
		not self._accept_blocker_conflicts() and
		any(self._dynamic_config._package_tracker.slot_conflicts())):
		self._dynamic_config._slot_conflict_handler = slot_conflict_handler(self)
		if self._dynamic_config._slot_conflict_handler.changes:
			# Terminate backtracking early if the slot conflict
			# handler finds some changes to suggest. The case involving
			# sci-libs/L and sci-libs/M in SlotCollisionTestCase will
			# otherwise fail with --autounmask-backtrack=n, since
			# backtracking will eventually lead to some autounmask
			# changes. Changes suggested by the slot conflict handler
			# are more likely to be useful.
			return True

	if (self._dynamic_config._allow_backtracking and
		self._frozen_config.myopts.get("--autounmask-backtrack") != 'y' and
		self._have_autounmask_changes()):

		if (self._frozen_config.myopts.get("--autounmask-continue") is True and
			self._frozen_config.myopts.get("--autounmask-backtrack") != 'n'):
			# --autounmask-continue implies --autounmask-backtrack=y behavior,
			# for backward compatibility.
			return False

		# This disables backtracking when there are autounmask
		# config changes. The display_problems method will notify
		# the user that --autounmask-backtrack=y can be used to
		# force backtracking in this case.
		self._dynamic_config._autounmask_backtrack_disabled = True
		return True

	return False
def _have_autounmask_changes(self):
digraph_nodes = self._dynamic_config.digraph.nodes
return (any(x in digraph_nodes for x in
self._dynamic_config._needed_unstable_keywords) or
any(x in digraph_nodes for x in
self._dynamic_config._needed_p_mask_changes) or
any(x in digraph_nodes for x in
self._dynamic_config._needed_use_config_changes) or
any(x in digraph_nodes for x in
self._dynamic_config._needed_license_changes))
def need_config_reload(self):
	"""Return the flag indicating that configuration must be reloaded."""
	dynamic_config = self._dynamic_config
	return dynamic_config._need_config_reload
def autounmask_breakage_detected(self):
	"""
	Return True if re-displaying the unsatisfied deps with
	check_autounmask_breakage=True raises _autounmask_breakage,
	meaning a matching package was disqualified by autounmask
	changes; otherwise return False.
	"""
	unsatisfied = self._dynamic_config._unsatisfied_deps_for_display
	try:
		for dep_args, dep_kwargs in unsatisfied:
			self._show_unsatisfied_dep(
				*dep_args, check_autounmask_breakage=True, **dep_kwargs)
	except self._autounmask_breakage:
		return True
	return False
def get_backtrack_infos(self):
	"""Return the backtracking information accumulated for this run."""
	dynamic_config = self._dynamic_config
	return dynamic_config._backtrack_infos
class _dep_check_composite_db(dbapi):
	"""
	A dbapi-like interface that is optimized for use in dep_check() calls.
	This is built on top of the existing depgraph package selection logic.
	Some packages that have been added to the graph may be masked from this
	view in order to influence the atom preference selection that occurs
	via dep_check().
	"""
	def __init__(self, depgraph, root):
		dbapi.__init__(self)
		self._depgraph = depgraph
		self._root = root
		# Caches keyed on (atom, atom.unevaluated_atom); cleared via
		# _clear_cache() whenever graph state changes.
		self._match_cache = {}
		self._cpv_pkg_map = {}

	def _clear_cache(self):
		# Invalidate both the match cache and the cpv -> Package map.
		self._match_cache.clear()
		self._cpv_pkg_map.clear()

	def cp_list(self, cp):
		"""
		Emulate cp_list just so it can be used to check for existence
		of new-style virtuals. Since it's a waste of time to return
		more than one cpv for this use case, a maximum of one cpv will
		be returned.
		"""
		if isinstance(cp, Atom):
			atom = cp
		else:
			atom = Atom(cp)
		ret = []
		for pkg in self._depgraph._iter_match_pkgs_any(
			self._depgraph._frozen_config.roots[self._root], atom):
			if pkg.cp == cp:
				ret.append(pkg.cpv)
				break

		return ret

	def match_pkgs(self, atom):
		"""
		Return the list of Package instances matching atom that should
		be offered to dep_check(), with visibility/slot-conflict
		filtering applied. Results are cached per atom.
		"""
		cache_key = (atom, atom.unevaluated_atom)
		ret = self._match_cache.get(cache_key)
		if ret is not None:
			for pkg in ret:
				self._cpv_pkg_map[pkg.cpv] = pkg
			# Return a copy so callers can't mutate the cache.
			return ret[:]

		atom_set = InternalPackageSet(initial_atoms=(atom,))
		ret = []
		pkg, existing = self._depgraph._select_package(self._root, atom)

		if pkg is not None and self._visible(pkg, atom_set):
			ret.append(pkg)

		if pkg is not None and \
			atom.sub_slot is None and \
			pkg.cp.startswith("virtual/") and \
			(("remove" not in self._depgraph._dynamic_config.myparams and
			"--update" not in self._depgraph._frozen_config.myopts) or
			not ret):
			# For new-style virtual lookahead that occurs inside dep_check()
			# for bug #141118, examine all slots. This is needed so that newer
			# slots will not unnecessarily be pulled in when a satisfying lower
			# slot is already installed. For example, if virtual/jdk-1.5 is
			# satisfied via gcj-jdk then there's no need to pull in a newer
			# slot to satisfy a virtual/jdk dependency, unless --update is
			# enabled.
			sub_slots = set()
			resolved_sub_slots = set()
			for virt_pkg in self._depgraph._iter_match_pkgs_any(
				self._depgraph._frozen_config.roots[self._root], atom):
				if virt_pkg.cp != pkg.cp:
					continue
				sub_slots.add((virt_pkg.slot, virt_pkg.sub_slot))

			sub_slot_key = (pkg.slot, pkg.sub_slot)
			if ret:
				# We've added pkg to ret already, and only one package
				# per slot/sub_slot is desired here.
				sub_slots.discard(sub_slot_key)
				resolved_sub_slots.add(sub_slot_key)
			else:
				sub_slots.add(sub_slot_key)

			while sub_slots:
				slot, sub_slot = sub_slots.pop()
				slot_atom = atom.with_slot("%s/%s" % (slot, sub_slot))
				pkg, existing = self._depgraph._select_package(
					self._root, slot_atom)
				if not pkg:
					continue
				if not self._visible(pkg, atom_set,
					avoid_slot_conflict=False):
					# Try to force a virtual update to be pulled in
					# when appropriate for bug #526160.
					selected = pkg
					for candidate in \
						self._iter_virt_update(pkg, atom_set):
						if candidate.slot != slot:
							continue
						if (candidate.slot, candidate.sub_slot) in \
							resolved_sub_slots:
							continue
						if selected is None or \
							selected < candidate:
							selected = candidate
					if selected is pkg:
						continue
					pkg = selected

				resolved_sub_slots.add((pkg.slot, pkg.sub_slot))
				ret.append(pkg)

			if len(ret) > 1:
				ret = sorted(set(ret))

		self._match_cache[cache_key] = ret
		for pkg in ret:
			self._cpv_pkg_map[pkg.cpv] = pkg
		return ret[:]

	def _visible(self, pkg, atom_set, avoid_slot_conflict=True,
		probe_virt_update=True):
		"""
		Decide whether pkg should be visible to dep_check() for the
		given atom_set. Returns False to mask the package from the
		composite view.
		"""
		if pkg.installed and not self._depgraph._want_installed_pkg(pkg):
			return False
		if pkg.installed and \
			(pkg.masks or not self._depgraph._pkg_visibility_check(pkg)):
			# Account for packages with masks (like KEYWORDS masks)
			# that are usually ignored in visibility checks for
			# installed packages, in order to handle cases like
			# bug #350285.
			myopts = self._depgraph._frozen_config.myopts
			use_ebuild_visibility = myopts.get(
				'--use-ebuild-visibility', 'n') != 'n'
			avoid_update = "--update" not in myopts and \
				"remove" not in self._depgraph._dynamic_config.myparams
			usepkgonly = "--usepkgonly" in myopts
			if not avoid_update:
				if not use_ebuild_visibility and usepkgonly:
					return False
				elif not self._depgraph._equiv_ebuild_visible(pkg):
					return False

		if pkg.cp.startswith("virtual/"):

			if not self._depgraph._virt_deps_visible(
				pkg, ignore_use=True):
				return False

			if probe_virt_update and \
				self._have_virt_update(pkg, atom_set):
				# Force virtual updates to be pulled in when appropriate
				# for bug #526160.
				return False

		if not avoid_slot_conflict:
			# This is useful when trying to pull in virtual updates,
			# since we don't want another instance that was previously
			# pulled in to mask an update that we're trying to pull
			# into the same slot.
			return True

		# Use reversed iteration in order to get descending order here,
		# so that the highest version involved in a slot conflict is
		# selected (see bug 554070).
		in_graph = next(reversed(list(
			self._depgraph._dynamic_config._package_tracker.match(
			self._root, pkg.slot_atom, installed=False))), None)

		if in_graph is None:
			# Mask choices for packages which are not the highest visible
			# version within their slot (since they usually trigger slot
			# conflicts).
			highest_visible, in_graph = self._depgraph._select_package(
				self._root, pkg.slot_atom)
			# Note: highest_visible is not necessarily the real highest
			# visible, especially when --update is not enabled, so use
			# < operator instead of !=.
			if (highest_visible is not None and pkg < highest_visible
				and atom_set.findAtomForPackage(highest_visible,
				modified_use=self._depgraph._pkg_use_enabled(highest_visible))):
				return False
		elif in_graph != pkg:
			# Mask choices for packages that would trigger a slot
			# conflict with a previously selected package.
			if not atom_set.findAtomForPackage(in_graph,
				modified_use=self._depgraph._pkg_use_enabled(in_graph)):
				# Only mask if the graph package matches the given
				# atom (fixes bug #515230).
				return True
			return False
		return True

	def _iter_virt_update(self, pkg, atom_set):
		# Yield visible update candidates similar to pkg, for virtual
		# update forcing (bug #526160). Only applies when the parent of
		# the current select_atoms call wants pkg updated.
		if self._depgraph._select_atoms_parent is not None and \
			self._depgraph._want_update_pkg(
			self._depgraph._select_atoms_parent, pkg):

			for new_child in self._depgraph._iter_similar_available(
				pkg, next(iter(atom_set))):
				if not self._depgraph._virt_deps_visible(
					new_child, ignore_use=True):
					continue

				if not self._visible(new_child, atom_set,
					avoid_slot_conflict=False,
					probe_virt_update=False):
					continue

				yield new_child

	def _have_virt_update(self, pkg, atom_set):
		# True if any visible candidate from _iter_virt_update is newer
		# than pkg.
		for new_child in self._iter_virt_update(pkg, atom_set):
			if pkg < new_child:
				return True

		return False

	def aux_get(self, cpv, wants):
		# Serve metadata from the Package previously returned by
		# match_pkgs()/match() for this cpv; missing keys yield "".
		metadata = self._cpv_pkg_map[cpv]._metadata
		return [metadata.get(x, "") for x in wants]

	def match(self, atom):
		return [pkg.cpv for pkg in self.match_pkgs(atom)]
def ambiguous_package_name(arg, atoms, root_config, spinner, myopts):
	"""
	Inform the user that a short ebuild name matched multiple packages
	and list the fully-qualified alternatives.

	With --quiet only the category/package names are printed; otherwise
	full search output (descriptions etc.) is shown for each candidate.

	@param arg: the ambiguous short name as given on the command line
	@param atoms: the matching atoms (one per candidate package)
	"""
	if "--quiet" in myopts:
		writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
		writemsg("!!! one of the following fully-qualified ebuild names instead:\n\n", noiselevel=-1)
		for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
			writemsg("    " + colorize("INFORM", cp) + "\n", noiselevel=-1)
		return

	s = search(root_config, spinner, "--searchdesc" in myopts,
		"--quiet" not in myopts, "--usepkg" in myopts,
		"--usepkgonly" in myopts, search_index = False)
	null_cp = portage.dep_getkey(insert_category_into_atom(
		arg, "null"))
	cat, atom_pn = portage.catsplit(null_cp)
	s.searchkey = atom_pn
	for cp in sorted(set(portage.dep_getkey(atom) for atom in atoms)):
		s.addCP(cp)
	s.output()
	writemsg("!!! The short ebuild name \"%s\" is ambiguous. Please specify\n" % arg, noiselevel=-1)
	writemsg("!!! one of the above fully-qualified ebuild names instead.\n\n", noiselevel=-1)
def _spinner_start(spinner, myopts):
	"""
	Print the "These are the packages that would be ..." banner when an
	interactive/preview mode is active, and start (or silence) the
	dependency-calculation spinner accordingly.

	No-op when spinner is None.
	"""
	if spinner is None:
		return
	if "--quiet" not in myopts and \
		("--pretend" in myopts or "--ask" in myopts or \
		"--tree" in myopts or "--verbose" in myopts):
		action = ""
		if "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
			action = "fetched"
		elif "--buildpkgonly" in myopts:
			action = "built"
		else:
			action = "merged"
		if "--tree" in myopts and action != "fetched": # Tree doesn't work with fetching
			if "--unordered-display" in myopts:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s:" % action) + "\n\n")
			else:
				portage.writemsg_stdout("\n" + \
					darkgreen("These are the packages that " + \
					"would be %s, in reverse order:" % action) + "\n\n")
		else:
			portage.writemsg_stdout("\n" + \
				darkgreen("These are the packages that " + \
				"would be %s, in order:" % action) + "\n\n")

	show_spinner = "--quiet" not in myopts and "--nodeps" not in myopts
	if not show_spinner:
		spinner.update = spinner.update_quiet

	if show_spinner:
		portage.writemsg_stdout("Calculating dependencies  ")
def _spinner_stop(spinner):
if spinner is None or \
spinner.update == spinner.update_quiet:
return
if spinner.update != spinner.update_basic:
# update_basic is used for non-tty output,
# so don't output backspaces in that case.
portage.writemsg_stdout("\b\b")
portage.writemsg_stdout("... done!\n")
def backtrack_depgraph(settings, trees, myopts, myparams,
	myaction, myfiles, spinner):
	"""
	Raises PackageSetNotFound if myfiles contains a missing package set.

	Thin wrapper around _backtrack_depgraph() that brackets the work
	with spinner start/stop; the finally clause guarantees the spinner
	is stopped even when the calculation raises.
	"""
	_spinner_start(spinner, myopts)
	try:
		return _backtrack_depgraph(settings, trees, myopts, myparams,
			myaction, myfiles, spinner)
	finally:
		_spinner_stop(spinner)
def _backtrack_depgraph(settings, trees, myopts, myparams, myaction, myfiles, spinner):
    """Build a dependency graph for myfiles, backtracking on failure.

    Retries the calculation with parameters suggested by the Backtracker
    until it succeeds, a configuration change is required, or the retry
    budget (--backtrack, default 10) is exhausted.

    @return: (success, depgraph, favorites)
    """
    debug = "--debug" in myopts
    mydepgraph = None
    max_retries = myopts.get('--backtrack', 10)
    # The backtracker's search depth scales with the retry budget.
    max_depth = max(1, (max_retries + 1) // 2)
    allow_backtracking = max_retries > 0
    backtracker = Backtracker(max_depth)
    backtracked = 0

    frozen_config = _frozen_depgraph_config(settings, trees,
        myopts, myparams, spinner)

    # The backtracker is truthy while it still has parameter sets to try.
    while backtracker:

        if debug and mydepgraph is not None:
            writemsg_level(
                "\n\nbacktracking try %s \n\n" % \
                backtracked, noiselevel=-1, level=logging.DEBUG)
            mydepgraph.display_problems()

        backtrack_parameters = backtracker.get()
        if debug and backtrack_parameters.runtime_pkg_mask:
            writemsg_level(
                "\n\nruntime_pkg_mask: %s \n\n" %
                backtrack_parameters.runtime_pkg_mask,
                noiselevel=-1, level=logging.DEBUG)

        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
            frozen_config=frozen_config,
            allow_backtracking=allow_backtracking,
            backtrack_parameters=backtrack_parameters)
        success, favorites = mydepgraph.select_files(myfiles)

        if success or mydepgraph.need_config_change():
            # Either done, or the user must change configuration first.
            break
        elif not allow_backtracking:
            break
        elif backtracked >= max_retries:
            break
        elif mydepgraph.need_restart():
            backtracked += 1
            # Feed the failure information back for the next attempt.
            backtracker.feedback(mydepgraph.get_backtrack_infos())
        else:
            break

    if not (success or mydepgraph.need_config_change()) and backtracked:
        # All retries failed; rerun once with the best parameter set seen
        # so far and backtracking disabled, to get usable error output.

        if debug:
            writemsg_level(
                "\n\nbacktracking aborted after %s tries\n\n" % \
                backtracked, noiselevel=-1, level=logging.DEBUG)
            mydepgraph.display_problems()

        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
            frozen_config=frozen_config,
            allow_backtracking=False,
            backtrack_parameters=backtracker.get_best_run())
        success, favorites = mydepgraph.select_files(myfiles)

    if not success and mydepgraph.autounmask_breakage_detected():
        # The autounmask changes themselves caused breakage; retry once
        # with autounmask disabled.
        if debug:
            writemsg_level(
                "\n\nautounmask breakage detected\n\n",
                noiselevel=-1, level=logging.DEBUG)
            mydepgraph.display_problems()
        myopts["--autounmask"] = "n"
        mydepgraph = depgraph(settings, trees, myopts, myparams, spinner,
            frozen_config=frozen_config, allow_backtracking=False)
        success, favorites = mydepgraph.select_files(myfiles)

    return (success, mydepgraph, favorites)
def resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """Wrapper around _resume_depgraph() that manages the spinner.

    Raises PackageSetNotFound if myfiles contains a missing package set.
    """
    _spinner_start(spinner, myopts)
    try:
        result = _resume_depgraph(
            settings, trees, mtimedb, myopts, myparams, spinner)
    finally:
        _spinner_stop(spinner)
    return result
def _resume_depgraph(settings, trees, mtimedb, myopts, myparams, spinner):
    """
    Construct a depgraph for the given resume list. This will raise
    PackageNotFound or depgraph.UnsatisfiedResumeDep when necessary.
    TODO: Return reasons for dropped_tasks, for display/logging.
    @rtype: tuple
    @return: (success, depgraph, dropped_tasks)
    """
    skip_masked = True
    skip_unsatisfied = True
    # NOTE: mergelist is mutated in place below (pruned on retry).
    mergelist = mtimedb["resume"]["mergelist"]
    dropped_tasks = {}
    frozen_config = _frozen_depgraph_config(settings, trees,
        myopts, myparams, spinner)
    # Retry with a pruned mergelist each time an unsatisfied resume
    # dependency is encountered (while skip_unsatisfied is enabled).
    while True:
        mydepgraph = depgraph(settings, trees,
            myopts, myparams, spinner, frozen_config=frozen_config)
        try:
            success = mydepgraph._loadResumeCommand(mtimedb["resume"],
                skip_masked=skip_masked)
        except depgraph.UnsatisfiedResumeDep as e:
            if not skip_unsatisfied:
                raise

            graph = mydepgraph._dynamic_config.digraph
            # Maps each package to be dropped to the atoms it failed.
            unsatisfied_parents = {}
            traversed_nodes = set()
            unsatisfied_stack = [(dep.parent, dep.atom) for dep in e.value]
            while unsatisfied_stack:
                pkg, atom = unsatisfied_stack.pop()
                if atom is not None and \
                    mydepgraph._select_pkg_from_installed(
                    pkg.root, atom)[0] is not None:
                    # An installed package satisfies this atom, so the
                    # parent does not need to be dropped.
                    continue
                atoms = unsatisfied_parents.get(pkg)
                if atoms is None:
                    atoms = []
                    unsatisfied_parents[pkg] = atoms
                if atom is not None:
                    atoms.append(atom)
                if pkg in traversed_nodes:
                    continue
                traversed_nodes.add(pkg)

                # If this package was pulled in by a parent
                # package scheduled for merge, removing this
                # package may cause the parent package's
                # dependency to become unsatisfied.
                for parent_node, atom in \
                    mydepgraph._dynamic_config._parent_atoms.get(pkg, []):
                    if not isinstance(parent_node, Package) \
                        or parent_node.operation not in ("merge", "nomerge"):
                        continue
                    # We need to traverse all priorities here, in order to
                    # ensure that a package with an unsatisfied dependency
                    # won't get pulled in, even indirectly via a soft
                    # dependency.
                    unsatisfied_stack.append((parent_node, atom))

            unsatisfied_tuples = frozenset(tuple(parent_node)
                for parent_node in unsatisfied_parents
                if isinstance(parent_node, Package))
            pruned_mergelist = []
            for x in mergelist:
                if isinstance(x, list) and \
                    tuple(x) not in unsatisfied_tuples:
                    pruned_mergelist.append(x)

            # If the mergelist doesn't shrink then this loop is infinite.
            if len(pruned_mergelist) == len(mergelist):
                # This happens if a package can't be dropped because
                # it's already installed, but it has unsatisfied PDEPEND.
                raise
            mergelist[:] = pruned_mergelist

            # Exclude installed packages that have been removed from the graph due
            # to failure to build/install runtime dependencies after the dependent
            # package has already been installed.
            dropped_tasks.update((pkg, atoms) for pkg, atoms in \
                unsatisfied_parents.items() if pkg.operation != "nomerge")

            # Release the (potentially large) traversal state before retrying.
            del e, graph, traversed_nodes, \
                unsatisfied_parents, unsatisfied_stack
            continue
        else:
            break
    return (success, mydepgraph, dropped_tasks)
def get_mask_info(root_config, cpv, pkgsettings,
        db, pkg_type, built, installed, db_keys, myrepo=None, _pkg_use_enabled=None):
    """Fetch metadata for cpv from db and compute its masking reasons.

    @return: (metadata, mreasons); metadata is None when aux_get fails,
        in which case mreasons is ["corruption"].
    """
    try:
        metadata = dict(zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo)))
    except KeyError:
        # aux_get failed, so the entry is unreadable.
        return None, ["corruption"]

    eapi = metadata['EAPI']
    if not portage.eapi_is_supported(eapi):
        return metadata, ['EAPI %s' % eapi]

    pkg = Package(type_name=pkg_type, root_config=root_config,
        cpv=cpv, built=built, installed=installed, metadata=metadata)
    modified_use = None if _pkg_use_enabled is None else _pkg_use_enabled(pkg)
    mreasons = get_masking_status(pkg, pkgsettings, root_config,
        myrepo=myrepo, use=modified_use)
    return metadata, mreasons
def show_masked_packages(masked_packages):
    """Display masked packages with their reasons, package.mask comments
    and license file locations.

    @param masked_packages: iterable of (root_config, pkgsettings, cpv,
        repo, metadata, mreasons) tuples
    @return: True if any package was masked due to an unsupported EAPI
    """
    shown_licenses = set()
    shown_comments = set()
    # Maybe there is both an ebuild and a binary. Only
    # show one of them to avoid redundant appearance.
    shown_cpvs = set()
    have_eapi_mask = False
    for (root_config, pkgsettings, cpv, repo,
        metadata, mreasons) in masked_packages:
        output_cpv = cpv
        if repo:
            output_cpv += _repo_separator + repo
        if output_cpv in shown_cpvs:
            continue
        shown_cpvs.add(output_cpv)
        eapi_masked = metadata is not None and \
            not portage.eapi_is_supported(metadata["EAPI"])
        if eapi_masked:
            have_eapi_mask = True
            # When masked by EAPI, metadata is mostly useless since
            # it doesn't contain essential things like SLOT.
            metadata = None
        comment, filename = None, None
        if not eapi_masked and \
            "package.mask" in mreasons:
            # Look up the comment in the package.mask file, and where it is.
            comment, filename = \
                portage.getmaskingreason(
                cpv, metadata=metadata,
                settings=pkgsettings,
                portdb=root_config.trees["porttree"].dbapi,
                return_location=True)
        missing_licenses = []
        if not eapi_masked and metadata is not None:
            try:
                missing_licenses = \
                    pkgsettings._getMissingLicenses(
                    cpv, metadata)
            except portage.exception.InvalidDependString:
                # This will have already been reported
                # above via mreasons.
                pass

        writemsg("- "+output_cpv+" (masked by: "+", ".join(mreasons)+")\n",
            noiselevel=-1)

        if comment and comment not in shown_comments:
            # Show each package.mask comment only once.
            writemsg(filename + ":\n" + comment + "\n",
                noiselevel=-1)
            shown_comments.add(comment)
        portdb = root_config.trees["porttree"].dbapi
        for l in missing_licenses:
            # Point at each missing license file only once.
            if l in shown_licenses:
                continue
            l_path = portdb.findLicensePath(l)
            if l_path is None:
                continue
            msg = ("A copy of the '%s' license" + \
                " is located at '%s'.\n\n") % (l, l_path)
            writemsg(msg, noiselevel=-1)
            shown_licenses.add(l)
    return have_eapi_mask
def show_mask_docs():
    """Print a pointer to the documentation about masked packages."""
    writemsg("For more information, see the MASKED PACKAGES "
        "section in the emerge\n", noiselevel=-1)
    writemsg("man page or refer to the Gentoo Handbook.\n", noiselevel=-1)
def show_blocker_docs_link():
    """Print a link to the handbook section about blocked packages."""
    writemsg("\nFor more information about " + bad("Blocked Packages") +
        ", please refer to the following\n", noiselevel=-1)
    writemsg("section of the Gentoo Linux x86 Handbook "
        "(architecture is irrelevant):\n\n", noiselevel=-1)
    writemsg("https://wiki.gentoo.org/wiki/Handbook:X86/Working/Portage"
        "#Blocked_packages\n\n", noiselevel=-1)
def get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
    """Return the human-readable masking reason messages for pkg."""
    reasons = _get_masking_status(pkg, pkgsettings, root_config,
        myrepo=myrepo, use=use)
    return [reason.message for reason in reasons]
def _get_masking_status(pkg, pkgsettings, root_config, myrepo=None, use=None):
    """Collect the _MaskReason objects describing why pkg is masked."""
    mreasons = _getmaskingstatus(
        pkg, settings=pkgsettings,
        portdb=root_config.trees["porttree"].dbapi, myrepo=myrepo)

    # CHOST acceptance only matters for packages not yet installed.
    if not pkg.installed and \
            not pkgsettings._accept_chost(pkg.cpv, pkg._metadata):
        mreasons.append(_MaskReason("CHOST", "CHOST: %s" %
            pkg._metadata["CHOST"]))

    if pkg.invalid:
        for msgs in pkg.invalid.values():
            mreasons.extend(
                _MaskReason("invalid", "invalid: %s" % (msg,))
                for msg in msgs)

    if not pkg._metadata["SLOT"]:
        mreasons.append(_MaskReason("invalid", "SLOT: undefined"))

    return mreasons
| gpl-2.0 |
kjniemi/seastar | test.py | 1 | 5784 | #!/usr/bin/env python3
#
# This file is open source software, licensed to you under the terms
# of the Apache License, Version 2.0 (the "License"). See the NOTICE file
# distributed with this work for additional information regarding copyright
# ownership. You may not use this file except in compliance with the License.
#
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import os
import sys
import argparse
import subprocess
import signal
import re
# Tests driven through the Boost unit test framework; their output can be
# converted to XML for Jenkins (see the --jenkins option below).
boost_tests = [
    'alloc_test',
    'futures_test',
    'thread_test',
    'memcached/test_ascii_parser',
    'sstring_test',
    'output_stream_test',
    'httpd',
    'fstream_test',
    'foreign_ptr_test',
    'semaphore_test',
    'shared_ptr_test',
    'fileiotest',
    'packet_test',
    'tls_test',
    'rpc_test',
    'connect_test',
]

# Stand-alone test binaries with free-form output.
other_tests = [
    'smp_test',
    'timertest',
    'distributed_test',
    'allocator_test',
    'directory_test',
    'thread_context_switch',
    'fair_queue_test',
]

# Width of the most recently printed status line; used by
# print_status_short() to blank the line before overwriting it.
last_len = 0
def print_status_short(msg):
    """Overwrite the previously printed status line with msg."""
    global last_len
    # Blank out whatever is currently on the line, then rewrite it.
    blank = ' ' * last_len
    print('\r' + blank, end='')
    last_len = len(msg)
    print('\r' + msg, end='')

# In verbose mode every status line is kept, so plain print() suffices.
print_status_verbose = print
class Alarm(Exception):
    """Raised by alarm_handler() when a test exceeds its timeout."""
    pass


def alarm_handler(signum, frame):
    """SIGALRM handler: convert the signal into an Alarm exception."""
    raise Alarm
if __name__ == "__main__":
    # Command-line interface for the Seastar test runner.
    all_modes = ['debug', 'release']

    parser = argparse.ArgumentParser(description="Seastar test runner")
    parser.add_argument('--fast', action="store_true", help="Run only fast tests")
    parser.add_argument('--name', action="store", help="Run only test whose name contains given string")
    parser.add_argument('--mode', choices=all_modes, help="Run only tests for given build mode")
    # argparse applies type= to string defaults too, but an int default is clearer.
    parser.add_argument('--timeout', action="store", default=300, type=int, help="timeout value for test execution")
    parser.add_argument('--jenkins', action="store", help="jenkins output file prefix")
    parser.add_argument('--verbose', '-v', action='store_true', default=False,
                        help='Verbose reporting')
    args = parser.parse_args()

    # Kept open for the lifetime of the run (legacy sink for discarded output).
    black_hole = open('/dev/null', 'w')
    print_status = print_status_verbose if args.verbose else print_status_short

    # Build the list of (command, kind) pairs to execute, per build mode.
    test_to_run = []
    modes_to_run = all_modes if not args.mode else [args.mode]
    for mode in modes_to_run:
        prefix = os.path.join('build', mode, 'tests')
        for test in other_tests:
            test_to_run.append((os.path.join(prefix, test), 'other'))
        for test in boost_tests:
            test_to_run.append((os.path.join(prefix, test), 'boost'))
        test_to_run.append(('tests/memcached/test.py --mode ' + mode + (' --fast' if args.fast else ''), 'other'))
        test_to_run.append((os.path.join(prefix, 'distributed_test') + ' -c 2', 'other'))

        allocator_test_path = os.path.join(prefix, 'allocator_test')
        if args.fast:
            # Shorten the allocator test; debug builds are slower, so cap
            # iterations there instead of wall time.
            if mode == 'debug':
                test_to_run.append((allocator_test_path + ' --iterations 5', 'other'))
            else:
                test_to_run.append((allocator_test_path + ' --time 0.1', 'other'))
        else:
            test_to_run.append((allocator_test_path, 'other'))

    if args.name:
        test_to_run = [t for t in test_to_run if args.name in t[0]]

    all_ok = True

    n_total = len(test_to_run)
    env = os.environ
    # disable false positive due to new (with_alignment(...)) ...
    env['ASAN_OPTIONS'] = 'alloc_dealloc_mismatch=0'
    for n, test in enumerate(test_to_run):
        path = test[0]
        prefix = '[%d/%d]' % (n + 1, n_total)
        print_status('%s RUNNING %s' % (prefix, path))
        signal.signal(signal.SIGALRM, alarm_handler)
        if args.jenkins and test[1] == 'boost':
            mode = 'release'
            if test[0].startswith(os.path.join('build', 'debug')):
                mode = 'debug'
            xmlout = args.jenkins + "." + mode + "." + os.path.basename(test[0]) + ".boost.xml"
            path = path + " --output_format=XML --log_level=all --report_level=no --log_sink=" + xmlout
            print(path)
        if os.path.isfile('tmp.out'):
            os.remove('tmp.out')
        outf = open('tmp.out', 'w')
        # Run the test in its own process group so a timeout can kill the
        # whole process tree, not just the immediate child.
        proc = subprocess.Popen(path.split(' '), stdout=outf, stderr=subprocess.PIPE, env=env, preexec_fn=os.setsid)
        signal.alarm(args.timeout)
        err = None
        out = None
        try:
            out, err = proc.communicate()
            signal.alarm(0)
        except Exception:
            # Was a bare "except:"; Exception still covers Alarm (the
            # timeout) but lets KeyboardInterrupt/SystemExit propagate.
            os.killpg(os.getpgid(proc.pid), signal.SIGKILL)
            proc.kill()
            proc.returncode = -1
        finally:
            outf.close()
        if proc.returncode:
            print_status('FAILED: %s\n' % (path))
            if proc.returncode == -1:
                print_status('TIMED OUT\n')
            else:
                print_status(' with error code {code}\n'.format(code=proc.returncode))
            print('=== stdout START ===')
            with open('tmp.out') as outf:
                for line in outf:
                    # Each line already ends with '\n'; suppress print()'s
                    # own newline to avoid double-spaced output.
                    print(line, end='')
            print('=== stdout END ===')
            if err:
                print('=== stderr START ===')
                print(err.decode())
                print('=== stderr END ===')
            all_ok = False
        else:
            print_status('%s PASSED %s' % (prefix, path))

    if all_ok:
        print('\nOK.')
    else:
        print_status('')
        sys.exit(1)
| apache-2.0 |
sikmir/QGIS | python/plugins/processing/algs/qgis/DeleteColumn.py | 15 | 3951 | # -*- coding: utf-8 -*-
"""
***************************************************************************
DeleteColumn.py
---------------------
Date : May 2010
Copyright : (C) 2010 by Michael Minn
Email : pyqgis at michaelminn dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Michael Minn'
__date__ = 'May 2010'
__copyright__ = '(C) 2010, Michael Minn'
from qgis.PyQt.QtCore import QCoreApplication
from qgis.core import (QgsProcessingParameterField,
QgsProcessing,
QgsProcessingAlgorithm,
QgsProcessingFeatureSource)
from processing.algs.qgis.QgisAlgorithm import QgisFeatureBasedAlgorithm
class DeleteColumn(QgisFeatureBasedAlgorithm):
    """Processing algorithm that drops selected fields from a vector layer."""

    COLUMNS = 'COLUMN'

    def flags(self):
        # In-place editing cannot drop fields, so remove that capability.
        return super().flags() & ~QgsProcessingAlgorithm.FlagSupportsInPlaceEdits

    def tags(self):
        return self.tr('drop,delete,remove,fields,columns,attributes').split(',')

    def group(self):
        return self.tr('Vector table')

    def groupId(self):
        return 'vectortable'

    def __init__(self):
        super().__init__()
        # Field names selected for removal (resolved in prepareAlgorithm).
        self.fields_to_delete = []
        # Source-layer indices of those fields, sorted descending.
        self.field_indices = []

    def initParameters(self, config=None):
        self.addParameter(QgsProcessingParameterField(
            self.COLUMNS, self.tr('Fields to drop'),
            None, 'INPUT', QgsProcessingParameterField.Any, True))

    def inputLayerTypes(self):
        return [QgsProcessing.TypeVector]

    def name(self):
        return 'deletecolumn'

    def displayName(self):
        return self.tr('Drop field(s)')

    def outputName(self):
        return self.tr('Remaining fields')

    def prepareAlgorithm(self, parameters, context, feedback):
        self.fields_to_delete = self.parameterAsFields(parameters, self.COLUMNS, context)

        source = self.parameterAsSource(parameters, 'INPUT', context)
        if source is not None:
            # Warn (without aborting) about names absent from the input layer.
            for field_name in self.fields_to_delete:
                if source.fields().lookupField(field_name) < 0:
                    feedback.pushInfo(QCoreApplication.translate('DeleteColumn', 'Field “{}” does not exist in input layer').format(field_name))

        return super().prepareAlgorithm(parameters, context, feedback)

    def outputFields(self, input_fields):
        # First resolve every requested name to an index in the input
        # layer, skipping names that don't exist.
        self.field_indices = [
            index for index in
            (input_fields.lookupField(name) for name in self.fields_to_delete)
            if index >= 0]

        # Remove from the highest index down so earlier removals don't
        # shift the positions of fields still pending removal.
        self.field_indices.sort(reverse=True)
        for index in self.field_indices:
            input_fields.remove(index)

        return input_fields

    def sourceFlags(self):
        # Geometry is untouched, so validity checks would be wasted work.
        return QgsProcessingFeatureSource.FlagSkipGeometryValidityChecks

    def processFeature(self, feature, context, feedback):
        attributes = feature.attributes()
        # field_indices is sorted descending, so each deletion leaves the
        # remaining target positions unchanged.
        for index in self.field_indices:
            del attributes[index]
        feature.setAttributes(attributes)
        return [feature]
| gpl-2.0 |
jdobes/spacewalk | proxy/proxy/apacheHandler.py | 10 | 23646 | # Main entry point for apacheServer.py for the Spacewalk Proxy
# and/or SSL Redirect Server.
#
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
# -----------------------------------------------------------------------------
# language imports
import os
import base64
import xmlrpclib
import re
# common imports
from spacewalk.common.rhnConfig import CFG
from spacewalk.common.rhnLog import log_debug, log_error
from spacewalk.common.rhnApache import rhnApache
from spacewalk.common.rhnTB import Traceback
from spacewalk.common.rhnException import rhnFault, rhnException
from spacewalk.common import rhnFlags, apache
from spacewalk.common.rhnLib import setHeaderValue
from spacewalk.common import byterange
from rhn import rpclib, connections
from rhn.UserDictCase import UserDictCase
from rhnConstants import HEADER_ACTUAL_URI, HEADER_EFFECTIVE_URI, \
HEADER_CHECKSUM, SCHEME_HTTP, SCHEME_HTTPS, URI_PREFIX_KS, \
URI_PREFIX_KS_CHECKSUM, COMPONENT_BROKER, COMPONENT_REDIRECT
# local imports
from proxy.rhnProxyAuth import get_proxy_auth
def getComponentType(req):
    """
    Are we a 'proxy.broker' or a 'proxy.redirect'.

    Checks to see if the last visited Spacewalk Proxy was itself. If so, we
    are a 'proxy.redirect'. If not, then we must be a 'proxy.broker'.
    """
    # NOTE: X-RHN-Proxy-Auth described in broker/rhnProxyAuth.py
    if not req.headers_in.has_key('X-RHN-Proxy-Auth'):
        # No proxy chain header means the request came straight from a
        # client, so we must be the broker.
        return COMPONENT_BROKER

    # The header looks like "t:o:k:e:n:hostname1,t:o:k:e:n:hostname2,...";
    # the server id is the first field of the last (most recent) entry.
    proxy_auth = req.headers_in['X-RHN-Proxy-Auth']
    last_visited = proxy_auth.split(',')[-1].split(':')[0]

    proxy_server_id = get_proxy_auth().getProxyServerId()
    try:
        log_debug(4, "last_visited", last_visited, "; proxy server id",
                  proxy_server_id)
    # pylint: disable=W0702
    except:
        # in case called prior to the log files being initialized
        pass
    if last_visited == proxy_server_id:
        # XXX this assumes redirect runs on the same box as the broker
        return COMPONENT_REDIRECT
    return COMPONENT_BROKER
class apacheHandler(rhnApache):
""" Main apache entry point for the proxy. """
_lang_catalog = "proxy"
def __init__(self):
rhnApache.__init__(self)
self.input = None
self._component = None
def set_component(self, component):
self._component = component
@staticmethod
def _setSessionToken(headers):
# extended to always return a token, even if an empty one
ret = rhnApache._setSessionToken(headers)
if ret:
log_debug(4, "Returning", ret)
return ret
# Session token did not verify, we have an empty auth token
token = UserDictCase()
rhnFlags.set("AUTH_SESSION_TOKEN", token)
return token
def headerParserHandler(self, req):
""" Name-munging if request came from anaconda in response to a
kickstart. """
ret = rhnApache.headerParserHandler(self, req)
if ret != apache.OK:
return ret
self.input = rpclib.transports.Input(req.headers_in)
# Before we allow the main handler code to commence, we'll first check
# to see if this request came from anaconda in response to a kickstart.
# If so, we'll need to do some special name-munging before we continue.
ret = self._transformKickstartRequest(req)
return ret
    def _transformKickstartRequest(self, req):
        """ If necessary, this routine will transform a "tinified" anaconda-
            generated kickstart request into a normalized form capable of being
            cached effectively by squid.

            This is done by first making a HEAD request
            to the satellite for the purpose of updating the kickstart progress and
            retrieving an MD5 sum for the requested file. We then replace the
            tinyURL part of the URI with the retrieved MD5 sum. This effectively
            removes session-specific information while allowing us to still cache
            based on the uniqueness of the file.

            Returns an apache status code; apache.OK also covers the
            "nothing to transform" case.
        """
        # Kickstart requests only come in the form of a GET, so short-circuit
        # if that is not the case.
        if (req.method != "GET"):
            return apache.OK

        log_debug(6, "URI", req.uri)
        log_debug(6, "COMPONENT", self._component)

        # If we're a broker, we know that this is a kickstart request from
        # anaconda by checking if the URI begins with /ty/*, otherwise just
        # return. If we're an SSL redirect, we check that the URI begins with
        # /ty-cksm/*, otherwise return.
        if self._component == COMPONENT_BROKER:
            if req.uri.startswith(URI_PREFIX_KS):
                log_debug(3, "Found a kickstart URI: %s" % req.uri)
                return self._transformKsRequestForBroker(req)
        elif self._component == COMPONENT_REDIRECT:
            if req.uri.startswith(URI_PREFIX_KS_CHECKSUM):
                log_debug(3, "Found a kickstart checksum URI: %s" % req.uri)
                return self._transformKsRequestForRedirect(req)

        return apache.OK
    def _transformKsRequestForBroker(self, req):
        """Rewrite a /ty/<token>/... kickstart URI into its cacheable
        /ty-cksm/<checksum>/... form, stashing both URIs in request
        headers for the content handler and the SSL redirect.
        Returns an apache status code."""
        # Get the checksum for the requested resource from the satellite.
        (status, checksum) = self._querySatelliteForChecksum(req)
        if status != apache.OK or not checksum:
            return status

        # If we got this far, we have the checksum. Create a new URI based on
        # the checksum.
        newURI = self._generateCacheableKickstartURI(req.uri, checksum)
        if not newURI:
            # Couldn't create a cacheable URI, log an error and revert to
            # BZ 158236 behavior (serve the original, uncacheable URI).
            log_error('Could not create cacheable ks URI from "%s"' % req.uri)
            return apache.OK

        # Now we must embed the old URI into a header in the original request
        # so that the SSL Redirect has it available if the resource has not
        # been cached yet. We will also embed a header that holds the new URI,
        # so that the content handler can use it later.
        log_debug(3, "Generated new kickstart URI: %s" % newURI)
        req.headers_in[HEADER_ACTUAL_URI] = req.uri
        req.headers_in[HEADER_EFFECTIVE_URI] = newURI

        return apache.OK
@staticmethod
def _transformKsRequestForRedirect(req):
# If we don't get the actual URI in the headers, we'll decline the
# request.
if not req.headers_in or not req.headers_in.has_key(HEADER_ACTUAL_URI):
log_error("Kickstart request header did not include '%s'"
% HEADER_ACTUAL_URI)
return apache.DECLINED
# The original URI is embedded in the headers under X-RHN-ActualURI.
# Remove it, and place it in the X-RHN-EffectiveURI header.
req.headers_in[HEADER_EFFECTIVE_URI] = req.headers_in[HEADER_ACTUAL_URI]
log_debug(3, "Reverting to old URI: %s" % req.headers_in[HEADER_ACTUAL_URI])
return apache.OK
    def _querySatelliteForChecksum(self, req):
        """ Sends a HEAD request to the satellite for the purpose of obtaining
            the checksum for the requested resource. A (status, checksum)
            tuple is returned. If status is not apache.OK, checksum will be
            None. If status is OK, and a checksum is not returned, the old
            BZ 158236 behavior will be used.
        """
        # Mirror the scheme of the incoming request; port 443 implies HTTPS.
        scheme = SCHEME_HTTP
        if req.server.port == 443:
            scheme = SCHEME_HTTPS
        log_debug(6, "Using scheme: %s" % scheme)

        # Initiate a HEAD request to the satellite to retrieve the MD5 sum.
        # Actually, we make the request through our own proxy first, so
        # that we don't accidentally bypass necessary authentication
        # routines. Since it's a HEAD request, the proxy will forward it
        # directly to the satellite like it would a POST request.
        host = "127.0.0.1"
        port = req.connection.local_addr[1]
        connection = self._createConnection(host, port, scheme)
        if not connection:
            # Couldn't form the connection. Log an error and revert to the
            # old BZ 158236 behavior. In order to be as robust as possible,
            # we won't fail here.
            log_error('HEAD req - Could not create connection to %s://%s:%s'
                      % (scheme, host, str(port)))
            return (apache.OK, None)

        # We obtained the connection successfully. Construct the URL that
        # we'll connect to.
        pingURL = "%s://%s:%s%s" % (scheme, host, str(port), req.uri)
        log_debug(6, "Ping URI: %s" % pingURL)

        hdrs = UserDictCase()
        for k in req.headers_in.keys():
            if k.lower() != 'range':  # we want checksum of whole file
                # Strip bare CR/LF from header values to prevent HTTP
                # header injection into the outgoing request.
                hdrs[k] = re.sub(r'\n(?![ \t])|\r(?![ \t\n])', '', str(req.headers_in[k]))

        log_debug(9, "Using existing headers_in", hdrs)
        connection.request("HEAD", pingURL, None, hdrs)
        log_debug(6, "Connection made, awaiting response.")

        # Get the response.
        response = connection.getresponse()
        log_debug(6, "Received response status: %s" % response.status)
        connection.close()

        if (response.status != apache.HTTP_OK) and (response.status != apache.HTTP_PARTIAL_CONTENT):
            # Something bad happened. Return the status back to the client.
            log_debug(1, "HEAD req - Received error code in reponse: %s"
                      % (str(response.status)))
            return (response.status, None)

        # The request was successful. Dig the MD5 checksum out of the headers.
        responseHdrs = response.msg
        if not responseHdrs:
            # No headers?! This shouldn't happen at all. But if it does,
            # revert to the old BZ 158236 behavior.
            log_error("HEAD response - No HTTP headers!")
            return (apache.OK, None)

        if not responseHdrs.has_key(HEADER_CHECKSUM):
            # No checksum was provided. This could happen if a newer
            # proxy is talking to an older satellite. To keep things
            # running smoothly, we'll just revert to the BZ 158236
            # behavior.
            log_debug(1, "HEAD response - No X-RHN-Checksum field provided!")
            return (apache.OK, None)

        checksum = responseHdrs[HEADER_CHECKSUM]

        return (apache.OK, checksum)
@staticmethod
def _generateCacheableKickstartURI(oldURI, checksum):
"""
This routine computes a new cacheable URI based on the old URI and the
checksum. For example, if the checksum is 1234ABCD and the oldURI was:
/ty/AljAmCEt/RedHat/base/comps.xml
Then, the new URI will be:
/ty-cksm/1234ABCD/RedHat/base/comps.xml
If for some reason the new URI could not be generated, return None.
"""
newURI = URI_PREFIX_KS_CHECKSUM + checksum
# Strip the first two path pieces off of the oldURI.
uriParts = oldURI.split('/')
numParts = 0
for part in uriParts:
if len(part) is not 0: # Account for double slashes ("//")
numParts += 1
if numParts > 2:
newURI += "/" + part
# If the URI didn't have enough parts, return None.
if numParts <= 2:
newURI = None
return newURI
@staticmethod
def _createConnection(host, port, scheme):
params = {'host': host,
'port': port}
if CFG.has_key('timeout'):
params['timeout'] = CFG.TIMEOUT
if scheme == SCHEME_HTTPS:
conn_class = connections.HTTPSConnection
else:
conn_class = connections.HTTPConnection
return conn_class(**params)
    def handler(self, req):
        """ Main handler to handle all requests pumped through this server. """

        ret = rhnApache.handler(self, req)
        if ret != apache.OK:
            return ret

        log_debug(4, "METHOD", req.method)
        log_debug(4, "PATH_INFO", req.path_info)
        log_debug(4, "URI (full path info)", req.uri)
        log_debug(4, "Component", self._component)

        # Dispatch to the broker or redirect implementation; both expose
        # the same handler() interface. Imports are deferred so only the
        # needed component is loaded.
        if self._component == COMPONENT_BROKER:
            from broker import rhnBroker
            handlerObj = rhnBroker.BrokerHandler(req)
        else:
            # Redirect
            from redirect import rhnRedirect
            handlerObj = rhnRedirect.RedirectHandler(req)

        try:
            ret = handlerObj.handler()
        except rhnFault, e:
            # Encode the fault as an XML-RPC response for the client.
            return self.response(req, e)

        if rhnFlags.test("NeedEncoding"):
            # The component produced a payload that must be XML-RPC encoded.
            return self.response(req, ret)

        # All good; we expect ret to be an HTTP return code
        if not isinstance(ret, type(1)):
            raise rhnException("Invalid status code type %s" % type(ret))
        log_debug(1, "Leaving with status code %s" % ret)
        return ret
@staticmethod
def normalize(response):
""" convert a response to the right type for passing back to
rpclib.xmlrpclib.dumps
"""
if isinstance(response, xmlrpclib.Fault):
return response
return (response,)
    @staticmethod
    def response_file(req, response):
        """ Send a file (rpclib.transports.File) back to the client.

            Honors an optional Range request header, emits the
            X-Package-FileName and X-Replace-Content headers, and streams
            the payload either manually (ranged requests) or via the WSGI
            file wrapper when one is available.
        """
        log_debug(3, response.name)
        # We may set the content type remotely
        if rhnFlags.test("Content-Type"):
            req.content_type = rhnFlags.get("Content-Type")
        else:
            # Safe default
            req.content_type = "application/octet-stream"

        # find out the size of the file
        if response.length == 0:
            # Seek to the end to measure, then rewind for sending.
            response.file_obj.seek(0, 2)
            file_size = response.file_obj.tell()
            response.file_obj.seek(0, 0)
        else:
            file_size = response.length

        success_response = apache.OK
        response_size = file_size

        # Serve up the requested byte range
        if req.headers_in.has_key("Range"):
            try:
                range_start, range_end = \
                    byterange.parse_byteranges(req.headers_in["Range"],
                                               file_size)
                response_size = range_end - range_start
                req.headers_out["Content-Range"] = \
                    byterange.get_content_range(range_start, range_end, file_size)
                req.headers_out["Accept-Ranges"] = "bytes"

                response.file_obj.seek(range_start)

                # We'll want to send back a partial content rather than ok
                # if this works
                req.status = apache.HTTP_PARTIAL_CONTENT
                success_response = apache.HTTP_PARTIAL_CONTENT
            # For now we will just return the whole file on the following exceptions
            except byterange.InvalidByteRangeException:
                pass
            except byterange.UnsatisfyableByteRangeException:
                pass

        req.headers_out["Content-Length"] = str(response_size)

        # if we loaded this from a real fd, set it as the X-Replace-Content
        # check for "name" since sometimes we get xmlrpclib.transports.File's that have
        # a stringIO as the file_obj, and they dont have a .name (ie,
        # fileLists...)
        if response.name:
            req.headers_out["X-Package-FileName"] = response.name

        xrepcon = req.headers_in.has_key("X-Replace-Content-Active") \
            and rhnFlags.test("Download-Accelerator-Path")
        if xrepcon:
            # A download accelerator will serve the file itself; tell it
            # where to find the file and at what rate to serve it.
            fpath = rhnFlags.get("Download-Accelerator-Path")
            log_debug(1, "Serving file %s" % fpath)
            req.headers_out["X-Replace-Content"] = fpath
            # Only set a byte rate if xrepcon is active
            byte_rate = rhnFlags.get("QOS-Max-Bandwidth")
            if byte_rate:
                req.headers_out["X-Replace-Content-Throttle"] = str(byte_rate)

        # send the headers
        req.send_http_header()

        if req.headers_in.has_key("Range"):
            # and the file
            read = 0
            while read < response_size:
                # We check the size here in case we're not asked for the entire file.
                buf = response.read(CFG.BUFFER_SIZE)
                if not buf:
                    break
                try:
                    req.write(buf)
                    read = read + CFG.BUFFER_SIZE
                except IOError:
                    if xrepcon:
                        # We're talking to a proxy, so don't bother to report
                        # a SIGPIPE
                        break
                    return apache.HTTP_BAD_REQUEST
            response.close()
        else:
            if 'wsgi.file_wrapper' in req.headers_in:
                # Let the WSGI layer stream the file efficiently.
                req.output = req.headers_in['wsgi.file_wrapper'](response, CFG.BUFFER_SIZE)
            else:
                req.output = iter(lambda: response.read(CFG.BUFFER_SIZE), '')
        return success_response
def response(self, req, response):
    """Send a response back to the client (common code path).

    Dispatches on the response type:
      * rpclib.transports.File objects are streamed via response_file(),
        unless they wrap an in-memory object (no fileno) that must be
        compressed, in which case the contents are read into memory first;
      * rhnFault objects become an xmlrpc Fault — or, for GET requests,
        X-RHN-Fault-* headers via _response_fault_get();
      * everything else is xmlrpc-encoded as needed and written out.

    Returns an apache status code.
    """
    # Send the xml-rpc response back
    log_debug(5, "Response type", type(response))
    needs_xmlrpc_encoding = rhnFlags.test("NeedEncoding")
    compress_response = rhnFlags.test("compress_response")
    # Init an output object; we'll use it for sending data in various
    # formats
    if isinstance(response, rpclib.transports.File):
        if not hasattr(response.file_obj, 'fileno') and compress_response:
            # This is a StringIO that has to be compressed, so read it in
            # memory; mark that we don't have to do any xmlrpc encoding
            response = response.file_obj.read()
            needs_xmlrpc_encoding = 0
        else:
            # Just treat it as a file
            return self.response_file(req, response)
    is_fault = 0
    if isinstance(response, rhnFault):
        if req.method == 'GET':
            return self._response_fault_get(req, response.getxml())
        # Need to encode the response as xmlrpc
        response = response.getxml()
        is_fault = 1
        # No compression for faults
        compress_response = 0
        # This is an xmlrpc Fault, so we have to encode it
        needs_xmlrpc_encoding = 1
    output = rpclib.transports.Output()
    if not is_fault:
        # First, use the same encoding/transfer that the client used
        output.set_transport_flags(
            transfer=rpclib.transports.lookupTransfer(self.input.transfer),
            encoding=rpclib.transports.lookupEncoding(self.input.encoding))
    if compress_response:
        # check if we have to compress this result
        log_debug(4, "Compression on for client version", self.clientVersion)
        if self.clientVersion > 0:
            output.set_transport_flags(output.TRANSFER_BINARY,
                                       output.ENCODE_ZLIB)
        else:  # original clients had the binary transport support broken
            output.set_transport_flags(output.TRANSFER_BASE64,
                                       output.ENCODE_ZLIB)
    # We simply add the transport options to the output headers
    output.headers.update(rhnFlags.get('outputTransportOptions').dict())
    if needs_xmlrpc_encoding:
        # Normalize the response, then serialize it as an xmlrpc
        # methodresponse; serialization failures are logged and reported
        # as a 500 to the client.
        response = self.normalize(response)
        try:
            response = rpclib.xmlrpclib.dumps(response, methodresponse=1)
        except TypeError, e:
            log_debug(-1, "Error \"%s\" encoding response = %s" % (e, response))
            Traceback("apacheHandler.response", req,
                      extra="Error \"%s\" encoding response = %s" % (e, response),
                      severity="notification")
            return apache.HTTP_INTERNAL_SERVER_ERROR
        except Exception:  # pylint: disable=E0012, W0703
            # Uncaught exception; signal the error
            Traceback("apacheHandler.response", req,
                      severity="unhandled")
            return apache.HTTP_INTERNAL_SERVER_ERROR
    # we're about done here, patch up the headers
    output.process(response)
    # Copy the rest of the fields; Content-Type is special-cased because
    # mod_python exposes it as an attribute, not a plain header.
    for k, v in output.headers.items():
        if k.lower() == 'content-type':
            # Content-type
            req.content_type = v
        else:
            setHeaderValue(req.headers_out, k, v)
    if CFG.DEBUG == 4:
        # I wrap this in an "if" so we don't parse a large file for no reason.
        log_debug(4, "The response: %s[...SNIP (for sanity) SNIP...]%s" %
                  (response[:100], response[-100:]))
    elif CFG.DEBUG >= 5:
        # if you absolutely must have that whole response in the log file
        log_debug(5, "The response: %s" % response)
    # send the headers
    req.send_http_header()
    try:
        # XXX: in case data is really large maybe we should split
        # it in smaller chunks instead of blasting everything at
        # once. Not yet a problem...
        req.write(output.data)
    except IOError:
        # send_http_header is already sent, so it doesn't make a lot of
        # sense to return a non-200 error; but there is no better solution
        return apache.HTTP_BAD_REQUEST
    del output
    return apache.OK
@staticmethod
def _response_fault_get(req, response):
    """Report an rhnFault to a GET client via X-RHN-Fault-* headers.

    The fault code goes out as X-RHN-Fault-Code; the fault string is
    base64-encoded and emitted one line per X-RHN-Fault-String header
    (base64 output wraps, so there may be several). Transport options
    from rhnFlags are copied through, and HTTP 404 is returned.
    """
    headers = req.headers_out
    headers["X-RHN-Fault-Code"] = str(response.faultCode)
    encoded = base64.encodestring(response.faultString).strip()
    # Emit one header per base64 line so long fault strings survive.
    for chunk in encoded.split('\n'):
        headers.add("X-RHN-Fault-String", chunk.strip())
    # And then send all the other transport options
    transport_opts = rhnFlags.get('outputTransportOptions')
    for key, value in transport_opts.items():
        setHeaderValue(headers, key, value)
    return apache.HTTP_NOT_FOUND
def cleanupHandler(self, req):
    """Tear down session state when called from apacheServer.Cleanup().

    Drops the parsed input, reaps any exited child processes, then
    delegates to the base class cleanup handler.
    """
    log_debug(1)
    self.input = None
    # Reap every terminated child; os.waitpid raises OSError once there
    # are no children left to wait for.
    while True:
        try:
            pid, status = os.waitpid(-1, 0)
        except OSError:
            break
        log_error("Reaped child process %d with status %d" % (pid, status))
    return rhnApache.cleanupHandler(self, req)
# =============================================================================
| gpl-2.0 |
HKuz/Test_Code | CodeFights/frequencyAnalysis.py | 1 | 1652 | #!/usr/local/bin/python
# Code Fights Frequency Analysis Problem
from collections import Counter
def frequencyAnalysis(encryptedText):
return Counter(encryptedText).most_common(1)[0][0]
def main():
    """Self-check harness: each entry is [ciphertext, expected most
    frequent character]; prints PASSED/FAILED per case."""
    tests = [
        ["$~NmiNmim$/NVeirp@dlzrCCCCfFfQQQ", "C"],
        ["Agoodglassinthebishop'shostelinthedevil'sseattwenty-onedegreesandthirteenminutesnortheastandbynorthmainbranchseventhlimbeastsideshootfromthelefteyeofthedeath's-headabeelinefromthetreethroughtheshotfiftyfeetout.", "e"],
        ["Q", "Q"],
        ["):<<}:BnUUKc=>~LKU><,;U><U=~BKc=>~}~jKB;UU~n== ~c=fS<c~}~:w~~Unc}=>Kw=~~ceKc*=~Uc<w=>~nU=nc}Lfc<w=>enKcLwncY>U~j~c=>BKeL~nU=UK}~U><<=mw<e=>~B~m=~f~<m=>~}~n=>;US>~n}nL~~BKc~mw<e=>~=w~~=>w<*:>=>~U><=mKm=fm~~=<*=k", "~"],
        ["(:c:@%aF;:NBo@o:'X:%CFCBoFB@X@iFCTPc@iFi::@o%;@a!PXCF:iTcCNbCFPoFCc;:YCo%a@a}Pcco@Cc:%@FF;::o%BYBo:bi@oT;=nFv:|i@o%`a%Ci:TFCBo<!PXCF:i%iBXaF;:bP|F;iBP|;Bo:: :aBT}:F@o%v:|i@o%X@T:aBPFFB@aXBF*;:i:F;:|iBPXb:|CoaFB%C|=$Co%Co|oBF;Co|F;:i:<v:|i@o%;@a!PXCF:iTcCNbF;:Fi::@|@Co@o%%iBXF;:bP|F;iBP|;F;:a}Pcc`aBF;:i: :dF;: T;BBa:@%CYY:i:oFaXBFFB%C|<F;CaFCN:YCo%Co|F*Ba}:c:FBoa@o%@T;:aFYCcc:%*CF;|Bc%TBCoa@o%D:*:ci =V;: :aFCN@F:F;:FBF@cE@cP:@Fj1=5NCccCBo<bPF:E:oF;@FYC|Pi:XiBE:aFBb:b:cB*F;:@TFP@c*BiF;*;:oF;: :E:oFP@cc a:ccF;:CF:Na=", ":"]
    ]
    for t in tests:
        res = frequencyAnalysis(t[0])
        if t[1] == res:
            print("PASSED: frequencyAnalysis({}) returned {}"
                  .format(t[0], res))
        else:
            print(("FAILED: frequencyAnalysis({}) returned {},"
                   "answer: {}").format(t[0], res, t[1]))
# Run the self-checks only when executed as a script.
if __name__ == '__main__':
    main()
| mit |
seanli9jan/tensorflow | tensorflow/python/autograph/converters/lists.py | 30 | 8339 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Converter for list operations.
This includes converting Python lists to TensorArray/TensorList.
"""
# TODO(mdan): Elaborate the logic here.
# TODO(mdan): Does it even make sense to attempt to try to use TAs?
# The current rule (always convert to TensorArray) is naive and insufficient.
# In general, a better mechanism could look like:
# * convert to TensorList by default
# * leave as Python list if the user explicitly forbids it
# * convert to TensorArray only when complete write once behavior can be
# guaranteed (e.g. list comprehensions)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import gast
from tensorflow.python.autograph.core import converter
from tensorflow.python.autograph.lang import directives
from tensorflow.python.autograph.pyct import anno
from tensorflow.python.autograph.pyct import parser
from tensorflow.python.autograph.pyct import templates
from tensorflow.python.autograph.pyct.static_analysis.annos import NodeAnno
# Tags for local state: key under which pending pop() rewrites are stashed
# in the transformer's per-statement local state.
POP_USES = 'pop_uses'
class ListTransformer(converter.Base):
  """Converts lists and related operations to their TF counterpart.

  List literals become ag__.new_list calls; append/pop/stack method calls
  are rewritten to the corresponding ag__ list operators. pop() needs
  special handling because it is an expression with a side effect: each
  use is hoisted into a separate statement preceding the one that used it.
  """

  def visit_List(self, node):
    # Wrap every list literal so the runtime can pick a representation.
    node = self.generic_visit(node)
    template = """
ag__.new_list(elements)
"""
    return templates.replace_as_expression(template, elements=node)

  def _replace_append_call(self, node):
    # Rewrite `target.append(x)` into `target = ag__.list_append(target, x)`.
    assert len(node.args) == 1
    assert isinstance(node.func, gast.Attribute)
    template = """
target = ag__.list_append(target, element)
"""
    return templates.replace(
        template,
        target=node.func.value,
        element=node.args[0])

  def _replace_pop_call(self, node):
    # Expressions that use pop() are converted to a statement + expression.
    #
    # For example:
    #
    #   print(target.pop())
    #
    # ... is converted to:
    #
    #   target, target_pop = ag__.list_pop(target)
    #   print(target_pop)
    #
    # Here, we just generate the variable name and swap it in,
    # and _generate_pop_operation will handle the rest.
    #
    # Multiple uses of pop() are allowed:
    #
    #   print(target.pop(), target.pop())
    #   print(target.pop().pop())
    #
    assert isinstance(node.func, gast.Attribute)
    scope = anno.getanno(node, NodeAnno.ARGS_SCOPE)
    target_node = node.func.value
    # Attempt to use a related name if one exists. Otherwise use something
    # generic.
    if anno.hasanno(target_node, anno.Basic.QN):
      target_name = anno.getanno(target_node, anno.Basic.QN).ssf()
    else:
      target_name = 'list_'
    pop_var_name = self.ctx.namer.new_symbol(target_name, scope.referenced)
    # Record the pending rewrite; it is materialized when the enclosing
    # statement is post-processed.
    pop_uses = self.get_local(POP_USES, [])
    pop_uses.append((node, pop_var_name))
    self.set_local(POP_USES, pop_uses)
    return templates.replace_as_expression('var_name', var_name=pop_var_name)

  def _replace_stack_call(self, node):
    # Rewrite `stack(x)` into ag__.list_stack, preserving the original
    # callable so the runtime can fall back to it.
    assert len(node.args) == 1
    dtype = self.get_definition_directive(
        node.args[0],
        directives.set_element_type,
        'dtype',
        default=templates.replace_as_expression('None'))
    template = """
ag__.list_stack(
target,
opts=ag__.ListStackOpts(
element_dtype=dtype,
original_call=orig_call))
"""
    return templates.replace_as_expression(
        template,
        dtype=dtype,
        target=node.args[0],
        orig_call=node.func)

  def visit_Call(self, node):
    node = self.generic_visit(node)
    # TODO(mdan): This is insufficient if target is a function argument.
    # In the case of function arguments, we need to add the list to the
    # function's return value, because it is being modified.
    # TODO(mdan): Checking just the name is brittle, can it be improved?
    if isinstance(node.func, gast.Attribute):
      func_name = node.func.attr
      if func_name == 'append' and (len(node.args) == 1):
        node = self._replace_append_call(node)
      elif func_name == 'pop' and (len(node.args) <= 1):
        node = self._replace_pop_call(node)
      elif (func_name == 'stack' and (len(node.args) == 1) and
            (not node.keywords or node.keywords[0].arg == 'strict')):
        # This avoids false positives with keyword args.
        # TODO(mdan): handle kwargs properly.
        node = self._replace_stack_call(node)
    return node

  def _generate_pop_operation(self, original_call_node, pop_var_name):
    # Emit the `target, pop_var = ag__.list_pop(...)` statement recorded
    # by _replace_pop_call.
    assert isinstance(original_call_node.func, gast.Attribute)
    if original_call_node.args:
      pop_element = original_call_node.args[0]
    else:
      pop_element = parser.parse_expression('None')
    # The call will be something like "target.pop()", and the dtype is hooked to
    # target, hence the func.value.
    # TODO(mdan): For lists of lists, this won't work.
    # The reason why it won't work is because it's unclear how to annotate
    # the list as a "list of lists with a certain element type" when using
    # operations like `l.pop().pop()`.
    dtype = self.get_definition_directive(
        original_call_node.func.value,
        directives.set_element_type,
        'dtype',
        default=templates.replace_as_expression('None'))
    shape = self.get_definition_directive(
        original_call_node.func.value,
        directives.set_element_type,
        'shape',
        default=templates.replace_as_expression('None'))
    template = """
target, pop_var_name = ag__.list_pop(
target, element,
opts=ag__.ListPopOpts(element_dtype=dtype, element_shape=shape))
"""
    return templates.replace(
        template,
        target=original_call_node.func.value,
        pop_var_name=pop_var_name,
        element=pop_element,
        dtype=dtype,
        shape=shape)

  def _postprocess_statement(self, node):
    """Inserts any separate pop() calls that node may use."""
    pop_uses = self.get_local(POP_USES, None)
    if pop_uses:
      replacements = []
      for original_call_node, pop_var_name in pop_uses:
        replacements.extend(
            self._generate_pop_operation(original_call_node, pop_var_name))
      # The original statement runs after all its pop assignments.
      replacements.append(node)
      node = replacements
    self.exit_local_scope()
    return node, None

  # TODO(mdan): Should we have a generic visit_block instead?
  # Right now it feels that a visit_block would add too much magic that's
  # hard to follow.

  def _visit_and_process_block(self, block):
    # Visit a statement list with a fresh local scope so that pop()
    # rewrites are attached to the correct statement.
    return self.visit_block(
        block,
        before_visit=self.enter_local_scope,
        after_visit=self._postprocess_statement)

  def visit_FunctionDef(self, node):
    node.args = self.generic_visit(node.args)
    node.decorator_list = self.visit_block(node.decorator_list)
    node.body = self._visit_and_process_block(node.body)
    return node

  def visit_For(self, node):
    node.target = self.visit(node.target)
    node.body = self._visit_and_process_block(node.body)
    node.orelse = self._visit_and_process_block(node.orelse)
    return node

  def visit_While(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_and_process_block(node.body)
    node.orelse = self._visit_and_process_block(node.orelse)
    return node

  def visit_If(self, node):
    node.test = self.visit(node.test)
    node.body = self._visit_and_process_block(node.body)
    node.orelse = self._visit_and_process_block(node.orelse)
    return node

  def visit_With(self, node):
    node.items = self.visit_block(node.items)
    node.body = self._visit_and_process_block(node.body)
    return node
def transform(node, ctx):
  """Run the list-operation conversion over *node* and return the result."""
  transformer = ListTransformer(ctx)
  return transformer.visit(node)
| apache-2.0 |
Silight/ComicEnv | ComicEnv/lib/python3.2/site-packages/pkg_resources/tests/test_resources.py | 242 | 23622 | import os
import sys
import tempfile
import shutil
import string
import pytest
import pkg_resources
from pkg_resources import (parse_requirements, VersionConflict, parse_version,
Distribution, EntryPoint, Requirement, safe_version, safe_name,
WorkingSet)
packaging = pkg_resources.packaging
def safe_repr(obj, short=False):
    """Return repr(obj), guarding against broken __repr__ implementations.

    Copied from Python 2.7. When *short* is true, results at least
    pkg_resources._MAX_LENGTH characters long are truncated and marked.
    """
    try:
        text = repr(obj)
    except Exception:
        # Fall back to the default object repr if the custom one raises.
        text = object.__repr__(obj)
    if short and len(text) >= pkg_resources._MAX_LENGTH:
        return text[:pkg_resources._MAX_LENGTH] + ' [truncated]...'
    return text
class Metadata(pkg_resources.EmptyProvider):
    """Mock object to return metadata as if from an on-disk distribution."""

    def __init__(self, *pairs):
        # Map metadata file name -> file contents.
        self.metadata = {}
        for name, content in pairs:
            self.metadata[name] = content

    def has_metadata(self, name):
        return name in self.metadata

    def get_metadata(self, name):
        return self.metadata[name]

    def get_metadata_lines(self, name):
        content = self.get_metadata(name)
        return pkg_resources.yield_lines(content)
# Shorthand for the Distribution-from-egg-filename factory used throughout.
dist_from_fn = pkg_resources.Distribution.from_filename
class TestDistro:
    # Exercises Environment/WorkingSet distribution collection, parsing,
    # metadata-driven requirements, and dependency resolution.

    def testCollection(self):
        # empty path should produce no distributions
        ad = pkg_resources.Environment([], platform=None, python=None)
        assert list(ad) == []
        assert ad['FooPkg'] == []
        ad.add(dist_from_fn("FooPkg-1.3_1.egg"))
        ad.add(dist_from_fn("FooPkg-1.4-py2.4-win32.egg"))
        ad.add(dist_from_fn("FooPkg-1.2-py2.4.egg"))
        # Name is in there now
        assert ad['FooPkg']
        # But only 1 package
        assert list(ad) == ['foopkg']
        # Distributions sort by version
        assert [dist.version for dist in ad['FooPkg']] == ['1.4','1.3-1','1.2']
        # Removing a distribution leaves sequence alone
        ad.remove(ad['FooPkg'][1])
        assert [dist.version for dist in ad['FooPkg']] == ['1.4','1.2']
        # And inserting adds them in order
        ad.add(dist_from_fn("FooPkg-1.9.egg"))
        assert [dist.version for dist in ad['FooPkg']] == ['1.9','1.4','1.2']
        ws = WorkingSet([])
        foo12 = dist_from_fn("FooPkg-1.2-py2.4.egg")
        foo14 = dist_from_fn("FooPkg-1.4-py2.4-win32.egg")
        req, = parse_requirements("FooPkg>=1.3")
        # Nominal case: no distros on path, should yield all applicable
        assert ad.best_match(req, ws).version == '1.9'
        # If a matching distro is already installed, should return only that
        ws.add(foo14)
        assert ad.best_match(req, ws).version == '1.4'
        # If the first matching distro is unsuitable, it's a version conflict
        ws = WorkingSet([])
        ws.add(foo12)
        ws.add(foo14)
        with pytest.raises(VersionConflict):
            ad.best_match(req, ws)
        # If more than one match on the path, the first one takes precedence
        ws = WorkingSet([])
        ws.add(foo14)
        ws.add(foo12)
        ws.add(foo14)
        assert ad.best_match(req, ws).version == '1.4'

    def checkFooPkg(self,d):
        # Shared assertions for a FooPkg-1.3.post1-py2.4-win32 distribution.
        assert d.project_name == "FooPkg"
        assert d.key == "foopkg"
        assert d.version == "1.3.post1"
        assert d.py_version == "2.4"
        assert d.platform == "win32"
        assert d.parsed_version == parse_version("1.3-1")

    def testDistroBasics(self):
        d = Distribution(
            "/some/path",
            project_name="FooPkg",version="1.3-1",py_version="2.4",platform="win32"
        )
        self.checkFooPkg(d)
        d = Distribution("/some/path")
        # Defaults come from the running interpreter; platform is "any".
        assert d.py_version == sys.version[:3]
        assert d.platform == None

    def testDistroParse(self):
        d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg")
        self.checkFooPkg(d)
        d = dist_from_fn("FooPkg-1.3.post1-py2.4-win32.egg-info")
        self.checkFooPkg(d)

    def testDistroMetadata(self):
        # Version read from PKG-INFO metadata rather than the filename.
        d = Distribution(
            "/some/path", project_name="FooPkg", py_version="2.4", platform="win32",
            metadata = Metadata(
                ('PKG-INFO',"Metadata-Version: 1.0\nVersion: 1.3-1\n")
            )
        )
        self.checkFooPkg(d)

    def distRequires(self, txt):
        # Build a Distribution whose requirements come from a depends.txt body.
        return Distribution("/foo", metadata=Metadata(('depends.txt', txt)))

    def checkRequires(self, dist, txt, extras=()):
        assert list(dist.requires(extras)) == list(parse_requirements(txt))

    def testDistroDependsSimple(self):
        for v in "Twisted>=1.5", "Twisted>=1.5\nZConfig>=2.0":
            self.checkRequires(self.distRequires(v), v)

    def testResolve(self):
        ad = pkg_resources.Environment([])
        ws = WorkingSet([])
        # Resolving no requirements -> nothing to install
        assert list(ws.resolve([], ad)) == []
        # Request something not in the collection -> DistributionNotFound
        with pytest.raises(pkg_resources.DistributionNotFound):
            ws.resolve(parse_requirements("Foo"), ad)
        Foo = Distribution.from_filename(
            "/foo_dir/Foo-1.2.egg",
            metadata=Metadata(('depends.txt', "[bar]\nBaz>=2.0"))
        )
        ad.add(Foo)
        ad.add(Distribution.from_filename("Foo-0.9.egg"))
        # Request thing(s) that are available -> list to activate
        for i in range(3):
            targets = list(ws.resolve(parse_requirements("Foo"), ad))
            assert targets == [Foo]
            list(map(ws.add,targets))
        with pytest.raises(VersionConflict):
            ws.resolve(parse_requirements("Foo==0.9"), ad)
        ws = WorkingSet([]) # reset
        # Request an extra that causes an unresolved dependency for "Baz"
        with pytest.raises(pkg_resources.DistributionNotFound):
            ws.resolve(parse_requirements("Foo[bar]"), ad)
        Baz = Distribution.from_filename(
            "/foo_dir/Baz-2.1.egg", metadata=Metadata(('depends.txt', "Foo"))
        )
        ad.add(Baz)
        # Activation list now includes resolved dependency
        assert list(ws.resolve(parse_requirements("Foo[bar]"), ad)) ==[Foo,Baz]
        # Requests for conflicting versions produce VersionConflict
        with pytest.raises(VersionConflict) as vc:
            ws.resolve(parse_requirements("Foo==1.2\nFoo!=1.2"), ad)
        msg = 'Foo 0.9 is installed but Foo==1.2 is required'
        assert vc.value.report() == msg

    def testDistroDependsOptions(self):
        d = self.distRequires("""
Twisted>=1.5
[docgen]
ZConfig>=2.0
docutils>=0.3
[fastcgi]
fcgiapp>=0.1""")
        self.checkRequires(d,"Twisted>=1.5")
        self.checkRequires(
            d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3".split(), ["docgen"]
        )
        self.checkRequires(
            d,"Twisted>=1.5 fcgiapp>=0.1".split(), ["fastcgi"]
        )
        self.checkRequires(
            d,"Twisted>=1.5 ZConfig>=2.0 docutils>=0.3 fcgiapp>=0.1".split(),
            ["docgen","fastcgi"]
        )
        self.checkRequires(
            d,"Twisted>=1.5 fcgiapp>=0.1 ZConfig>=2.0 docutils>=0.3".split(),
            ["fastcgi", "docgen"]
        )
        with pytest.raises(pkg_resources.UnknownExtra):
            d.requires(["foo"])
class TestWorkingSet:
    def test_find_conflicting(self):
        # find() raises VersionConflict when an installed distribution
        # does not satisfy the requested requirement.
        ws = WorkingSet([])
        Foo = Distribution.from_filename("/foo_dir/Foo-1.2.egg")
        ws.add(Foo)
        # create a requirement that conflicts with Foo 1.2
        req = next(parse_requirements("Foo<1.2"))
        with pytest.raises(VersionConflict) as vc:
            ws.find(req)
        msg = 'Foo 1.2 is installed but Foo<1.2 is required'
        assert vc.value.report() == msg

    def test_resolve_conflicts_with_prior(self):
        """
        A ContextualVersionConflict should be raised when a requirement
        conflicts with a prior requirement for a different package.
        """
        # Create installation where Foo depends on Baz 1.0 and Bar depends on
        # Baz 2.0.
        ws = WorkingSet([])
        md = Metadata(('depends.txt', "Baz==1.0"))
        Foo = Distribution.from_filename("/foo_dir/Foo-1.0.egg", metadata=md)
        ws.add(Foo)
        md = Metadata(('depends.txt', "Baz==2.0"))
        Bar = Distribution.from_filename("/foo_dir/Bar-1.0.egg", metadata=md)
        ws.add(Bar)
        Baz = Distribution.from_filename("/foo_dir/Baz-1.0.egg")
        ws.add(Baz)
        Baz = Distribution.from_filename("/foo_dir/Baz-2.0.egg")
        ws.add(Baz)
        with pytest.raises(VersionConflict) as vc:
            ws.resolve(parse_requirements("Foo\nBar\n"))
        msg = "Baz 1.0 is installed but Baz==2.0 is required by {'Bar'}"
        if pkg_resources.PY2:
            # Python 2 renders the requirers set differently in the message.
            msg = msg.replace("{'Bar'}", "set(['Bar'])")
        assert vc.value.report() == msg
class TestEntryPoints:
    def assertfields(self, ep):
        # Shared assertions for the canonical "foo" entry point.
        assert ep.name == "foo"
        assert ep.module_name == "pkg_resources.tests.test_resources"
        assert ep.attrs == ("TestEntryPoints",)
        assert ep.extras == ("x",)
        assert ep.load() is TestEntryPoints
        expect = "foo = pkg_resources.tests.test_resources:TestEntryPoints [x]"
        assert str(ep) == expect

    def setup_method(self, method):
        # A distribution declaring the [x] extra, used as the ep's dist.
        self.dist = Distribution.from_filename(
            "FooPkg-1.2-py2.4.egg", metadata=Metadata(('requires.txt','[x]')))

    def testBasics(self):
        ep = EntryPoint(
            "foo", "pkg_resources.tests.test_resources", ["TestEntryPoints"],
            ["x"], self.dist
        )
        self.assertfields(ep)

    def testParse(self):
        s = "foo = pkg_resources.tests.test_resources:TestEntryPoints [x]"
        ep = EntryPoint.parse(s, self.dist)
        self.assertfields(ep)
        ep = EntryPoint.parse("bar baz= spammity[PING]")
        assert ep.name == "bar baz"
        assert ep.module_name == "spammity"
        assert ep.attrs == ()
        assert ep.extras == ("ping",)
        ep = EntryPoint.parse(" fizzly = wocka:foo")
        assert ep.name == "fizzly"
        assert ep.module_name == "wocka"
        assert ep.attrs == ("foo",)
        assert ep.extras == ()
        # plus in the name
        spec = "html+mako = mako.ext.pygmentplugin:MakoHtmlLexer"
        ep = EntryPoint.parse(spec)
        assert ep.name == 'html+mako'

    # Malformed specs that EntryPoint.parse must reject.
    reject_specs = "foo", "x=a:b:c", "q=x/na", "fez=pish:tush-z", "x=f[a]>2"
    @pytest.mark.parametrize("reject_spec", reject_specs)
    def test_reject_spec(self, reject_spec):
        with pytest.raises(ValueError):
            EntryPoint.parse(reject_spec)

    def test_printable_name(self):
        """
        Allow any printable character in the name.
        """
        # Create a name with all printable characters; strip the whitespace.
        name = string.printable.strip()
        spec = "{name} = module:attr".format(**locals())
        ep = EntryPoint.parse(spec)
        assert ep.name == name

    def checkSubMap(self, m):
        assert len(m) == len(self.submap_expect)
        for key, ep in pkg_resources.iteritems(self.submap_expect):
            assert repr(m.get(key)) == repr(ep)

    # Expected parse of submap_str below.
    submap_expect = dict(
        feature1=EntryPoint('feature1', 'somemodule', ['somefunction']),
        feature2=EntryPoint('feature2', 'another.module', ['SomeClass'], ['extra1','extra2']),
        feature3=EntryPoint('feature3', 'this.module', extras=['something'])
    )
    submap_str = """
# define features for blah blah
feature1 = somemodule:somefunction
feature2 = another.module:SomeClass [extra1,extra2]
feature3 = this.module [something]
"""

    def testParseList(self):
        self.checkSubMap(EntryPoint.parse_group("xyz", self.submap_str))
        with pytest.raises(ValueError):
            EntryPoint.parse_group("x a", "foo=bar")
        with pytest.raises(ValueError):
            EntryPoint.parse_group("x", ["foo=baz", "foo=bar"])

    def testParseMap(self):
        m = EntryPoint.parse_map({'xyz':self.submap_str})
        self.checkSubMap(m['xyz'])
        assert list(m.keys()) == ['xyz']
        m = EntryPoint.parse_map("[xyz]\n"+self.submap_str)
        self.checkSubMap(m['xyz'])
        assert list(m.keys()) == ['xyz']
        with pytest.raises(ValueError):
            EntryPoint.parse_map(["[xyz]", "[xyz]"])
        with pytest.raises(ValueError):
            EntryPoint.parse_map(self.submap_str)
class TestRequirements:
    def testBasics(self):
        r = Requirement.parse("Twisted>=1.2")
        assert str(r) == "Twisted>=1.2"
        assert repr(r) == "Requirement.parse('Twisted>=1.2')"
        # Project-name comparison is case-insensitive.
        assert r == Requirement("Twisted", [('>=','1.2')], ())
        assert r == Requirement("twisTed", [('>=','1.2')], ())
        assert r != Requirement("Twisted", [('>=','2.0')], ())
        assert r != Requirement("Zope", [('>=','1.2')], ())
        assert r != Requirement("Zope", [('>=','3.0')], ())
        assert r != Requirement.parse("Twisted[extras]>=1.2")

    def testOrdering(self):
        # Specifier order does not affect equality or string form.
        r1 = Requirement("Twisted", [('==','1.2c1'),('>=','1.2')], ())
        r2 = Requirement("Twisted", [('>=','1.2'),('==','1.2c1')], ())
        assert r1 == r2
        assert str(r1) == str(r2)
        assert str(r2) == "Twisted==1.2c1,>=1.2"

    def testBasicContains(self):
        # `in` accepts parsed versions, version strings, and Distributions.
        r = Requirement("Twisted", [('>=','1.2')], ())
        foo_dist = Distribution.from_filename("FooPkg-1.3_1.egg")
        twist11 = Distribution.from_filename("Twisted-1.1.egg")
        twist12 = Distribution.from_filename("Twisted-1.2.egg")
        assert parse_version('1.2') in r
        assert parse_version('1.1') not in r
        assert '1.2' in r
        assert '1.1' not in r
        assert foo_dist not in r
        assert twist11 not in r
        assert twist12 in r

    def testOptionsAndHashing(self):
        r1 = Requirement.parse("Twisted[foo,bar]>=1.2")
        r2 = Requirement.parse("Twisted[bar,FOO]>=1.2")
        assert r1 == r2
        assert r1.extras == ("foo","bar")
        assert r2.extras == ("bar","foo") # extras are normalized
        assert hash(r1) == hash(r2)
        assert (
            hash(r1)
            ==
            hash((
                "twisted",
                packaging.specifiers.SpecifierSet(">=1.2"),
                frozenset(["foo","bar"]),
            ))
        )

    def testVersionEquality(self):
        r1 = Requirement.parse("foo==0.3a2")
        r2 = Requirement.parse("foo!=0.3a4")
        d = Distribution.from_filename
        assert d("foo-0.3a4.egg") not in r1
        assert d("foo-0.3a1.egg") not in r1
        assert d("foo-0.3a4.egg") not in r2
        assert d("foo-0.3a2.egg") in r1
        assert d("foo-0.3a2.egg") in r2
        assert d("foo-0.3a3.egg") in r2
        assert d("foo-0.3a5.egg") in r2

    def testSetuptoolsProjectName(self):
        """
        The setuptools project should implement the setuptools package.
        """
        assert (
            Requirement.parse('setuptools').project_name == 'setuptools')
        # setuptools 0.7 and higher means setuptools.
        assert (
            Requirement.parse('setuptools == 0.7').project_name == 'setuptools')
        assert (
            Requirement.parse('setuptools == 0.7a1').project_name == 'setuptools')
        assert (
            Requirement.parse('setuptools >= 0.7').project_name == 'setuptools')
class TestParsing:
    # Covers requirement-line parsing, name/version normalization helpers,
    # and the legacy-compatible behavior of parse_version.

    def testEmptyParse(self):
        assert list(parse_requirements('')) == []

    def testYielding(self):
        # yield_lines flattens nested iterables and strips blank lines.
        for inp,out in [
            ([], []), ('x',['x']), ([[]],[]), (' x\n y', ['x','y']),
            (['x\n\n','y'], ['x','y']),
        ]:
            assert list(pkg_resources.yield_lines(inp)) == out

    def testSplitting(self):
        sample = """
x
[Y]
z
a
[b ]
# foo
c
[ d]
[q]
v
"""
        # Section names are stripped; comments are dropped; empty
        # sections are preserved.
        assert (
            list(pkg_resources.split_sections(sample))
            ==
            [
                (None, ["x"]),
                ("Y", ["z", "a"]),
                ("b", ["c"]),
                ("d", []),
                ("q", ["v"]),
            ]
        )
        with pytest.raises(ValueError):
            list(pkg_resources.split_sections("[foo"))

    def testSafeName(self):
        # safe_name collapses runs of illegal characters into single dashes.
        assert safe_name("adns-python") == "adns-python"
        assert safe_name("WSGI Utils") == "WSGI-Utils"
        assert safe_name("WSGI Utils") == "WSGI-Utils"
        assert safe_name("Money$$$Maker") == "Money-Maker"
        assert safe_name("peak.web") != "peak-web"

    def testSafeVersion(self):
        assert safe_version("1.2-1") == "1.2.post1"
        assert safe_version("1.2 alpha") == "1.2.alpha"
        assert safe_version("2.3.4 20050521") == "2.3.4.20050521"
        assert safe_version("Money$$$Maker") == "Money-Maker"
        assert safe_version("peak.web") == "peak.web"

    def testSimpleRequirements(self):
        assert (
            list(parse_requirements('Twis-Ted>=1.2-1'))
            ==
            [Requirement('Twis-Ted',[('>=','1.2-1')], ())]
        )
        # Line continuations and comments are handled inside a requirement.
        assert (
            list(parse_requirements('Twisted >=1.2, \ # more\n<2.0'))
            ==
            [Requirement('Twisted',[('>=','1.2'),('<','2.0')], ())]
        )
        assert (
            Requirement.parse("FooBar==1.99a3")
            ==
            Requirement("FooBar", [('==','1.99a3')], ())
        )
        with pytest.raises(ValueError):
            Requirement.parse(">=2.3")
        with pytest.raises(ValueError):
            Requirement.parse("x\\")
        with pytest.raises(ValueError):
            Requirement.parse("x==2 q")
        with pytest.raises(ValueError):
            Requirement.parse("X==1\nY==2")
        with pytest.raises(ValueError):
            Requirement.parse("#")

    def testVersionEquality(self):
        # Spellings that must normalize to the same parsed version.
        def c(s1,s2):
            p1, p2 = parse_version(s1),parse_version(s2)
            assert p1 == p2, (s1,s2,p1,p2)
        c('1.2-rc1', '1.2rc1')
        c('0.4', '0.4.0')
        c('0.4.0.0', '0.4.0')
        c('0.4.0-0', '0.4-0')
        c('0post1', '0.0post1')
        c('0pre1', '0.0c1')
        c('0.0.0preview1', '0c1')
        c('0.0c1', '0-rc1')
        c('1.2a1', '1.2.a.1')
        c('1.2.a', '1.2a')

    def testVersionOrdering(self):
        # c(s1, s2) asserts strict ordering s1 < s2.
        def c(s1,s2):
            p1, p2 = parse_version(s1),parse_version(s2)
            assert p1<p2, (s1,s2,p1,p2)
        c('2.1','2.1.1')
        c('2a1','2b0')
        c('2a1','2.1')
        c('2.3a1', '2.3')
        c('2.1-1', '2.1-2')
        c('2.1-1', '2.1.1')
        c('2.1', '2.1post4')
        c('2.1a0-20040501', '2.1')
        c('1.1', '02.1')
        c('3.2', '3.2.post0')
        c('3.2post1', '3.2post2')
        c('0.4', '4.0')
        c('0.0.4', '0.4.0')
        c('0post1', '0.4post1')
        c('2.1.0-rc1','2.1.0')
        c('2.1dev','2.1a0')
        # A batch of adjacent real-world versions; every later entry must
        # sort below every earlier one.
        torture = """
0.80.1-3 0.80.1-2 0.80.1-1 0.79.9999+0.80.0pre4-1
0.79.9999+0.80.0pre2-3 0.79.9999+0.80.0pre2-2
0.77.2-1 0.77.1-1 0.77.0-1
""".split()
        for p,v1 in enumerate(torture):
            for v2 in torture[p+1:]:
                c(v2,v1)

    def testVersionBuildout(self):
        """
        Buildout has a function in its bootstrap.py that inspected the return
        value of parse_version. The new parse_version returns a Version class
        which needs to support this behavior, at least for now.
        """
        def buildout(parsed_version):
            _final_parts = '*final-', '*final'
            def _final_version(parsed_version):
                for part in parsed_version:
                    if (part[:1] == '*') and (part not in _final_parts):
                        return False
                return True
            return _final_version(parsed_version)
        assert buildout(parse_version("1.0"))
        assert not buildout(parse_version("1.0a1"))

    def testVersionIndexable(self):
        """
        Some projects were doing things like parse_version("v")[0], so we'll
        support indexing the same as we support iterating.
        """
        assert parse_version("1.0")[0] == "00000001"

    def testVersionTupleSort(self):
        """
        Some projects expected to be able to sort tuples against the return
        value of parse_version. So again we'll add a warning enabled shim to
        make this possible.
        """
        assert parse_version("1.0") < tuple(parse_version("2.0"))
        assert parse_version("1.0") <= tuple(parse_version("2.0"))
        assert parse_version("1.0") == tuple(parse_version("1.0"))
        assert parse_version("3.0") > tuple(parse_version("2.0"))
        assert parse_version("3.0") >= tuple(parse_version("2.0"))
        assert parse_version("3.0") != tuple(parse_version("2.0"))
        assert not (parse_version("3.0") != tuple(parse_version("3.0")))

    def testVersionHashable(self):
        """
        Ensure that our versions stay hashable even though we've subclassed
        them and added some shim code to them.
        """
        assert (
            hash(parse_version("1.0"))
            ==
            hash(parse_version("1.0"))
        )
class TestNamespaces:
    def setup_method(self, method):
        # Snapshot global namespace-package state and sys.path so each
        # test runs against a clean, disposable site-packages directory.
        self._ns_pkgs = pkg_resources._namespace_packages.copy()
        self._tmpdir = tempfile.mkdtemp(prefix="tests-setuptools-")
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs"))
        self._prev_sys_path = sys.path[:]
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs"))

    def teardown_method(self, method):
        # Restore the snapshots taken in setup_method.
        shutil.rmtree(self._tmpdir)
        pkg_resources._namespace_packages = self._ns_pkgs.copy()
        sys.path = self._prev_sys_path[:]

    @pytest.mark.skipif(os.path.islink(tempfile.gettempdir()),
                        reason="Test fails when /tmp is a symlink. See #231")
    def test_two_levels_deep(self):
        """
        Test nested namespace packages.

        Create namespace packages in the following tree:
            site-packages-1/pkg1/pkg2
            site-packages-2/pkg1/pkg2
        Check both are in the _namespace_packages dict and that their
        __path__ is correct.
        """
        sys.path.append(os.path.join(self._tmpdir, "site-pkgs2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"))
        os.makedirs(os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"))
        ns_str = "__import__('pkg_resources').declare_namespace(__name__)\n"
        for site in ["site-pkgs", "site-pkgs2"]:
            pkg1_init = open(os.path.join(self._tmpdir, site,
                                          "pkg1", "__init__.py"), "w")
            pkg1_init.write(ns_str)
            pkg1_init.close()
            pkg2_init = open(os.path.join(self._tmpdir, site,
                                          "pkg1", "pkg2", "__init__.py"), "w")
            pkg2_init.write(ns_str)
            pkg2_init.close()
        import pkg1
        assert "pkg1" in pkg_resources._namespace_packages
        # attempt to import pkg2 from site-pkgs2
        import pkg1.pkg2
        # check the _namespace_packages dict
        assert "pkg1.pkg2" in pkg_resources._namespace_packages
        assert pkg_resources._namespace_packages["pkg1"] == ["pkg1.pkg2"]
        # check the __path__ attribute contains both paths
        expected = [
            os.path.join(self._tmpdir, "site-pkgs", "pkg1", "pkg2"),
            os.path.join(self._tmpdir, "site-pkgs2", "pkg1", "pkg2"),
        ]
        assert pkg1.pkg2.__path__ == expected
| mit |
cathyyul/sumo-0.18 | tests/complex/netconvert/left_turn_noblocking_longveh/runner.py | 2 | 1146 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Build a network with netconvert, run a short sumo simulation on it and
# check via the tripinfo output that vehicle 'veh0' completed its trip,
# i.e. the long vehicle's left turn did not block the junction.
# (The previous header comment about "jtrrouter" was copied from another
# test and did not describe this script.)
import sys,os,subprocess,random
sys.path.append(os.path.join(os.path.dirname(sys.argv[0]), '..', '..', '..', '..', "tools"))
from sumolib import checkBinary
# Artefacts produced by the two tool invocations below.
net_output = 'joined.net.xml'
trips_output = 'trips.log'
netconvert = checkBinary('netconvert')
assert(netconvert)
sumo = checkBinary('sumo')
assert(sumo)
# netconvert: build the network from plain node/edge descriptions.
args_netc = [netconvert,
             '--node-files', 'input_nodes.nod.xml',
             '--edge-files', 'input_edges.edg.xml',
             '--output', net_output,
             '--offset.disable-normalization']
# sumo: run a 50-second simulation writing per-vehicle trip statistics.
args_sumo = [sumo,
             '--net-file', net_output,
             '--route-files', 'input_routes.rou.xml',
             '--end', '50',
             '--no-step-log',
             '--no-duration-log',
             '--tripinfo-output', trips_output]
subprocess.call(args_netc)
subprocess.call(args_sumo)
# vehicles should have completed their trips: a tripinfo line mentioning
# 'veh0' only appears once that vehicle has arrived.
complete = False
for line in open(trips_output):
    if 'veh0' in line:
        complete = True
if complete:
    # NOTE(review): 'occured' typo is part of the expected test output --
    # confirm reference logs before correcting the string.
    print('test passed. no blocking occured')
else:
    print('test failed. vehicles were blocked')
| gpl-3.0 |
lukw00/shogun | examples/undocumented/python_modular/kernel_weighted_degree_position_string_modular.py | 26 | 1136 | #!/usr/bin/env python
from tools.load import LoadMatrix
lm=LoadMatrix()
# DNA training/test sequences used to build the kernel matrices below.
traindat = lm.load_dna('../data/fm_train_dna.dat')
testdat = lm.load_dna('../data/fm_test_dna.dat')
# Two example configurations: same data, WD-kernel degree 20 and 22.
parameter_list = [[traindat,testdat,20],[traindat,testdat,22]]
def kernel_weighted_degree_position_string_modular (fm_train_dna=traindat,fm_test_dna=testdat,degree=20):
    """Build a weighted-degree position string kernel over DNA sequences.

    Returns (km_train, km_test, kernel): the train/train kernel matrix,
    the train/test kernel matrix, and the kernel object itself.
    """
    from modshogun import StringCharFeatures, DNA
    from modshogun import WeightedDegreePositionStringKernel, MSG_DEBUG
    from numpy import zeros,ones,float64,int32

    # Wrap the raw DNA strings as shogun string features.
    train_feats = StringCharFeatures(fm_train_dna, DNA)
    #train_feats.io.set_loglevel(MSG_DEBUG)
    test_feats = StringCharFeatures(fm_test_dna, DNA)

    wd_kernel = WeightedDegreePositionStringKernel(train_feats, train_feats, degree)
    # Uniform position weights, and a positional shift of up to 10 allowed
    # at every position of the (fixed-length) sequences.
    seq_len = len(fm_train_dna[0])
    wd_kernel.set_shifts(10*ones(seq_len, dtype=int32))
    wd_kernel.set_position_weights(ones(seq_len, dtype=float64))

    km_train = wd_kernel.get_kernel_matrix()
    wd_kernel.init(train_feats, test_feats)
    km_test = wd_kernel.get_kernel_matrix()
    return km_train,km_test,wd_kernel
if __name__=='__main__':
    # Smoke-run the example with the first parameter configuration.
    print('WeightedDegreePositionString')
    kernel_weighted_degree_position_string_modular(*parameter_list[0])
| gpl-3.0 |
JoshuaKGoldberg/General-Language-Syntax-Compiler | Samples/basic.py | 2 | 1406 | """
Basic GLS Syntax
Version 0.0.1
Josh Goldberg
"""
# Function Definitions
def sayHello():
    """Print a fixed greeting to stdout."""
    print("Hello world!")
def combineStrings(a, b):
    """Return the concatenation of *a* and *b* (any `+`-compatible pair)."""
    combined = a + b
    return combined
# Class Declarations
class Point:
    """2-D point used by the GLS sample; the explicit getter/setter style
    is intentional here (it mirrors the cross-language sample code)."""
    # Class-level defaults; shadowed by instance attributes in __init__.
    x = None
    y = None
    def __init__(self, x, y):
        self.x = x
        self.y = y
    def setX(self, x):
        self.x = x
    def setY(self, y):
        self.y = y
    def getX(self):
        return self.x
    def getY(self):
        return self.y
    def getManhattanTotal(self):
        # Sum of the two coordinates (the sample's "Manhattan total").
        return self.x + self.y
# Main
if __name__ == '__main__':
    # Demo script exercising each GLS construct in turn.
    # Basic Usage
    print("Hello world!") # Basic printing here...
    # Variables
    a = "Hello world!"
    b = 7
    c = 11.7
    d = True
    # Operations
    e = 1 + 2
    f = b < c
    # If Statements
    if d:
        print("d is true!")
    if c < 14:
        print("c is less than 14!")
    # While Loops: the first runs exactly once (d is cleared inside).
    while d:
        print("d is", d)
        d = False
    # Counts c down by 1 until it drops to 3 or below (floats: 11.7 .. 3.7).
    while c > 3:
        print("c is", c)
        c -= 1
    # For Loops
    for i in range(0, 7):
        print("i plus one is", i + 1)
    # Calling Functions (return values deliberately discarded).
    sayHello()
    combineStrings("hello", "world")
    combineStrings("hello" + " ", "world")
    combineStrings(combineStrings("hello", "world"), "world")
    # Class Usage
    g = Point(3, 7)
    g.setX(4)
    print(g.getManhattanTotal())
    # fin
| mit |
isotoma/django-cms | cms/migrations/0063_auto__chg_field_staticplaceholder_site.py | 63 | 17740 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply: make StaticPlaceholder.site nullable (allow NULL site_id)."""
        # Changing field 'StaticPlaceholder.site'
        db.alter_column(u'cms_staticplaceholder', 'site_id', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['sites.Site'], null=True))
    def backwards(self, orm):
        """Revert: make StaticPlaceholder.site NOT NULL again, backfilling
        existing NULL rows with site pk 1."""
        # Changing field 'StaticPlaceholder.site'
        db.alter_column(u'cms_staticplaceholder', 'site_id', self.gf('django.db.models.fields.related.ForeignKey')(default=1, to=orm['sites.Site']))
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'cms.cmsplugin': {
'Meta': {'object_name': 'CMSPlugin'},
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.CMSPlugin']", 'null': 'True', 'blank': 'True'}),
'placeholder': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'}),
'plugin_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'}),
'position': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True', 'blank': 'True'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
},
'cms.globalpagepermission': {
'Meta': {'object_name': 'GlobalPagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_recover_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'sites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.page': {
'Meta': {'ordering': "('tree_id', 'lft')", 'unique_together': "(('publisher_is_draft', 'application_namespace'), ('reverse_id', 'site', 'publisher_is_draft'))", 'object_name': 'Page'},
'application_namespace': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'application_urls': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '200', 'null': 'True', 'blank': 'True'}),
'changed_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'changed_date': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
'created_by': ('django.db.models.fields.CharField', [], {'max_length': '70'}),
'creation_date': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'is_home': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'languages': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'limit_visibility_in_menu': ('django.db.models.fields.SmallIntegerField', [], {'default': 'None', 'null': 'True', 'db_index': 'True', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'navigation_extenders': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '80', 'null': 'True', 'blank': 'True'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['cms.Page']"}),
'placeholders': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['cms.Placeholder']", 'symmetrical': 'False'}),
'publication_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publication_end_date': ('django.db.models.fields.DateTimeField', [], {'db_index': 'True', 'null': 'True', 'blank': 'True'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Page']"}),
'reverse_id': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '40', 'null': 'True', 'blank': 'True'}),
'revision_id': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_pages'", 'to': u"orm['sites.Site']"}),
'soft_root': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
'template': ('django.db.models.fields.CharField', [], {'default': "'INHERIT'", 'max_length': '100'}),
'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'xframe_options': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'cms.pagepermission': {
'Meta': {'object_name': 'PagePermission'},
'can_add': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_change_advanced_settings': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_change_permissions': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'can_delete': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_move_page': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_publish': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'can_view': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'grant_on': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'group': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.Group']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Page']", 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
'cms.pageuser': {
'Meta': {'object_name': 'PageUser', '_ormbases': [u'auth.User']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_users'", 'to': u"orm['auth.User']"}),
u'user_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.pageusergroup': {
'Meta': {'object_name': 'PageUserGroup', '_ormbases': [u'auth.Group']},
'created_by': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'created_usergroups'", 'to': u"orm['auth.User']"}),
u'group_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.Group']", 'unique': 'True', 'primary_key': 'True'})
},
'cms.placeholder': {
'Meta': {'object_name': 'Placeholder'},
'default_width': ('django.db.models.fields.PositiveSmallIntegerField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slot': ('django.db.models.fields.CharField', [], {'max_length': '50', 'db_index': 'True'})
},
'cms.placeholderreference': {
'Meta': {'object_name': 'PlaceholderReference', '_ormbases': ['cms.CMSPlugin']},
u'cmsplugin_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['cms.CMSPlugin']", 'unique': 'True', 'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'placeholder_ref': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True'})
},
'cms.staticplaceholder': {
'Meta': {'unique_together': "(('code', 'site'),)", 'object_name': 'StaticPlaceholder'},
'code': ('django.db.models.fields.CharField', [], {'max_length': '255', 'blank': 'True'}),
'creation_method': ('django.db.models.fields.CharField', [], {'default': "'code'", 'max_length': '20', 'blank': 'True'}),
'dirty': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'draft': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_draft'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'blank': 'True'}),
'public': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'static_public'", 'null': 'True', 'to': "orm['cms.Placeholder']"}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['sites.Site']", 'null': 'True', 'blank': 'True'})
},
'cms.title': {
'Meta': {'unique_together': "(('language', 'page'),)", 'object_name': 'Title'},
'creation_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'has_url_overwrite': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'db_index': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '15', 'db_index': 'True'}),
'menu_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'meta_description': ('django.db.models.fields.TextField', [], {'max_length': '155', 'null': 'True', 'blank': 'True'}),
'page': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'title_set'", 'to': "orm['cms.Page']"}),
'page_title': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'path': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'}),
'published': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'publisher_is_draft': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'publisher_public': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'publisher_draft'", 'unique': 'True', 'null': 'True', 'to': "orm['cms.Title']"}),
'publisher_state': ('django.db.models.fields.SmallIntegerField', [], {'default': '0', 'db_index': 'True'}),
'redirect': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '255'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
'cms.usersettings': {
'Meta': {'object_name': 'UserSettings'},
'clipboard': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['cms.Placeholder']", 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'language': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'djangocms_usersettings'", 'to': u"orm['auth.User']"})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['cms'] | bsd-3-clause |
thundernet8/WRGameVideos-Server | venv/lib/python2.7/site-packages/pip/_vendor/packaging/__about__.py | 441 | 1073 | # Copyright 2014 Donald Stufft
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function

# Package metadata, exposed as the canonical dunder attributes.
__all__ = [
    "__title__",
    "__summary__",
    "__uri__",
    "__version__",
    "__author__",
    "__email__",
    "__license__",
    "__copyright__",
]

__title__ = "packaging"
__summary__ = "Core utilities for Python packages"
__uri__ = "https://github.com/pypa/packaging"

__version__ = "15.3"

__author__ = "Donald Stufft"
__email__ = "donald@stufft.io"

__license__ = "Apache License, Version 2.0"
__copyright__ = "Copyright 2014 %s" % __author__
| gpl-2.0 |
40223139/39g7test | static/Brython3.1.0-20150301-090019/Lib/test/test_re.py | 718 | 56009 | # FIXME: brython: implement test.support
#from test.support import verbose, run_unittest, gc_collect, bigmemtest, _2G, \
# cpython_only
verbose = True
# FIXME: brython: Not used in this module ?
#import io
import re
# FIXME: brython: implement re.Scanner
#from re import Scanner
import sre_constants
import sys
import string
import traceback
# FIXME: brython: implement _weakref
#from weakref import proxy
# Misc tests from Tim Peters' re.doc
# WARNING: Don't change details in these tests if you don't know
# what you're doing. Some of these tests were carefully modeled to
# cover most of the code.
import unittest
class ReTests(unittest.TestCase):
# FIXME: brython: implement test.support
# def test_keep_buffer(self):
# # See bug 14212
# b = bytearray(b'x')
# it = re.finditer(b'a', b)
# with self.assertRaises(BufferError):
# b.extend(b'x'*400)
# list(it)
# del it
# gc_collect()
# b.extend(b'x'*400)
# FIXME: brython: implement _weakref
# def test_weakref(self):
# s = 'QabbbcR'
# x = re.compile('ab+c')
# y = proxy(x)
# self.assertEqual(x.findall('QabbbcR'), y.findall('QabbbcR'))
def test_search_star_plus(self):
self.assertEqual(re.search('x*', 'axx').span(0), (0, 0))
self.assertEqual(re.search('x*', 'axx').span(), (0, 0))
self.assertEqual(re.search('x+', 'axx').span(0), (1, 3))
self.assertEqual(re.search('x+', 'axx').span(), (1, 3))
self.assertEqual(re.search('x', 'aaa'), None)
self.assertEqual(re.match('a*', 'xxx').span(0), (0, 0))
self.assertEqual(re.match('a*', 'xxx').span(), (0, 0))
self.assertEqual(re.match('x*', 'xxxa').span(0), (0, 3))
self.assertEqual(re.match('x*', 'xxxa').span(), (0, 3))
self.assertEqual(re.match('a+', 'xxx'), None)
def bump_num(self, matchobj):
int_value = int(matchobj.group(0))
return str(int_value + 1)
    def test_basic_re_sub(self):
        """sub() basics: literal, callable and backreference replacements."""
        # NOTE(review): the r'\t...\D' template below relies on pre-3.7
        # behaviour where unknown letter escapes (\B, \Z, \A, \w, ...) pass
        # through literally; modern re raises "bad escape" for them.
        self.assertEqual(re.sub("(?i)b+", "x", "bbbb BBBB"), 'x x')
        self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y'),
                         '9.3 -3 24x100y')
        self.assertEqual(re.sub(r'\d+', self.bump_num, '08.2 -2 23x99y', 3),
                         '9.3 -3 23x99y')
        self.assertEqual(re.sub('.', lambda m: r"\n", 'x'), '\\n')
        self.assertEqual(re.sub('.', r"\n", 'x'), '\n')
        s = r"\1\1"
        self.assertEqual(re.sub('(.)', s, 'x'), 'xx')
        self.assertEqual(re.sub('(.)', re.escape(s), 'x'), s)
        self.assertEqual(re.sub('(.)', lambda m: s, 'x'), s)
        self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<a>', 'xx'), 'xxxx')
        self.assertEqual(re.sub('(?P<a>x)', '\g<a>\g<1>', 'xx'), 'xxxx')
        self.assertEqual(re.sub('(?P<unk>x)', '\g<unk>\g<unk>', 'xx'), 'xxxx')
        self.assertEqual(re.sub('(?P<unk>x)', '\g<1>\g<1>', 'xx'), 'xxxx')
        self.assertEqual(re.sub('a',r'\t\n\v\r\f\a\b\B\Z\a\A\w\W\s\S\d\D','a'),
                         '\t\n\v\r\f\a\b\\B\\Z\a\\A\\w\\W\\s\\S\\d\\D')
        self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'), '\t\n\v\r\f\a')
        self.assertEqual(re.sub('a', '\t\n\v\r\f\a', 'a'),
                         (chr(9)+chr(10)+chr(11)+chr(13)+chr(12)+chr(7)))
        self.assertEqual(re.sub('^\s*', 'X', 'test'), 'Xtest')
def test_bug_449964(self):
# fails for group followed by other escape
self.assertEqual(re.sub(r'(?P<unk>x)', '\g<1>\g<1>\\b', 'xx'),
'xx\bxx\b')
def test_bug_449000(self):
# Test for sub() on escaped characters
self.assertEqual(re.sub(r'\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', r'\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub(r'\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
self.assertEqual(re.sub('\r\n', '\n', 'abc\r\ndef\r\n'),
'abc\ndef\n')
def test_bug_1661(self):
# Verify that flags do not get silently ignored with compiled patterns
pattern = re.compile('.')
self.assertRaises(ValueError, re.match, pattern, 'A', re.I)
self.assertRaises(ValueError, re.search, pattern, 'A', re.I)
self.assertRaises(ValueError, re.findall, pattern, 'A', re.I)
self.assertRaises(ValueError, re.compile, pattern, re.I)
def test_bug_3629(self):
# A regex that triggered a bug in the sre-code validator
re.compile("(?P<quote>)(?(quote))")
    def test_sub_template_numeric_escape(self):
        # bug 776311 and friends
        # NOTE(review): encodes pre-3.7 template semantics -- octal escapes
        # above \0o377 (e.g. r'\400') were silently truncated; modern re
        # raises re.error for them instead.
        self.assertEqual(re.sub('x', r'\0', 'x'), '\0')
        self.assertEqual(re.sub('x', r'\000', 'x'), '\000')
        self.assertEqual(re.sub('x', r'\001', 'x'), '\001')
        self.assertEqual(re.sub('x', r'\008', 'x'), '\0' + '8')
        self.assertEqual(re.sub('x', r'\009', 'x'), '\0' + '9')
        self.assertEqual(re.sub('x', r'\111', 'x'), '\111')
        self.assertEqual(re.sub('x', r'\117', 'x'), '\117')
        self.assertEqual(re.sub('x', r'\1111', 'x'), '\1111')
        self.assertEqual(re.sub('x', r'\1111', 'x'), '\111' + '1')
        self.assertEqual(re.sub('x', r'\00', 'x'), '\x00')
        self.assertEqual(re.sub('x', r'\07', 'x'), '\x07')
        self.assertEqual(re.sub('x', r'\08', 'x'), '\0' + '8')
        self.assertEqual(re.sub('x', r'\09', 'x'), '\0' + '9')
        self.assertEqual(re.sub('x', r'\0a', 'x'), '\0' + 'a')
        self.assertEqual(re.sub('x', r'\400', 'x'), '\0')
        self.assertEqual(re.sub('x', r'\777', 'x'), '\377')
        # References to nonexistent groups raise re.error.
        self.assertRaises(re.error, re.sub, 'x', r'\1', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\8', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\9', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\11', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\18', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\1a', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\90', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\99', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\118', 'x') # r'\11' + '8'
        self.assertRaises(re.error, re.sub, 'x', r'\11a', 'x')
        self.assertRaises(re.error, re.sub, 'x', r'\181', 'x') # r'\18' + '1'
        self.assertRaises(re.error, re.sub, 'x', r'\800', 'x') # r'\80' + '0'
        # in python2.3 (etc), these loop endlessly in sre_parser.py
        self.assertEqual(re.sub('(((((((((((x)))))))))))', r'\11', 'x'), 'x')
        self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\118', 'xyz'),
                         'xz8')
        self.assertEqual(re.sub('((((((((((y))))))))))(.)', r'\11a', 'xyz'),
                         'xza')
def test_qualified_re_sub(self):
self.assertEqual(re.sub('a', 'b', 'aaaaa'), 'bbbbb')
self.assertEqual(re.sub('a', 'b', 'aaaaa', 1), 'baaaa')
def test_bug_114660(self):
self.assertEqual(re.sub(r'(\S)\s+(\S)', r'\1 \2', 'hello there'),
'hello there')
    def test_bug_462270(self):
        # Test for empty sub() behaviour, see SF bug #462270
        # NOTE(review): expectations encode pre-3.7 semantics; since Python
        # 3.7 an empty match adjacent to a previous non-empty match is also
        # replaced, giving '-a-b--d-' for the first call.
        self.assertEqual(re.sub('x*', '-', 'abxd'), '-a-b-d-')
        self.assertEqual(re.sub('x+', '-', 'abxd'), 'ab-d')
def test_symbolic_groups(self):
re.compile('(?P<a>x)(?P=a)(?(a)y)')
re.compile('(?P<a1>x)(?P=a1)(?(a1)y)')
self.assertRaises(re.error, re.compile, '(?P<a>)(?P<a>)')
self.assertRaises(re.error, re.compile, '(?Px)')
self.assertRaises(re.error, re.compile, '(?P=)')
self.assertRaises(re.error, re.compile, '(?P=1)')
self.assertRaises(re.error, re.compile, '(?P=a)')
self.assertRaises(re.error, re.compile, '(?P=a1)')
self.assertRaises(re.error, re.compile, '(?P=a.)')
self.assertRaises(re.error, re.compile, '(?P<)')
self.assertRaises(re.error, re.compile, '(?P<>)')
self.assertRaises(re.error, re.compile, '(?P<1>)')
self.assertRaises(re.error, re.compile, '(?P<a.>)')
self.assertRaises(re.error, re.compile, '(?())')
self.assertRaises(re.error, re.compile, '(?(a))')
self.assertRaises(re.error, re.compile, '(?(1a))')
self.assertRaises(re.error, re.compile, '(?(a.))')
# New valid/invalid identifiers in Python 3
re.compile('(?P<µ>x)(?P=µ)(?(µ)y)')
re.compile('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)(?P=𝔘𝔫𝔦𝔠𝔬𝔡𝔢)(?(𝔘𝔫𝔦𝔠𝔬𝔡𝔢)y)')
self.assertRaises(re.error, re.compile, '(?P<©>x)')
    def test_symbolic_refs(self):
        # Malformed \g<...> templates raise re.error; a well-formed but
        # unknown group name raises IndexError instead.
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<a a>', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<>', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<1a1>', 'xx')
        self.assertRaises(IndexError, re.sub, '(?P<a>x)', '\g<ab>', 'xx')
        # Referencing a group that exists but did not participate in the
        # match is also an error.
        self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\g<b>', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)|(?P<b>y)', '\\2', 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', '\g<-1>', 'xx')
        # New valid/invalid identifiers in Python 3
        self.assertEqual(re.sub('(?P<µ>x)', r'\g<µ>', 'xx'), 'xx')
        self.assertEqual(re.sub('(?P<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>x)', r'\g<𝔘𝔫𝔦𝔠𝔬𝔡𝔢>', 'xx'), 'xx')
        self.assertRaises(re.error, re.sub, '(?P<a>x)', r'\g<©>', 'xx')
def test_re_subn(self):
self.assertEqual(re.subn("(?i)b+", "x", "bbbb BBBB"), ('x x', 2))
self.assertEqual(re.subn("b+", "x", "bbbb BBBB"), ('x BBBB', 1))
self.assertEqual(re.subn("b+", "x", "xyz"), ('xyz', 0))
self.assertEqual(re.subn("b*", "x", "xyz"), ('xxxyxzx', 4))
self.assertEqual(re.subn("b*", "x", "xyz", 2), ('xxxyz', 2))
    def test_re_split(self):
        # NOTE(review): expectations follow pre-3.7 re.split semantics,
        # where a pattern matching the empty string did not split; since
        # Python 3.7 empty matches also split the subject.
        self.assertEqual(re.split(":", ":a:b::c"), ['', 'a', 'b', '', 'c'])
        self.assertEqual(re.split(":*", ":a:b::c"), ['', 'a', 'b', 'c'])
        self.assertEqual(re.split("(:*)", ":a:b::c"),
                         ['', ':', 'a', ':', 'b', '::', 'c'])
        self.assertEqual(re.split("(?::*)", ":a:b::c"), ['', 'a', 'b', 'c'])
        self.assertEqual(re.split("(:)*", ":a:b::c"),
                         ['', ':', 'a', ':', 'b', ':', 'c'])
        self.assertEqual(re.split("([b:]+)", ":a:b::c"),
                         ['', ':', 'a', ':b::', 'c'])
        # With several capturing groups, non-participating groups yield None.
        self.assertEqual(re.split("(b)|(:+)", ":a:b::c"),
                         ['', None, ':', 'a', None, ':', '', 'b', None, '',
                          None, '::', 'c'])
        self.assertEqual(re.split("(?:b)|(?::+)", ":a:b::c"),
                         ['', 'a', '', '', 'c'])
    def test_qualified_re_split(self):
        # maxsplit limits the number of splits; the unsplit remainder is
        # returned as the final element.  (Pre-3.7 empty-match semantics
        # are assumed for the '(:*)' case.)
        self.assertEqual(re.split(":", ":a:b::c", 2), ['', 'a', 'b::c'])
        self.assertEqual(re.split(':', 'a:b:c:d', 2), ['a', 'b', 'c:d'])
        self.assertEqual(re.split("(:)", ":a:b::c", 2),
                         ['', ':', 'a', ':', 'b::c'])
        self.assertEqual(re.split("(:*)", ":a:b::c", 2),
                         ['', ':', 'a', ':', 'b::c'])
    def test_re_findall(self):
        """findall: no groups -> whole matches, one group -> strings,
        several groups -> tuples."""
        self.assertEqual(re.findall(":+", "abc"), [])
        self.assertEqual(re.findall(":+", "a:b::c:::d"), [":", "::", ":::"])
        self.assertEqual(re.findall("(:+)", "a:b::c:::d"), [":", "::", ":::"])
        self.assertEqual(re.findall("(:)(:*)", "a:b::c:::d"), [(":", ""),
                                                               (":", ":"),
                                                               (":", "::")])
    def test_bug_117612(self):
        """Unmatched groups inside findall tuples come back as ''."""
        self.assertEqual(re.findall(r"(a|(b))", "aba"),
                         [("a", ""),("b", "b"),("a", "")])
    def test_re_match(self):
        """Basic match-object accessors: groups(), group(n), defaults."""
        self.assertEqual(re.match('a', 'a').groups(), ())
        self.assertEqual(re.match('(a)', 'a').groups(), ('a',))
        self.assertEqual(re.match(r'(a)', 'a').group(0), 'a')
        self.assertEqual(re.match(r'(a)', 'a').group(1), 'a')
        self.assertEqual(re.match(r'(a)', 'a').group(1, 1), ('a', 'a'))
        pat = re.compile('((a)|(b))(c)?')
        self.assertEqual(pat.match('a').groups(), ('a', 'a', None, None))
        self.assertEqual(pat.match('b').groups(), ('b', None, 'b', None))
        self.assertEqual(pat.match('ac').groups(), ('a', 'a', None, 'c'))
        self.assertEqual(pat.match('bc').groups(), ('b', None, 'b', 'c'))
        # groups(default) substitutes the default for unmatched groups.
        self.assertEqual(pat.match('bc').groups(""), ('b', "", 'b', 'c'))
        # A single group
        m = re.match('(a)', 'a')
        self.assertEqual(m.group(0), 'a')
        self.assertEqual(m.group(0), 'a')
        self.assertEqual(m.group(1), 'a')
        self.assertEqual(m.group(1, 1), ('a', 'a'))
        # Named groups may be addressed by index or by name, even mixed.
        pat = re.compile('(?:(?P<a1>a)|(?P<b2>b))(?P<c3>c)?')
        self.assertEqual(pat.match('a').group(1, 2, 3), ('a', None, None))
        self.assertEqual(pat.match('b').group('a1', 'b2', 'c3'),
                         (None, 'b', None))
        self.assertEqual(pat.match('ac').group(1, 'b2', 3), ('a', None, 'c'))
    def test_re_groupref_exists(self):
        """Conditional patterns (?(group)yes|no) keyed on a group match."""
        self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a)').groups(),
                         ('(', 'a'))
        self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a').groups(),
                         (None, 'a'))
        self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', 'a)'), None)
        self.assertEqual(re.match('^(\()?([^()]+)(?(1)\))$', '(a'), None)
        self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'ab').groups(),
                         ('a', 'b'))
        self.assertEqual(re.match('^(?:(a)|c)((?(1)b|d))$', 'cd').groups(),
                         (None, 'd'))
        self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'cd').groups(),
                         (None, 'd'))
        self.assertEqual(re.match('^(?:(a)|c)((?(1)|d))$', 'a').groups(),
                         ('a', ''))
        # Tests for bug #1177831: exercise groups other than the first group
        p = re.compile('(?P<g1>a)(?P<g2>b)?((?(g2)c|d))')
        self.assertEqual(p.match('abc').groups(),
                         ('a', 'b', 'c'))
        self.assertEqual(p.match('ad').groups(),
                         ('a', None, 'd'))
        self.assertEqual(p.match('abd'), None)
        self.assertEqual(p.match('ac'), None)
    def test_re_groupref(self):
        """Backreferences \\1 must match the same text as the group."""
        self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a|').groups(),
                         ('|', 'a'))
        self.assertEqual(re.match(r'^(\|)?([^()]+)\1?$', 'a').groups(),
                         (None, 'a'))
        self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', 'a|'), None)
        self.assertEqual(re.match(r'^(\|)?([^()]+)\1$', '|a'), None)
        self.assertEqual(re.match(r'^(?:(a)|c)(\1)$', 'aa').groups(),
                         ('a', 'a'))
        self.assertEqual(re.match(r'^(?:(a)|c)(\1)?$', 'c').groups(),
                         (None, None))
    def test_groupdict(self):
        """groupdict() maps group names to matched substrings."""
        self.assertEqual(re.match('(?P<first>first) (?P<second>second)',
                                  'first second').groupdict(),
                         {'first':'first', 'second':'second'})
    def test_expand(self):
        """expand() resolves numeric (\\n) and named (\\g<name>) refs."""
        self.assertEqual(re.match("(?P<first>first) (?P<second>second)",
                                  "first second")
                                  .expand(r"\2 \1 \g<second> \g<first>"),
                         "second first second first")
def test_repeat_minmax(self):
self.assertEqual(re.match("^(\w){1}$", "abc"), None)
self.assertEqual(re.match("^(\w){1}?$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}$", "abc"), None)
self.assertEqual(re.match("^(\w){1,2}?$", "abc"), None)
self.assertEqual(re.match("^(\w){3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,3}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){1,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^(\w){3,4}?$", "abc").group(1), "c")
self.assertEqual(re.match("^x{1}$", "xxx"), None)
self.assertEqual(re.match("^x{1}?$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}$", "xxx"), None)
self.assertEqual(re.match("^x{1,2}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,3}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{1,4}?$", "xxx"), None)
self.assertNotEqual(re.match("^x{3,4}?$", "xxx"), None)
self.assertEqual(re.match("^x{}$", "xxx"), None)
self.assertNotEqual(re.match("^x{}$", "x{}"), None)
    def test_getattr(self):
        """Compiled patterns and match objects expose introspection attrs."""
        self.assertEqual(re.compile("(?i)(a)(b)").pattern, "(?i)(a)(b)")
        # str patterns default to Unicode matching, so re.U is implied.
        self.assertEqual(re.compile("(?i)(a)(b)").flags, re.I | re.U)
        self.assertEqual(re.compile("(?i)(a)(b)").groups, 2)
        self.assertEqual(re.compile("(?i)(a)(b)").groupindex, {})
        self.assertEqual(re.compile("(?i)(?P<first>a)(?P<other>b)").groupindex,
                         {'first': 1, 'other': 2})
        self.assertEqual(re.match("(a)", "a").pos, 0)
        self.assertEqual(re.match("(a)", "a").endpos, 1)
        self.assertEqual(re.match("(a)", "a").string, "a")
        self.assertEqual(re.match("(a)", "a").regs, ((0, 1), (0, 1)))
        self.assertNotEqual(re.match("(a)", "a").re, None)
    def test_special_escapes(self):
        """Word-boundary/anchor escapes and class shorthands under flags."""
        self.assertEqual(re.search(r"\b(b.)\b",
                                   "abcd abc bcd bx").group(1), "bx")
        self.assertEqual(re.search(r"\B(b.)\B",
                                   "abc bcd bc abxd").group(1), "bx")
        self.assertEqual(re.search(r"\b(b.)\b",
                                   "abcd abc bcd bx", re.LOCALE).group(1), "bx")
        self.assertEqual(re.search(r"\B(b.)\B",
                                   "abc bcd bc abxd", re.LOCALE).group(1), "bx")
        self.assertEqual(re.search(r"\b(b.)\b",
                                   "abcd abc bcd bx", re.UNICODE).group(1), "bx")
        self.assertEqual(re.search(r"\B(b.)\B",
                                   "abc bcd bc abxd", re.UNICODE).group(1), "bx")
        self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
        self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
        self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
        # NOTE(review): the next five assertions repeat earlier ones;
        # upstream CPython uses bytes patterns/subjects here, which this
        # port appears to have replaced with str — confirm intent.
        self.assertEqual(re.search(r"\b(b.)\b",
                                   "abcd abc bcd bx").group(1), "bx")
        self.assertEqual(re.search(r"\B(b.)\B",
                                   "abc bcd bc abxd").group(1), "bx")
        self.assertEqual(re.search(r"^abc$", "\nabc\n", re.M).group(0), "abc")
        self.assertEqual(re.search(r"^\Aabc\Z$", "abc", re.M).group(0), "abc")
        self.assertEqual(re.search(r"^\Aabc\Z$", "\nabc\n", re.M), None)
        self.assertEqual(re.search(r"\d\D\w\W\s\S",
                                   "1aa! a").group(0), "1aa! a")
        self.assertEqual(re.search(r"\d\D\w\W\s\S",
                                   "1aa! a", re.LOCALE).group(0), "1aa! a")
        self.assertEqual(re.search(r"\d\D\w\W\s\S",
                                   "1aa! a", re.UNICODE).group(0), "1aa! a")
    def test_string_boundaries(self):
        """\\b/\\B semantics at string edges and in empty strings."""
        # See http://bugs.python.org/issue10713
        self.assertEqual(re.search(r"\b(abc)\b", "abc").group(1),
                         "abc")
        # There's a word boundary at the start of a string.
        self.assertTrue(re.match(r"\b", "abc"))
        # A non-empty string includes a non-boundary zero-length match.
        self.assertTrue(re.search(r"\B", "abc"))
        # There is no non-boundary match at the start of a string.
        self.assertFalse(re.match(r"\B", "abc"))
        # However, an empty string contains no word boundaries, and also no
        # non-boundaries.
        self.assertEqual(re.search(r"\B", ""), None)
        # This one is questionable and different from the perlre behaviour,
        # but describes current behavior.
        self.assertEqual(re.search(r"\b", ""), None)
        # A single word-character string has two boundaries, but no
        # non-boundary gaps.
        self.assertEqual(len(re.findall(r"\b", "a")), 2)
        self.assertEqual(len(re.findall(r"\B", "a")), 0)
        # If there are no words, there are no boundaries
        # NOTE(review): the next two assertions look identical; upstream
        # CPython uses a multi-space subject in the second — confirm.
        self.assertEqual(len(re.findall(r"\b", " ")), 0)
        self.assertEqual(len(re.findall(r"\b", " ")), 0)
        # Can match around the whitespace.
        self.assertEqual(len(re.findall(r"\B", " ")), 2)
    def test_bigcharset(self):
        """Character classes containing non-Latin-1 (wide) characters."""
        self.assertEqual(re.match("([\u2222\u2223])",
                                  "\u2222").group(1), "\u2222")
        self.assertEqual(re.match("([\u2222\u2223])",
                                  "\u2222", re.UNICODE).group(1), "\u2222")
    def test_big_codesize(self):
        """A pattern with 10000 alternatives must compile and match."""
        # Issue #1160
        r = re.compile('|'.join(('%d'%x for x in range(10000))))
        self.assertIsNotNone(r.match('1000'))
        self.assertIsNotNone(r.match('9999'))
    def test_anyall(self):
        """With re.DOTALL the '.' metacharacter also matches newlines."""
        self.assertEqual(re.match("a.b", "a\nb", re.DOTALL).group(0),
                         "a\nb")
        self.assertEqual(re.match("a.*b", "a\n\nb", re.DOTALL).group(0),
                         "a\n\nb")
    def test_non_consuming(self):
        """Lookahead (?=...) and negative lookahead (?!...) consume no input."""
        self.assertEqual(re.match("(a(?=\s[^a]))", "a b").group(1), "a")
        self.assertEqual(re.match("(a(?=\s[^a]*))", "a b").group(1), "a")
        self.assertEqual(re.match("(a(?=\s[abc]))", "a b").group(1), "a")
        self.assertEqual(re.match("(a(?=\s[abc]*))", "a bc").group(1), "a")
        self.assertEqual(re.match(r"(a)(?=\s\1)", "a a").group(1), "a")
        self.assertEqual(re.match(r"(a)(?=\s\1*)", "a aa").group(1), "a")
        self.assertEqual(re.match(r"(a)(?=\s(abc|a))", "a a").group(1), "a")
        self.assertEqual(re.match(r"(a(?!\s[^a]))", "a a").group(1), "a")
        self.assertEqual(re.match(r"(a(?!\s[abc]))", "a d").group(1), "a")
        self.assertEqual(re.match(r"(a)(?!\s\1)", "a b").group(1), "a")
        self.assertEqual(re.match(r"(a)(?!\s(abc|a))", "a b").group(1), "a")
    def test_ignore_case(self):
        """re.I case folding, including through backreferences."""
        self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
        self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
        self.assertEqual(re.match(r"(a\s[^a])", "a b", re.I).group(1), "a b")
        self.assertEqual(re.match(r"(a\s[^a]*)", "a bb", re.I).group(1), "a bb")
        self.assertEqual(re.match(r"(a\s[abc])", "a b", re.I).group(1), "a b")
        self.assertEqual(re.match(r"(a\s[abc]*)", "a bb", re.I).group(1), "a bb")
        self.assertEqual(re.match(r"((a)\s\2)", "a a", re.I).group(1), "a a")
        self.assertEqual(re.match(r"((a)\s\2*)", "a aa", re.I).group(1), "a aa")
        self.assertEqual(re.match(r"((a)\s(abc|a))", "a a", re.I).group(1), "a a")
        self.assertEqual(re.match(r"((a)\s(abc|a)*)", "a aa", re.I).group(1), "a aa")
    def test_category(self):
        """Class shorthand \\s captures the whitespace it matches."""
        self.assertEqual(re.match(r"(\s)", " ").group(1), " ")
    def test_getlower(self):
        """_sre.getlower lowercases a code point under each flag set."""
        import _sre
        self.assertEqual(_sre.getlower(ord('A'), 0), ord('a'))
        self.assertEqual(_sre.getlower(ord('A'), re.LOCALE), ord('a'))
        self.assertEqual(_sre.getlower(ord('A'), re.UNICODE), ord('a'))
        self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
        self.assertEqual(re.match("abc", "ABC", re.I).group(0), "ABC")
    def test_not_literal(self):
        """Negated character classes [^...]."""
        self.assertEqual(re.search("\s([^a])", " b").group(1), "b")
        self.assertEqual(re.search("\s([^a]*)", " bb").group(1), "bb")
    def test_search_coverage(self):
        """Simple search() sanity checks for prefix-skip code paths."""
        self.assertEqual(re.search("\s(b)", " b").group(1), "b")
        self.assertEqual(re.search("a\s", "a ").group(0), "a ")
def assertMatch(self, pattern, text, match=None, span=None,
matcher=re.match):
if match is None and span is None:
# the pattern matches the whole text
match = text
span = (0, len(text))
elif match is None or span is None:
raise ValueError('If match is not None, span should be specified '
'(and vice versa).')
m = matcher(pattern, text)
self.assertTrue(m)
self.assertEqual(m.group(), match)
self.assertEqual(m.span(), span)
    def test_re_escape(self):
        """re.escape leaves alphanumerics/_ alone and backslashes the rest."""
        alnum_chars = string.ascii_letters + string.digits + '_'
        p = ''.join(chr(i) for i in range(256))
        for c in p:
            if c in alnum_chars:
                self.assertEqual(re.escape(c), c)
            elif c == '\x00':
                # NUL is escaped as octal so the result stays printable.
                self.assertEqual(re.escape(c), '\\000')
            else:
                self.assertEqual(re.escape(c), '\\' + c)
            self.assertMatch(re.escape(c), c)
        self.assertMatch(re.escape(p), p)
    def test_re_escape_byte(self):
        """Same escaping rules for bytes input."""
        alnum_chars = (string.ascii_letters + string.digits + '_').encode('ascii')
        p = bytes(range(256))
        for i in p:
            b = bytes([i])
            if b in alnum_chars:
                self.assertEqual(re.escape(b), b)
            elif i == 0:
                self.assertEqual(re.escape(b), b'\\000')
            else:
                self.assertEqual(re.escape(b), b'\\' + b)
            self.assertMatch(re.escape(b), b)
        self.assertMatch(re.escape(p), p)
    def test_re_escape_non_ascii(self):
        """Escaped non-ASCII characters still match themselves."""
        s = 'xxx\u2620\u2620\u2620xxx'
        s_escaped = re.escape(s)
        self.assertEqual(s_escaped, 'xxx\\\u2620\\\u2620\\\u2620xxx')
        self.assertMatch(s_escaped, s)
        self.assertMatch('.%s+.' % re.escape('\u2620'), s,
                         'x\u2620\u2620\u2620x', (2, 7), re.search)
    def test_re_escape_non_ascii_bytes(self):
        """Escaping bytes escapes each byte of a multi-byte sequence."""
        b = 'y\u2620y\u2620y'.encode('utf-8')
        b_escaped = re.escape(b)
        self.assertEqual(b_escaped, b'y\\\xe2\\\x98\\\xa0y\\\xe2\\\x98\\\xa0y')
        self.assertMatch(b_escaped, b)
        res = re.findall(re.escape('\u2620'.encode('utf-8')), b)
        self.assertEqual(len(res), 2)
    def pickle_test(self, pickle):
        """Helper (not auto-discovered): round-trip a compiled pattern
        through the given pickle module.  The equality check relies on
        re.compile's cache returning the identical pattern object."""
        oldpat = re.compile('a(?:b|(c|e){1,2}?|d)+?(.)')
        s = pickle.dumps(oldpat)
        newpat = pickle.loads(s)
        self.assertEqual(oldpat, newpat)
    def test_constants(self):
        """Short flag aliases equal their long names."""
        self.assertEqual(re.I, re.IGNORECASE)
        self.assertEqual(re.L, re.LOCALE)
        self.assertEqual(re.M, re.MULTILINE)
        self.assertEqual(re.S, re.DOTALL)
        self.assertEqual(re.X, re.VERBOSE)
    def test_flags(self):
        """Each flag on its own still allows compilation."""
        for flag in [re.I, re.M, re.X, re.S, re.L]:
            self.assertNotEqual(re.compile('^pattern$', flag), None)
    def test_sre_character_literals(self):
        """Octal (\\ooo), hex (\\xhh), \\uhhhh and \\Uhhhhhhhh escapes in
        str patterns; malformed escapes must raise re.error."""
        for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
            if i < 256:
                self.assertIsNotNone(re.match(r"\%03o" % i, chr(i)))
                self.assertIsNotNone(re.match(r"\%03o0" % i, chr(i)+"0"))
                self.assertIsNotNone(re.match(r"\%03o8" % i, chr(i)+"8"))
                self.assertIsNotNone(re.match(r"\x%02x" % i, chr(i)))
                self.assertIsNotNone(re.match(r"\x%02x0" % i, chr(i)+"0"))
                self.assertIsNotNone(re.match(r"\x%02xz" % i, chr(i)+"z"))
            if i < 0x10000:
                self.assertIsNotNone(re.match(r"\u%04x" % i, chr(i)))
                self.assertIsNotNone(re.match(r"\u%04x0" % i, chr(i)+"0"))
                self.assertIsNotNone(re.match(r"\u%04xz" % i, chr(i)+"z"))
            self.assertIsNotNone(re.match(r"\U%08x" % i, chr(i)))
            self.assertIsNotNone(re.match(r"\U%08x0" % i, chr(i)+"0"))
            self.assertIsNotNone(re.match(r"\U%08xz" % i, chr(i)+"z"))
        # Short octal escapes: \0 plus up to two octal digits.
        self.assertIsNotNone(re.match(r"\0", "\000"))
        self.assertIsNotNone(re.match(r"\08", "\0008"))
        self.assertIsNotNone(re.match(r"\01", "\001"))
        self.assertIsNotNone(re.match(r"\018", "\0018"))
        self.assertIsNotNone(re.match(r"\567", chr(0o167)))
        # \911 would be a reference to a nonexistent group 91.
        self.assertRaises(re.error, re.match, r"\911", "")
        self.assertRaises(re.error, re.match, r"\x1", "")
        self.assertRaises(re.error, re.match, r"\x1z", "")
        self.assertRaises(re.error, re.match, r"\u123", "")
        self.assertRaises(re.error, re.match, r"\u123z", "")
        self.assertRaises(re.error, re.match, r"\U0001234", "")
        self.assertRaises(re.error, re.match, r"\U0001234z", "")
        self.assertRaises(re.error, re.match, r"\U00110000", "")
    def test_sre_character_class_literals(self):
        """Same escapes inside character classes [...]."""
        for i in [0, 8, 16, 32, 64, 127, 128, 255, 256, 0xFFFF, 0x10000, 0x10FFFF]:
            if i < 256:
                self.assertIsNotNone(re.match(r"[\%o]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\%o8]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\%03o]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\%03o0]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\%03o8]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\x%02x]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\x%02x0]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\x%02xz]" % i, chr(i)))
            if i < 0x10000:
                self.assertIsNotNone(re.match(r"[\u%04x]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\u%04x0]" % i, chr(i)))
                self.assertIsNotNone(re.match(r"[\u%04xz]" % i, chr(i)))
            self.assertIsNotNone(re.match(r"[\U%08x]" % i, chr(i)))
            self.assertIsNotNone(re.match(r"[\U%08x0]" % i, chr(i)+"0"))
            self.assertIsNotNone(re.match(r"[\U%08xz]" % i, chr(i)+"z"))
        # A \U range spanning non-BMP code points.
        self.assertIsNotNone(re.match(r"[\U0001d49c-\U0001d4b5]", "\U0001d49e"))
        self.assertRaises(re.error, re.match, r"[\911]", "")
        self.assertRaises(re.error, re.match, r"[\x1z]", "")
        self.assertRaises(re.error, re.match, r"[\u123z]", "")
        self.assertRaises(re.error, re.match, r"[\U0001234z]", "")
        self.assertRaises(re.error, re.match, r"[\U00110000]", "")
    def test_sre_byte_literals(self):
        """Octal/hex escapes in bytes patterns; \\u and \\U are literal."""
        for i in [0, 8, 16, 32, 64, 127, 128, 255]:
            self.assertIsNotNone(re.match((r"\%03o" % i).encode(), bytes([i])))
            self.assertIsNotNone(re.match((r"\%03o0" % i).encode(), bytes([i])+b"0"))
            self.assertIsNotNone(re.match((r"\%03o8" % i).encode(), bytes([i])+b"8"))
            self.assertIsNotNone(re.match((r"\x%02x" % i).encode(), bytes([i])))
            self.assertIsNotNone(re.match((r"\x%02x0" % i).encode(), bytes([i])+b"0"))
            self.assertIsNotNone(re.match((r"\x%02xz" % i).encode(), bytes([i])+b"z"))
        # In bytes patterns \u and \U have no special meaning (3.3-era).
        self.assertIsNotNone(re.match(br"\u", b'u'))
        self.assertIsNotNone(re.match(br"\U", b'U'))
        self.assertIsNotNone(re.match(br"\0", b"\000"))
        self.assertIsNotNone(re.match(br"\08", b"\0008"))
        self.assertIsNotNone(re.match(br"\01", b"\001"))
        self.assertIsNotNone(re.match(br"\018", b"\0018"))
        self.assertIsNotNone(re.match(br"\567", bytes([0o167])))
        self.assertRaises(re.error, re.match, br"\911", b"")
        self.assertRaises(re.error, re.match, br"\x1", b"")
        self.assertRaises(re.error, re.match, br"\x1z", b"")
def test_sre_byte_class_literals(self):
for i in [0, 8, 16, 32, 64, 127, 128, 255]:
self.assertIsNotNone(re.match((r"[\%o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\%03o8]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02x0]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match((r"[\x%02xz]" % i).encode(), bytes([i])))
self.assertIsNotNone(re.match(br"[\u]", b'u'))
self.assertIsNotNone(re.match(br"[\U]", b'U'))
self.assertRaises(re.error, re.match, br"[\911]", "")
self.assertRaises(re.error, re.match, br"[\x1z]", "")
    def test_bug_113254(self):
        """Unmatched groups report start/end/span of -1, not an error."""
        self.assertEqual(re.match(r'(a)|(b)', 'b').start(1), -1)
        self.assertEqual(re.match(r'(a)|(b)', 'b').end(1), -1)
        self.assertEqual(re.match(r'(a)|(b)', 'b').span(1), (-1, -1))
    def test_bug_527371(self):
        """lastindex/lastgroup report the last outermost matched group."""
        # bug described in patches 527371/672491
        self.assertEqual(re.match(r'(a)?a','a').lastindex, None)
        self.assertEqual(re.match(r'(a)(b)?b','ab').lastindex, 1)
        self.assertEqual(re.match(r'(?P<a>a)(?P<b>b)?b','ab').lastgroup, 'a')
        self.assertEqual(re.match("(?P<a>a(b))", "ab").lastgroup, 'a')
        self.assertEqual(re.match("((a))", "a").lastindex, 1)
    def test_bug_545855(self):
        """An unterminated character class is a compile error."""
        # bug 545855 -- This pattern failed to cause a compile error as it
        # should, instead provoking a TypeError.
        self.assertRaises(re.error, re.compile, 'foo[a-')
    def test_bug_418626(self):
        """Non-greedy '*?' over long strings must not recurse deeply."""
        # bugs 418626 at al. -- Testing Greg Chapman's addition of op code
        # SRE_OP_MIN_REPEAT_ONE for eliminating recursion on simple uses of
        # pattern '*?' on a long string.
        self.assertEqual(re.match('.*?c', 10000*'ab'+'cd').end(0), 20001)
        self.assertEqual(re.match('.*?cd', 5000*'ab'+'c'+5000*'ab'+'cde').end(0),
                         20003)
        self.assertEqual(re.match('.*?cd', 20000*'abc'+'de').end(0), 60001)
        # non-simple '*?' still used to hit the recursion limit, before the
        # non-recursive scheme was implemented.
        self.assertEqual(re.search('(a|b)*?c', 10000*'ab'+'cd').end(0), 20001)
    def test_bug_612074(self):
        """A class containing an escaped wide character compiles."""
        pat="["+re.escape("\u2039")+"]"
        self.assertEqual(re.compile(pat) and 1, 1)
    def test_stack_overflow(self):
        """Long repeated-group matches must not overflow the C stack."""
        # nasty cases that used to overflow the straightforward recursive
        # implementation of repeated groups.
        self.assertEqual(re.match('(x)*', 50000*'x').group(1), 'x')
        self.assertEqual(re.match('(x)*y', 50000*'x'+'y').group(1), 'x')
        self.assertEqual(re.match('(x)*?y', 50000*'x'+'y').group(1), 'x')
    def test_unlimited_zero_width_repeat(self):
        """Repeats of a zero-width match must terminate (issue #9669)."""
        # Issue #9669
        self.assertIsNone(re.match(r'(?:a?)*y', 'z'))
        self.assertIsNone(re.match(r'(?:a?)+y', 'z'))
        self.assertIsNone(re.match(r'(?:a?){2,}y', 'z'))
        self.assertIsNone(re.match(r'(?:a?)*?y', 'z'))
        self.assertIsNone(re.match(r'(?:a?)+?y', 'z'))
        self.assertIsNone(re.match(r'(?:a?){2,}?y', 'z'))
# def test_scanner(self):
# def s_ident(scanner, token): return token
# def s_operator(scanner, token): return "op%s" % token
# def s_float(scanner, token): return float(token)
# def s_int(scanner, token): return int(token)
#
# scanner = Scanner([
# (r"[a-zA-Z_]\w*", s_ident),
# (r"\d+\.\d*", s_float),
# (r"\d+", s_int),
# (r"=|\+|-|\*|/", s_operator),
# (r"\s+", None),
# ])
#
# self.assertNotEqual(scanner.scanner.scanner("").pattern, None)
#
# self.assertEqual(scanner.scan("sum = 3*foo + 312.50 + bar"),
# (['sum', 'op=', 3, 'op*', 'foo', 'op+', 312.5,
# 'op+', 'bar'], ''))
    def test_bug_448951(self):
        """Optional group around ((.op):) — unmatched vs matched cases."""
        # bug 448951 (similar to 429357, but with single char match)
        # (Also test greedy matches.)
        for op in '','?','*':
            self.assertEqual(re.match(r'((.%s):)?z'%op, 'z').groups(),
                             (None, None))
            self.assertEqual(re.match(r'((.%s):)?z'%op, 'a:z').groups(),
                             ('a:', 'a'))
    def test_bug_725106(self):
        """Repeated alternatives keep the groups of the last iteration."""
        # capturing groups in alternatives in repeats
        self.assertEqual(re.match('^((a)|b)*', 'abc').groups(),
                         ('b', 'a'))
        self.assertEqual(re.match('^(([ab])|c)*', 'abc').groups(),
                         ('c', 'b'))
        self.assertEqual(re.match('^((d)|[ab])*', 'abc').groups(),
                         ('b', None))
        self.assertEqual(re.match('^((a)c|[ab])*', 'abc').groups(),
                         ('b', None))
        self.assertEqual(re.match('^((a)|b)*?c', 'abc').groups(),
                         ('b', 'a'))
        self.assertEqual(re.match('^(([ab])|c)*?d', 'abcd').groups(),
                         ('c', 'b'))
        self.assertEqual(re.match('^((d)|[ab])*?c', 'abc').groups(),
                         ('b', None))
        self.assertEqual(re.match('^((a)c|[ab])*?c', 'abc').groups(),
                         ('b', None))
    def test_bug_725149(self):
        """Group marks must be restored after failed lookaround in a repeat."""
        # mark_stack_base restoring before restoring marks
        self.assertEqual(re.match('(a)(?:(?=(b)*)c)*', 'abb').groups(),
                         ('a', None))
        self.assertEqual(re.match('(a)((?!(b)*))*', 'abb').groups(),
                         ('a', None, None))
    def test_bug_764548(self):
        """re.compile accepts str subclasses."""
        # bug 764548, re.compile() barfs on str/unicode subclasses
        class my_unicode(str): pass
        pat = re.compile(my_unicode("abc"))
        self.assertEqual(pat.match("xyz"), None)
    def test_finditer(self):
        """finditer yields match objects; pos/endpos work positionally,
        by keyword, and in either keyword order."""
        iter = re.finditer(r":+", "a:b::c:::d")
        self.assertEqual([item.group(0) for item in iter],
                         [":", "::", ":::"])
        pat = re.compile(r":+")
        iter = pat.finditer("a:b::c:::d", 1, 10)
        self.assertEqual([item.group(0) for item in iter],
                         [":", "::", ":::"])
        pat = re.compile(r":+")
        iter = pat.finditer("a:b::c:::d", pos=1, endpos=10)
        self.assertEqual([item.group(0) for item in iter],
                         [":", "::", ":::"])
        pat = re.compile(r":+")
        iter = pat.finditer("a:b::c:::d", endpos=10, pos=1)
        self.assertEqual([item.group(0) for item in iter],
                         [":", "::", ":::"])
        pat = re.compile(r":+")
        iter = pat.finditer("a:b::c:::d", pos=3, endpos=8)
        self.assertEqual([item.group(0) for item in iter],
                         ["::", "::"])
    def test_bug_926075(self):
        """str and bytes versions of a pattern compile to distinct objects."""
        self.assertTrue(re.compile('bug_926075') is not
                        re.compile(b'bug_926075'))
    def test_bug_931848(self):
        """split() with a class of Unicode full stops."""
        pattern = eval('"[\u002E\u3002\uFF0E\uFF61]"')
        self.assertEqual(re.compile(pattern).split("a.b.c"),
                         ['a','b','c'])
    def test_bug_581080(self):
        """finditer and scanner stop cleanly after the last match."""
        iter = re.finditer(r"\s", "a b")
        self.assertEqual(next(iter).span(), (1,2))
        self.assertRaises(StopIteration, next, iter)
        scanner = re.compile(r"\s").scanner("a b")
        self.assertEqual(scanner.search().span(), (1, 2))
        self.assertEqual(scanner.search(), None)
    def test_bug_817234(self):
        """'.*' yields a final empty match at end-of-string, then stops."""
        iter = re.finditer(r".*", "asdf")
        self.assertEqual(next(iter).span(), (0, 4))
        self.assertEqual(next(iter).span(), (4, 4))
        self.assertRaises(StopIteration, next, iter)
    def test_bug_6561(self):
        """\\d matches Unicode category Nd only, not Nl or No."""
        # '\d' should match characters in Unicode category 'Nd'
        # (Number, Decimal Digit), but not those in 'Nl' (Number,
        # Letter) or 'No' (Number, Other).
        decimal_digits = [
            '\u0037', # '\N{DIGIT SEVEN}', category 'Nd'
            '\u0e58', # '\N{THAI DIGIT SIX}', category 'Nd'
            '\uff10', # '\N{FULLWIDTH DIGIT ZERO}', category 'Nd'
            ]
        for x in decimal_digits:
            self.assertEqual(re.match('^\d$', x).group(0), x)
        not_decimal_digits = [
            '\u2165', # '\N{ROMAN NUMERAL SIX}', category 'Nl'
            '\u3039', # '\N{HANGZHOU NUMERAL TWENTY}', category 'Nl'
            '\u2082', # '\N{SUBSCRIPT TWO}', category 'No'
            '\u32b4', # '\N{CIRCLED NUMBER THIRTY NINE}', category 'No'
            ]
        for x in not_decimal_digits:
            self.assertIsNone(re.match('^\d$', x))
    def test_empty_array(self):
        """Matching against an empty array.array buffer (SF bug 1647541)."""
        # SF buf 1647541
        import array
        for typecode in 'bBuhHiIlLfd':
            a = array.array(typecode)
            self.assertEqual(re.compile(b"bla").match(a), None)
            self.assertEqual(re.compile(b"").match(a).groups(), ())
    def test_inline_flags(self):
        """Inline (?i)/(?iu) flags fold case for non-ASCII pairs (bug #1700)."""
        # Bug #1700
        upper_char = chr(0x1ea0) # Latin Capital Letter A with Dot Bellow
        lower_char = chr(0x1ea1) # Latin Small Letter A with Dot Bellow
        p = re.compile(upper_char, re.I | re.U)
        q = p.match(lower_char)
        self.assertNotEqual(q, None)
        p = re.compile(lower_char, re.I | re.U)
        q = p.match(upper_char)
        self.assertNotEqual(q, None)
        p = re.compile('(?i)' + upper_char, re.U)
        q = p.match(lower_char)
        self.assertNotEqual(q, None)
        p = re.compile('(?i)' + lower_char, re.U)
        q = p.match(upper_char)
        self.assertNotEqual(q, None)
        p = re.compile('(?iu)' + upper_char)
        q = p.match(lower_char)
        self.assertNotEqual(q, None)
        p = re.compile('(?iu)' + lower_char)
        q = p.match(upper_char)
        self.assertNotEqual(q, None)
    def test_dollar_matches_twice(self):
        "$ matches the end of string, and just before the terminating \n"
        pattern = re.compile('$')
        self.assertEqual(pattern.sub('#', 'a\nb\n'), 'a\nb#\n#')
        self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a\nb\nc#')
        self.assertEqual(pattern.sub('#', '\n'), '#\n#')
        # With MULTILINE, '$' additionally matches before every newline.
        pattern = re.compile('$', re.MULTILINE)
        self.assertEqual(pattern.sub('#', 'a\nb\n' ), 'a#\nb#\n#' )
        self.assertEqual(pattern.sub('#', 'a\nb\nc'), 'a#\nb#\nc#')
        self.assertEqual(pattern.sub('#', '\n'), '#\n#')
    def test_bytes_str_mixing(self):
        """Mixing str and bytes between pattern/subject/replacement raises."""
        # Mixing str and bytes is disallowed
        pat = re.compile('.')
        bpat = re.compile(b'.')
        self.assertRaises(TypeError, pat.match, b'b')
        self.assertRaises(TypeError, bpat.match, 'b')
        self.assertRaises(TypeError, pat.sub, b'b', 'c')
        self.assertRaises(TypeError, pat.sub, 'b', b'c')
        self.assertRaises(TypeError, pat.sub, b'b', b'c')
        self.assertRaises(TypeError, bpat.sub, b'b', 'c')
        self.assertRaises(TypeError, bpat.sub, 'b', b'c')
        self.assertRaises(TypeError, bpat.sub, 'b', 'c')
    def test_ascii_and_unicode_flag(self):
        """re.ASCII/(?a) restricts str matching; bytes are ASCII-only."""
        # String patterns
        for flags in (0, re.UNICODE):
            pat = re.compile('\xc0', flags | re.IGNORECASE)
            self.assertNotEqual(pat.match('\xe0'), None)
            pat = re.compile('\w', flags)
            self.assertNotEqual(pat.match('\xe0'), None)
        pat = re.compile('\xc0', re.ASCII | re.IGNORECASE)
        self.assertEqual(pat.match('\xe0'), None)
        pat = re.compile('(?a)\xc0', re.IGNORECASE)
        self.assertEqual(pat.match('\xe0'), None)
        pat = re.compile('\w', re.ASCII)
        self.assertEqual(pat.match('\xe0'), None)
        pat = re.compile('(?a)\w')
        self.assertEqual(pat.match('\xe0'), None)
        # Bytes patterns
        for flags in (0, re.ASCII):
            pat = re.compile(b'\xc0', re.IGNORECASE)
            self.assertEqual(pat.match(b'\xe0'), None)
            pat = re.compile(b'\w')
            self.assertEqual(pat.match(b'\xe0'), None)
        # Incompatibilities
        self.assertRaises(ValueError, re.compile, b'\w', re.UNICODE)
        self.assertRaises(ValueError, re.compile, b'(?u)\w')
        self.assertRaises(ValueError, re.compile, '\w', re.UNICODE | re.ASCII)
        self.assertRaises(ValueError, re.compile, '(?u)\w', re.ASCII)
        self.assertRaises(ValueError, re.compile, '(?a)\w', re.UNICODE)
        self.assertRaises(ValueError, re.compile, '(?au)\w')
    def test_bug_6509(self):
        """Replacement templates and callables work for both str and bytes."""
        # Replacement strings of both types must parse properly.
        # all strings
        pat = re.compile('a(\w)')
        self.assertEqual(pat.sub('b\\1', 'ac'), 'bc')
        pat = re.compile('a(.)')
        self.assertEqual(pat.sub('b\\1', 'a\u1234'), 'b\u1234')
        pat = re.compile('..')
        self.assertEqual(pat.sub(lambda m: 'str', 'a5'), 'str')
        # all bytes
        pat = re.compile(b'a(\w)')
        self.assertEqual(pat.sub(b'b\\1', b'ac'), b'bc')
        pat = re.compile(b'a(.)')
        self.assertEqual(pat.sub(b'b\\1', b'a\xCD'), b'b\xCD')
        pat = re.compile(b'..')
        self.assertEqual(pat.sub(lambda m: b'bytes', b'a5'), b'bytes')
    def test_dealloc(self):
        """Bad arguments to the C layer raise cleanly, no segfault (#3299)."""
        # issue 3299: check for segfault in debug build
        import _sre
        # the overflow limit is different on wide and narrow builds and it
        # depends on the definition of SRE_CODE (see sre.h).
        # 2**128 should be big enough to overflow on both. For smaller values
        # a RuntimeError is raised instead of OverflowError.
        long_overflow = 2**128
        self.assertRaises(TypeError, re.finditer, "a", {})
        self.assertRaises(OverflowError, _sre.compile, "abc", 0, [long_overflow])
        self.assertRaises(TypeError, _sre.compile, {}, 0, [])
    def test_search_dot_unicode(self):
        """'.' matches any non-newline char, including non-BMP ones."""
        self.assertIsNotNone(re.search("123.*-", '123abc-'))
        self.assertIsNotNone(re.search("123.*-", '123\xe9-'))
        self.assertIsNotNone(re.search("123.*-", '123\u20ac-'))
        self.assertIsNotNone(re.search("123.*-", '123\U0010ffff-'))
        self.assertIsNotNone(re.search("123.*-", '123\xe9\u20ac\U0010ffff-'))
    def test_compile(self):
        """re.compile returns the same object for an already-compiled arg."""
        # Test return value when given string and pattern as parameter
        # NOTE(review): re._pattern_type was removed in Python 3.8; this
        # targets the 3.3-era private API.
        pattern = re.compile('random pattern')
        self.assertIsInstance(pattern, re._pattern_type)
        same_pattern = re.compile(pattern)
        self.assertIsInstance(same_pattern, re._pattern_type)
        self.assertIs(same_pattern, pattern)
        # Test behaviour when not given a string or pattern as parameter
        self.assertRaises(TypeError, re.compile, 0)
    def test_bug_13899(self):
        """\\A/\\B/\\Z inside a class are the literal letters (issue #13899)."""
        # Issue #13899: re pattern r"[\A]" should work like "A" but matches
        # nothing. Ditto B and Z.
        self.assertEqual(re.findall(r'[\A\B\b\C\Z]', 'AB\bCZ'),
                         ['A', 'B', '\b', 'C', 'Z'])
# FIXME: brython: implement test.support
# @bigmemtest(size=_2G, memuse=1)
# def test_large_search(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# m = re.search('$', s)
# self.assertIsNotNone(m)
# self.assertEqual(m.start(), size)
# self.assertEqual(m.end(), size)
# FIXME: brython: implement test.support
# The huge memuse is because of re.sub() using a list and a join()
# to create the replacement result.
# @bigmemtest(size=_2G, memuse=16 + 2)
# def test_large_subn(self, size):
# # Issue #10182: indices were 32-bit-truncated.
# s = 'a' * size
# r, n = re.subn('', '', s)
# self.assertEqual(r, s)
# self.assertEqual(n, size + 1)
    def test_bug_16688(self):
        """Case-insensitive backrefs on non-ASCII strings (issue #16688)."""
        # Issue 16688: Backreferences make case-insensitive regex fail on
        # non-ASCII strings.
        self.assertEqual(re.findall(r"(?i)(a)\1", "aa \u0100"), ['a'])
        self.assertEqual(re.match(r"(?s).{1,3}", "\u0100\u0100").span(), (0, 2))
def test_repeat_minmax_overflow(self):
# Issue #13169
string = "x" * 100000
self.assertEqual(re.match(r".{65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{,65535}", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65535,}?", string).span(), (0, 65535))
self.assertEqual(re.match(r".{65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{,65536}", string).span(), (0, 65536))
self.assertEqual(re.match(r".{65536,}?", string).span(), (0, 65536))
# 2**128 should be big enough to overflow both SRE_CODE and Py_ssize_t.
self.assertRaises(OverflowError, re.compile, r".{%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{,%d}" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,}?" % 2**128)
self.assertRaises(OverflowError, re.compile, r".{%d,%d}" % (2**129, 2**128))
# FIXME: brython: implement test.support
# @cpython_only
# def test_repeat_minmax_overflow_maxrepeat(self):
# try:
# from _sre import MAXREPEAT
# except ImportError:
# self.skipTest('requires _sre.MAXREPEAT constant')
# string = "x" * 100000
# self.assertIsNone(re.match(r".{%d}" % (MAXREPEAT - 1), string))
# self.assertEqual(re.match(r".{,%d}" % (MAXREPEAT - 1), string).span(),
# (0, 100000))
# self.assertIsNone(re.match(r".{%d,}?" % (MAXREPEAT - 1), string))
# self.assertRaises(OverflowError, re.compile, r".{%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{,%d}" % MAXREPEAT)
# self.assertRaises(OverflowError, re.compile, r".{%d,}?" % MAXREPEAT)
    def test_backref_group_name_in_exception(self):
        """The offending group name appears in the error (issue #17341)."""
        # Issue 17341: Poor error message when compiling invalid regex
        with self.assertRaisesRegex(sre_constants.error, '<foo>'):
            re.compile('(?P=<foo>)')
    def test_group_name_in_exception(self):
        """The offending group name appears in the error (issue #17341)."""
        # Issue 17341: Poor error message when compiling invalid regex
        with self.assertRaisesRegex(sre_constants.error, '\?foo'):
            re.compile('(?P<?foo>)')
def run_re_tests():
    """Drive the data-driven vectors from test.re_tests.

    Each tuple is (pattern, string, outcome[, repl, expected]).  In the
    style of the original CPython harness, failures are reported by
    printing diagnostics rather than by raising, so one bad vector does
    not abort the run.  Relies on module globals: re, sys, traceback,
    verbose.
    """
    from test.re_tests import tests, SUCCEED, FAIL, SYNTAX_ERROR
    if verbose:
        print('Running re_tests test suite')
    else:
        # To save time, only run the first and last 10 tests
        #tests = tests[:10] + tests[-10:]
        pass
    for t in tests:
        sys.stdout.flush()
        pattern = s = outcome = repl = expected = None
        if len(t) == 5:
            pattern, s, outcome, repl, expected = t
        elif len(t) == 3:
            pattern, s, outcome = t
        else:
            raise ValueError('Test tuples should have 3 or 5 fields', t)
        try:
            obj = re.compile(pattern)
        except re.error:
            if outcome == SYNTAX_ERROR: pass # Expected a syntax error
            else:
                print('=== Syntax error:', t)
        except KeyboardInterrupt: raise KeyboardInterrupt
        except:
            print('*** Unexpected error ***', t)
            if verbose:
                traceback.print_exc(file=sys.stdout)
        else:
            try:
                result = obj.search(s)
            except re.error as msg:
                print('=== Unexpected exception', t, repr(msg))
            if outcome == SYNTAX_ERROR:
                # This should have been a syntax error; forget it.
                pass
            elif outcome == FAIL:
                if result is None: pass   # No match, as expected
                else: print('=== Succeeded incorrectly', t)
            elif outcome == SUCCEED:
                if result is not None:
                    # Matched, as expected, so now we compute the
                    # result string and compare it to our expected result.
                    start, end = result.span(0)
                    vardict={'found': result.group(0),
                             'groups': result.group(),
                             'flags': result.re.flags}
                    # Expose each numbered group as g1..g99 for the eval
                    # of the expected-result expression below.
                    for i in range(1, 100):
                        try:
                            gi = result.group(i)
                            # Special hack because else the string concat fails:
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict['g%d' % i] = gi
                    for i in result.re.groupindex.keys():
                        try:
                            gi = result.group(i)
                            if gi is None:
                                gi = "None"
                        except IndexError:
                            gi = "Error"
                        vardict[i] = gi
                    repl = eval(repl, vardict)
                    if repl != expected:
                        print('=== grouping error', t, end=' ')
                        print(repr(repl) + ' should be ' + repr(expected))
                else:
                    print('=== Failed incorrectly', t)
                # Try the match with both pattern and string converted to
                # bytes, and check that it still succeeds.
                try:
                    bpat = bytes(pattern, "ascii")
                    bs = bytes(s, "ascii")
                except UnicodeEncodeError:
                    # skip non-ascii tests
                    pass
                else:
                    try:
                        bpat = re.compile(bpat)
                    except Exception:
                        print('=== Fails on bytes pattern compile', t)
                        if verbose:
                            traceback.print_exc(file=sys.stdout)
                    else:
                        bytes_result = bpat.search(bs)
                        if bytes_result is None:
                            print('=== Fails on bytes pattern match', t)
                # Try the match with the search area limited to the extent
                # of the match and see if it still succeeds. \B will
                # break (because it won't match at the end or start of a
                # string), so we'll ignore patterns that feature it.
                if pattern[:2] != '\\B' and pattern[-2:] != '\\B' \
                               and result is not None:
                    obj = re.compile(pattern)
                    result = obj.search(s, result.start(0), result.end(0) + 1)
                    if result is None:
                        print('=== Failed on range-limited match', t)
                # Try the match with IGNORECASE enabled, and check that it
                # still succeeds.
                obj = re.compile(pattern, re.IGNORECASE)
                result = obj.search(s)
                if result is None:
                    print('=== Fails on case-insensitive match', t)
                # Try the match with LOCALE enabled, and check that it
                # still succeeds.
                if '(?u)' not in pattern:
                    obj = re.compile(pattern, re.LOCALE)
                    result = obj.search(s)
                    if result is None:
                        print('=== Fails on locale-sensitive match', t)
                # Try the match with UNICODE locale enabled, and check
                # that it still succeeds.
                obj = re.compile(pattern, re.UNICODE)
                result = obj.search(s)
                if result is None:
                    print('=== Fails on unicode-sensitive match', t)
def test_main():
    # FIXME: brython: implement test.support
    # run_unittest(ReTests)
    # Only the hand-rolled regression driver (defined earlier in this
    # file) runs under Brython; the unittest-based suite stays disabled
    # until test.support is available.
    run_re_tests()
if __name__ == "__main__":
    test_main()
| gpl-3.0 |
derDavidT/sympy | sympy/ntheory/continued_fraction.py | 75 | 6975 | from sympy.core.numbers import Integer, Rational
def continued_fraction_periodic(p, q, d=0):
    r"""
    Find the periodic continued fraction expansion of a quadratic irrational.

    Compute the continued fraction expansion of a rational or a
    quadratic irrational number, i.e. `\frac{p + \sqrt{d}}{q}`, where
    `p`, `q` and `d \ge 0` are integers.

    Returns the continued fraction representation (canonical form) as
    a list of integers, optionally ending (for quadratic irrationals)
    with repeating block as the last term of this list.

    Parameters
    ==========

    p : int
        the rational part of the number's numerator
    q : int
        the denominator of the number
    d : int, optional
        the irrational part (discriminator) of the number's numerator

    Examples
    ========

    >>> from sympy.ntheory.continued_fraction import continued_fraction_periodic
    >>> continued_fraction_periodic(3, 2, 7)
    [2, [1, 4, 1, 1]]

    Golden ratio has the simplest continued fraction expansion:

    >>> continued_fraction_periodic(1, 2, 5)
    [[1]]

    If the discriminator is zero or a perfect square then the number will be a
    rational number:

    >>> continued_fraction_periodic(4, 3, 0)
    [1, 3]
    >>> continued_fraction_periodic(4, 3, 49)
    [3, 1, 2]

    See Also
    ========

    continued_fraction_iterator, continued_fraction_reduce

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Periodic_continued_fraction
    .. [2] K. Rosen. Elementary Number theory and its applications.
           Addison-Wesley, 3 Sub edition, pages 379-381, January 1992.
    """
    from sympy.core.compatibility import as_int
    from sympy.functions import sqrt

    p, q, d = list(map(as_int, [p, q, d]))
    sd = sqrt(d)

    if q == 0:
        raise ValueError("The denominator is zero.")

    if d < 0:
        raise ValueError("Delta supposed to be a non-negative "
                         "integer, got %d" % d)
    elif d == 0 or sd.is_integer:
        # the number is a rational number
        return list(continued_fraction_iterator(Rational(p + sd, q)))

    # Normalize so that q divides d - p**2 exactly; this invariant keeps
    # every intermediate (p, q) state below an exact integer pair.
    if (d - p**2) % q:
        d *= q**2
        sd *= q
        p *= abs(q)
        q *= abs(q)

    terms = []
    pq = {}
    # The expansion is periodic: it repeats as soon as a (p, q) state
    # recurs, which the dict of previously-seen states detects.
    while (p, q) not in pq:
        pq[(p, q)] = len(terms)
        terms.append(int((p + sd)/q))
        p = terms[-1]*q - p
        # BUGFIX: use floor division.  q always divides d - p**2 exactly
        # here (guaranteed by the normalization above), and Python 3 true
        # division would silently turn q into a float, breaking both the
        # (p, q) dict lookup and the exactness of subsequent terms.
        q = (d - p**2)//q

    i = pq[(p, q)]
    return terms[:i] + [terms[i:]]
def continued_fraction_reduce(cf):
    """
    Reduce a continued fraction to a rational or quadratic irrational.

    Compute the exact number represented by the terminating or periodic
    continued fraction expansion ``cf``.  The expansion is supplied as a
    terminating iterator of terms; a periodic tail, if any, is given as a
    list of the repeating terms in the final position (the format
    produced by ``continued_fraction_periodic``).  For terminating
    expansions this is equivalent to
    ``list(continued_fraction_convergents(cf))[-1]``, only a little more
    efficient.

    For quadratic irrationals, returns the largest solution found,
    which is generally the one sought, if the fraction is in canonical
    form (all terms positive except possibly the first).

    Examples
    ========

    >>> from sympy.ntheory.continued_fraction import continued_fraction_reduce
    >>> continued_fraction_reduce([1, 2, 3, 4, 5])
    225/157
    >>> continued_fraction_reduce([-2, 1, 9, 7, 1, 2])
    -256/233
    >>> continued_fraction_reduce([2, 1, 2, 1, 1, 4, 1, 1, 6, 1, 1, 8]).n(10)
    2.718281835
    >>> continued_fraction_reduce([1, 4, 2, [3, 1]])
    (sqrt(21) + 287)/238
    >>> continued_fraction_reduce([[1]])
    1/2 + sqrt(5)/2
    >>> from sympy.ntheory.continued_fraction import continued_fraction_periodic
    >>> continued_fraction_reduce(continued_fraction_periodic(8, 5, 13))
    (sqrt(13) + 8)/5

    See Also
    ========

    continued_fraction_periodic
    """
    from sympy.core.symbol import Dummy
    from sympy.solvers import solve

    repeating = []
    placeholder = Dummy('x')

    def _terminated(terms):
        # Yield terms up to the periodic tail, which is swapped for a
        # symbol so the periodic value can be solved for afterwards.
        for term in terms:
            if isinstance(term, list):
                repeating.extend(term)
                yield placeholder
                return
            yield term

    value = Integer(0)
    for value in continued_fraction_convergents(_terminated(cf)):
        pass

    if not repeating:
        return value
    sym = Dummy('y')
    roots = solve(continued_fraction_reduce(repeating + [sym]) - sym, sym)
    roots.sort()
    # a canonical-form expansion corresponds to the largest root
    return value.subs(placeholder, roots[-1]).radsimp()
def continued_fraction_iterator(x):
    """
    Yield the continued fraction expansion of ``x`` term by term.

    Examples
    ========

    >>> from sympy.core import Rational, pi
    >>> from sympy.ntheory.continued_fraction import continued_fraction_iterator

    >>> list(continued_fraction_iterator(Rational(3, 8)))
    [0, 2, 1, 2]
    >>> list(continued_fraction_iterator(Rational(-3, 8)))
    [-1, 1, 1, 1, 2]

    >>> for i, v in enumerate(continued_fraction_iterator(pi)):
    ...     if i > 7:
    ...         break
    ...     print(v)
    3
    7
    15
    1
    292
    1
    1
    1

    References
    ==========

    .. [1] http://en.wikipedia.org/wiki/Continued_fraction
    """
    from sympy.functions import floor
    while True:
        whole = floor(x)
        yield whole
        remainder = x - whole
        # a zero remainder means the expansion terminates
        if not remainder:
            return
        x = 1/remainder
def continued_fraction_convergents(cf):
    """
    Return an iterator over the convergents of a continued fraction (cf).

    The parameter should be an iterable returning successive
    partial quotients of the continued fraction, such as might be
    returned by continued_fraction_iterator.  In computing the
    convergents, the continued fraction need not be strictly in
    canonical form (all integers, all but the first positive).
    Rational and negative elements may be present in the expansion.

    Examples
    ========

    >>> from sympy.core import Rational, pi
    >>> from sympy import S
    >>> from sympy.ntheory.continued_fraction import \
            continued_fraction_convergents, continued_fraction_iterator

    >>> list(continued_fraction_convergents([0, 2, 1, 2]))
    [0, 1/2, 1/3, 3/8]

    >>> list(continued_fraction_convergents([1, S('1/2'), -7, S('1/4')]))
    [1, 3, 19/5, 7]

    >>> it = continued_fraction_convergents(continued_fraction_iterator(pi))
    >>> for n in range(7):
    ...     print(next(it))
    3
    22/7
    333/106
    355/113
    103993/33102
    104348/33215
    208341/66317

    See Also
    ========

    continued_fraction_iterator
    """
    # Standard two-term recurrence for convergents:
    #   h_k = a_k*h_{k-1} + h_{k-2},  k_k = a_k*k_{k-1} + k_{k-2}
    num_prev2, den_prev2 = Integer(0), Integer(1)
    num_prev1, den_prev1 = Integer(1), Integer(0)
    for quotient in cf:
        numerator = quotient*num_prev1 + num_prev2
        denominator = quotient*den_prev1 + den_prev2
        yield numerator/denominator
        num_prev2, den_prev2 = num_prev1, den_prev1
        num_prev1, den_prev1 = numerator, denominator
| bsd-3-clause |
richpolis/siveinpy | env/lib/python2.7/site-packages/django/contrib/staticfiles/utils.py | 322 | 1973 | import os
import fnmatch
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
def matches_patterns(path, patterns=None):
    """
    Return True if ``path`` matches (case-sensitively) at least one of
    the shell-style ``patterns``, False otherwise -- including when no
    patterns are given.  Callers use this to decide which files/dirs to
    ignore during static-file collection.
    """
    # any() short-circuits on the first match; the manual loop-and-return
    # did the same thing less idiomatically.
    return any(fnmatch.fnmatchcase(path, pattern)
               for pattern in (patterns or []))
def get_files(storage, ignore_patterns=None, location=''):
    """
    Recursively walk the storage directories yielding the paths
    of all files that should be copied.

    ``storage`` is a storage backend exposing ``listdir(location)``;
    ``ignore_patterns`` is a list of shell-style patterns (matched with
    ``matches_patterns``) naming files and directories to skip;
    ``location`` is the subdirectory to start from, relative to the
    storage root ('' for the root itself).
    """
    if ignore_patterns is None:
        ignore_patterns = []
    directories, files = storage.listdir(location)
    for filename in files:
        if matches_patterns(filename, ignore_patterns):
            continue
        if location:
            filename = os.path.join(location, filename)
        yield filename
    # 'directory' rather than 'dir' so the builtin is not shadowed
    for directory in directories:
        if matches_patterns(directory, ignore_patterns):
            continue
        if location:
            directory = os.path.join(location, directory)
        for filename in get_files(storage, ignore_patterns, directory):
            yield filename
def check_settings(base_url=None):
    """
    Checks if the staticfiles settings have sane values, raising
    ImproperlyConfigured for any bad combination.
    """
    effective_url = settings.STATIC_URL if base_url is None else base_url
    if not effective_url:
        raise ImproperlyConfigured(
            "You're using the staticfiles app "
            "without having set the required STATIC_URL setting.")
    if effective_url == settings.MEDIA_URL:
        raise ImproperlyConfigured("The MEDIA_URL and STATIC_URL "
                                   "settings must have different values")
    media_root = settings.MEDIA_ROOT
    static_root = settings.STATIC_ROOT
    if media_root and static_root and media_root == static_root:
        raise ImproperlyConfigured("The MEDIA_ROOT and STATIC_ROOT "
                                   "settings must have different values")
| mit |
atilag/qiskit-sdk-py | qiskit/__init__.py | 1 | 1693 | # -*- coding: utf-8 -*-
# pylint: disable=wrong-import-order
# Copyright 2017 IBM RESEARCH. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Main QISKit public functionality."""
# First, check for required Python and API version
from . import _util
from ._qiskiterror import QISKitError
from ._classicalregister import ClassicalRegister
from ._quantumregister import QuantumRegister
from ._quantumcircuit import QuantumCircuit
from ._gate import Gate
from ._compositegate import CompositeGate
from ._instruction import Instruction
from ._instructionset import InstructionSet
from ._reset import Reset
from ._measure import Measure
# The qiskit.extensions.x imports needs to be placed here due to the
# mechanism for adding gates dynamically.
import qiskit.extensions.standard
import qiskit.extensions.quantum_initializer
from ._jobprocessor import JobProcessor
from ._quantumjob import QuantumJob
from ._quantumprogram import QuantumProgram
from ._result import Result
# Import the wrapper, to make it available when doing "import qiskit".
from . import wrapper
__version__ = '0.5.0'
| apache-2.0 |
flavour/rgims_as_diff | modules/test_utils/Web2pyNosePlugin.py | 44 | 3813 |
import nose
import re
from itertools import imap
import unittest
class Web2pyNosePlugin(nose.plugins.base.Plugin):
    # see: http://somethingaboutorange.com/mrl/projects/nose/0.11.1/plugins/writing.html
    """This plugin is designed to give the web2py environment to the tests.
    """
    score = 0
    # always enable as this plugin can only
    # be selected by running this script
    enabled = True

    def __init__(
        self,
        application_name,
        environment,
        directory_pattern,
        test_folders
    ):
        super(Web2pyNosePlugin, self).__init__()
        # name of the web2py application under test
        self.application_name = application_name
        # globals dict emulating the web2py execution environment;
        # it is both consulted and copied in loadTestsFromName
        self.environment = environment
        # regex deciding which directories nose should descend into
        self.directory_pattern = directory_pattern
        # path fragments identifying folders that contain test modules
        self.test_folders = test_folders

    def options(self, parser, env):
        """Register command line options (none for this plugin)."""
        pass

    def wantDirectory(self, dirname):
        # Descend only into directories matching the configured pattern.
        return bool(re.search(self.directory_pattern, dirname))

    def wantFile(self, file_name):
        # Accept .py files living inside one of the configured test
        # folders.  (A leftover debug ``print file_name`` statement that
        # echoed every candidate file during collection was removed.)
        return file_name.endswith(".py") and any(
            imap(file_name.__contains__, self.test_folders)
        )

    def wantModule(self, module):
        # Modules are only loaded via loadTestsFromName, never directly.
        return False

    def loadTestsFromName(self, file_name, discovered):
        """Sets up the unit-testing environment.

        This involves loading modules as if by web2py.
        Also we must have a test database.
        If testing controllers, tests need to set up the request themselves.
        """
        import inspect
        if not file_name.endswith(".py"):
            return []
        # Is it possible that the module could load
        # other code that is using the original db?
        test_globals = self.environment
        module_globals = dict(self.environment)
        # execfile is used because it doesn't create a module
        # or load the module from sys.modules if it exists.
        execfile(file_name, module_globals)
        # we have to return something, otherwise nose
        # will let others have a go, and they won't pass
        # in the web2py environment, so we'll get errors
        tests = []
        for name, thing in module_globals.iteritems():
            # skip globally imported things unless they were overridden
            # by the test module itself
            if (name not in test_globals
                    or test_globals[name] is not thing):
                if (
                    isinstance(thing, type)
                    and issubclass(thing, unittest.TestCase)
                ):
                    # collect every callable test* method of the TestCase
                    for member_name in dir(thing):
                        if member_name.startswith("test"):
                            if callable(getattr(thing, member_name)):
                                tests.append(thing(member_name))
                elif (
                    name.startswith("test")
                    or name.startswith("Test")
                ):
                    if inspect.isfunction(thing):
                        function = thing
                        # things coming from execfile have no module
                        if function.__module__ in ("__main__", None):
                            tests.append(
                                nose.case.FunctionTestCase(function)
                            )
        return tests
| mit |
openqt/algorithms | leetcode/python/lc712-minimum-ascii-delete-sum-for-two-strings.py | 1 | 1651 | # coding=utf-8
import unittest
"""712. Minimum ASCII Delete Sum for Two Strings
https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/description/
Given two strings `s1, s2`, find the lowest ASCII sum of deleted characters to
make two strings equal.
**Example 1:**
**Input:** s1 = "sea", s2 = "eat"
**Output:** 231
**Explanation:** Deleting "s" from "sea" adds the ASCII value of "s" (115) to the sum.
Deleting "t" from "eat" adds 116 to the sum.
At the end, both strings are equal, and 115 + 116 = 231 is the minimum sum possible to achieve this.
**Example 2:**
**Input:** s1 = "delete", s2 = "leet"
**Output:** 403
**Explanation:** Deleting "dee" from "delete" to turn the string into "let",
adds 100[d]+101[e]+101[e] to the sum. Deleting "e" from "leet" adds 101[e] to the sum.
At the end, both strings are equal to "let", and the answer is 100+101+101+101 = 403.
If instead we turned both strings into "lee" or "eet", we would get answers of 433 or 417, which are higher.
**Note:**
* `0 < s1.length, s2.length <= 1000`.
* All elements of each string will have an ASCII value in `[97, 122]`.
Similar Questions:
Edit Distance (edit-distance)
Longest Increasing Subsequence (longest-increasing-subsequence)
Delete Operation for Two Strings (delete-operation-for-two-strings)
"""
class Solution(object):
    def minimumDeleteSum(self, s1, s2):
        """Return the lowest total ASCII value of characters that must be
        deleted from s1 and s2 to make the two strings equal.

        Edit-distance-style dynamic programming with a rolling row:
        dp[j] is the minimal delete cost to equalise the prefixes
        processed so far of s1 and s2[:j], giving O(len(s1)*len(s2))
        time and O(len(s2)) space.

        :type s1: str
        :type s2: str
        :rtype: int
        """
        n = len(s2)
        # Base row: equalising "" with s2[:j] deletes all of s2[:j].
        dp = [0] * (n + 1)
        for j in range(1, n + 1):
            dp[j] = dp[j - 1] + ord(s2[j - 1])
        for ch1 in s1:
            prev_diag = dp[0]       # dp[i-1][j-1]
            dp[0] += ord(ch1)       # equalising s1[:i] with "" deletes s1[:i]
            for j in range(1, n + 1):
                prev_row = dp[j]    # dp[i-1][j]
                if ch1 == s2[j - 1]:
                    # matching characters cost nothing to keep
                    dp[j] = prev_diag
                else:
                    # delete ch1 from s1, or s2[j-1] from s2 -- take cheaper
                    dp[j] = min(prev_row + ord(ch1),
                                dp[j - 1] + ord(s2[j - 1]))
                prev_diag = prev_row
        return dp[n]
    def test(self):
        pass
if __name__ == "__main__":
    # Run the unittest suite when this solution file is executed directly.
    unittest.main()
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.