repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
lmthang/bivec | scripts/visual.py | 2 | 3018 | #!/usr/bin/env python
# Author: Thang Luong <luong.m.thang@gmail.com>, created on Wed Jun 3 01:22:18 MDT 2015
"""
Module docstrings.
"""
usage = 'USAGE DESCRIPTION.'
### Module imports ###
import sys
import os
import argparse # option parsing
import re # regular expression
import codecs
from tsne import bh_sne
import numpy as np
#sys.path.append(os.environ['HOME'] + '/lib/') # add our own libraries
### Global variables ###
### Class declarations ###
### Function declarations ###
def process_command_line():
    """
    Parse command-line arguments.

    Returns the populated argparse namespace (the "args list");
    ``sys.argv[1:]`` is consumed implicitly by argparse.
    """
    arg_parser = argparse.ArgumentParser(description=usage)
    # required positional arguments
    arg_parser.add_argument('in_file', metavar='in_file', type=str, help='input file')
    arg_parser.add_argument('out_file', metavar='out_file', type=str, help='output file')
    # optional flags
    arg_parser.add_argument('-o', '--option', dest='opt', type=int, default=0, help='option (default=0)')
    return arg_parser.parse_args()
def check_dir(out_file):
    """Ensure that the directory containing ``out_file`` exists.

    Creates the directory (and any missing parents) if needed; a path with
    no directory component (a plain filename) is left alone.
    """
    dir_name = os.path.dirname(out_file)
    # `not os.path.exists(...)` replaces the unidiomatic `== False` test.
    if dir_name and not os.path.exists(dir_name):
        sys.stderr.write('! Directory %s doesn\'t exist, creating ...\n' % dir_name)
        try:
            os.makedirs(dir_name)
        except OSError:
            # Another process may have created the directory between the
            # exists() check and makedirs(); only re-raise if it is still
            # genuinely missing.
            if not os.path.isdir(dir_name):
                raise
def clean_line(line):
    r"""
    Strip leading and trailing whitespace from ``line``.

    Fixes the original pattern ``(^\s+|\s$)``: its trailing alternative
    matched only a single whitespace character, so lines ending in several
    spaces/tabs (or a space before the newline) were not fully stripped.
    """
    return re.sub(r'(^\s+|\s+$)', '', line)
def process_files(in_file, out_file):
    """
    Read word embeddings from ``in_file`` (one word followed by its vector
    per line), run Barnes-Hut t-SNE to reduce them to 2-D, and write the
    2-D coordinates to ``out_file`` ("x y" per line, same order as input).
    """
    sys.stderr.write('# in_file = %s, out_file = %s\n' % (in_file, out_file))
    # input
    sys.stderr.write('# Input from %s.\n' % (in_file))
    inf = codecs.open(in_file, 'r', 'utf-8')
    # output
    sys.stderr.write('Output to %s\n' % out_file)
    check_dir(out_file)
    ouf = codecs.open(out_file, 'w', 'utf-8')
    line_id = 0
    num_dim = -1
    X = None
    all_lines = inf.readlines()
    num_words = len(all_lines)
    sys.stderr.write('# Processing file %s ...\n' % (in_file))
    sys.stderr.write('# num words = %d\n' % (num_words))
    for line in all_lines:
        line = clean_line(line)
        tokens = re.split(r'\s+', line)
        if line_id == 0:
            # Dimensionality is inferred from the first line (tokens minus
            # the leading word), then the full matrix is allocated at once.
            num_dim = len(tokens) - 1
            sys.stderr.write('# num dims = %d\n' % (num_dim))
            X = np.zeros((num_words, num_dim))
        # Parse the embedding values directly as floats.  The original code
        # routed them through a '|S4' (4-byte string) array first, which
        # silently truncated every value to its first 4 characters (e.g.
        # "0.123456" -> 0.12) before the float conversion.  It also used
        # np.float, which has been removed from modern NumPy.
        X[line_id, :] = np.array(tokens[1:], dtype=np.float64)
        line_id = line_id + 1
        if (line_id % 10000 == 0):
            sys.stderr.write(' (%d) ' % line_id)
    sys.stderr.write('Done! Num lines = %d\n' % line_id)
    X_2d = bh_sne(X)
    for ii in range(num_words):  # range works on both Python 2 and 3
        ouf.write('%f %f\n' % (X_2d[ii, 0], X_2d[ii, 1]))
    inf.close()
    ouf.close()
# Script entry point: parse the CLI arguments, then convert the embedding
# file to 2-D t-SNE coordinates.
if __name__ == '__main__':
    args = process_command_line()
    process_files(args.in_file, args.out_file)
# NOTE(review): leftover (dead) scaffolding for optional stdin/stdout
# handling; `in_file`/`out_file` are not in scope here, so this could not
# be re-enabled as-is.
#    if in_file == '':
#        sys.stderr.write('# Input from stdin.\n')
#        inf = sys.stdin
#    else:
#    if out_file == '':
#        sys.stderr.write('# Output to stdout.\n')
#        ouf = sys.stdout
#    else:
| apache-2.0 |
jhawkesworth/ansible | test/units/modules/network/f5/test_bigip_appsvcs_extension.py | 17 | 3140 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_appsvcs_extension import ApiParameters
from library.modules.bigip_appsvcs_extension import ModuleParameters
from library.modules.bigip_appsvcs_extension import ModuleManager
from library.modules.bigip_appsvcs_extension import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_appsvcs_extension import ApiParameters
from ansible.modules.network.f5.bigip_appsvcs_extension import ModuleParameters
from ansible.modules.network.f5.bigip_appsvcs_extension import ModuleManager
from ansible.modules.network.f5.bigip_appsvcs_extension import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Return the contents of the named fixture file, memoized by path.

    The file is parsed as JSON when possible; otherwise the raw text is
    returned unchanged.
    """
    path = os.path.join(fixture_path, name)
    if path in fixture_data:
        return fixture_data[path]
    with open(path) as f:
        contents = f.read()
    try:
        contents = json.loads(contents)
    except Exception:
        pass  # not JSON; keep the raw string
    fixture_data[path] = contents
    return contents
class TestParameters(unittest.TestCase):
    def test_module_parameters(self):
        """ModuleParameters should expose the raw module args unchanged."""
        raw_args = dict(
            content='{ "foo": "bar" }',
            force=True,
            targets=['T1', 'T2']
        )
        params = ModuleParameters(params=raw_args)
        # content is accessible far enough that key lookup works, and the
        # scalar/list options pass straight through.
        assert 'foo' in params.content
        assert params.force is True
        assert params.targets == ['T1', 'T2']
class TestManager(unittest.TestCase):
    # Tests for ModuleManager.exec_module with all device calls mocked out.
    def setUp(self):
        self.spec = ArgumentSpec()
    def test_create(self, *args):
        # NOTE(review): the unused *args suggests this method once carried
        # @patch decorators (which prepend mock arguments); confirm none
        # were lost in a refactor.
        set_module_args(dict(
            content='{ "foo": "bar" }',
            provider=dict(
                server='localhost',
                password='password',
                user='admin'
            )
        ))
        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode,
            required_if=self.spec.required_if
        )
        mm = ModuleManager(module=module)
        # Override methods to force specific logic in the module to happen:
        # resource absent -> create path; upsert reports success.
        mm.exists = Mock(return_value=False)
        mm.upsert_on_device = Mock(return_value=True)
        results = mm.exec_module()
        # A successful create must be reported as a change.
        assert results['changed'] is True
| gpl-3.0 |
chugunovyar/factoryForBuild | env/lib/python2.7/site-packages/tornado/log.py | 82 | 9819 | #!/usr/bin/env python
#
# Copyright 2012 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Logging support for Tornado.
Tornado uses three logger streams:
* ``tornado.access``: Per-request logging for Tornado's HTTP servers (and
potentially other servers in the future)
* ``tornado.application``: Logging of errors from application code (i.e.
uncaught exceptions from callbacks)
* ``tornado.general``: General-purpose logging, including any errors
or warnings from Tornado itself.
These streams may be configured independently using the standard library's
`logging` module. For example, you may wish to send ``tornado.access`` logs
to a separate file for analysis.
"""
from __future__ import absolute_import, division, print_function, with_statement
import logging
import logging.handlers
import sys
from tornado.escape import _unicode
from tornado.util import unicode_type, basestring_type
try:
import curses
except ImportError:
curses = None
# Logger objects for internal tornado use
access_log = logging.getLogger("tornado.access")
app_log = logging.getLogger("tornado.application")
gen_log = logging.getLogger("tornado.general")
def _stderr_supports_color():
color = False
if curses and hasattr(sys.stderr, 'isatty') and sys.stderr.isatty():
try:
curses.setupterm()
if curses.tigetnum("colors") > 0:
color = True
except Exception:
pass
return color
def _safe_unicode(s):
    # Decode *s* to a unicode string when possible; byte strings that are
    # not valid UTF-8 fall back to their repr() so logging never raises.
    try:
        return _unicode(s)
    except UnicodeDecodeError:
        return repr(s)
class LogFormatter(logging.Formatter):
    """Log formatter used in Tornado.
    Key features of this formatter are:
    * Color support when logging to a terminal that supports it.
    * Timestamps on every log line.
    * Robust against str/bytes encoding problems.
    This formatter is enabled automatically by
    `tornado.options.parse_command_line` (unless ``--logging=none`` is
    used).
    """
    DEFAULT_FORMAT = '%(color)s[%(levelname)1.1s %(asctime)s %(module)s:%(lineno)d]%(end_color)s %(message)s'
    DEFAULT_DATE_FORMAT = '%y%m%d %H:%M:%S'
    # Default mapping of log level -> ANSI color number (argument to the
    # terminfo setaf/setf capability).
    DEFAULT_COLORS = {
        logging.DEBUG: 4,  # Blue
        logging.INFO: 2,  # Green
        logging.WARNING: 3,  # Yellow
        logging.ERROR: 1,  # Red
    }
    def __init__(self, color=True, fmt=DEFAULT_FORMAT,
                 datefmt=DEFAULT_DATE_FORMAT, colors=DEFAULT_COLORS):
        r"""
        :arg bool color: Enables color support.
        :arg string fmt: Log message format.
          It will be applied to the attributes dict of log records. The
          text between ``%(color)s`` and ``%(end_color)s`` will be colored
          depending on the level if color support is on.
        :arg dict colors: color mappings from logging level to terminal color
          code
        :arg string datefmt: Datetime format.
          Used for formatting ``(asctime)`` placeholder in ``prefix_fmt``.
        .. versionchanged:: 3.2
           Added ``fmt`` and ``datefmt`` arguments.
        """
        logging.Formatter.__init__(self, datefmt=datefmt)
        self._fmt = fmt
        # Per-level escape sequences; empty when color is off/unsupported.
        self._colors = {}
        if color and _stderr_supports_color():
            # The curses module has some str/bytes confusion in
            # python3. Until version 3.2.3, most methods return
            # bytes, but only accept strings. In addition, we want to
            # output these strings with the logging module, which
            # works with unicode strings. The explicit calls to
            # unicode() below are harmless in python2 but will do the
            # right conversion in python 3.
            fg_color = (curses.tigetstr("setaf") or
                        curses.tigetstr("setf") or "")
            if (3, 0) < sys.version_info < (3, 2, 3):
                fg_color = unicode_type(fg_color, "ascii")
            for levelno, code in colors.items():
                self._colors[levelno] = unicode_type(curses.tparm(fg_color, code), "ascii")
            # Escape sequence to reset all attributes after the colored span.
            self._normal = unicode_type(curses.tigetstr("sgr0"), "ascii")
        else:
            self._normal = ''
    def format(self, record):
        try:
            message = record.getMessage()
            assert isinstance(message, basestring_type)  # guaranteed by logging
            # Encoding notes: The logging module prefers to work with character
            # strings, but only enforces that log messages are instances of
            # basestring. In python 2, non-ascii bytestrings will make
            # their way through the logging framework until they blow up with
            # an unhelpful decoding error (with this formatter it happens
            # when we attach the prefix, but there are other opportunities for
            # exceptions further along in the framework).
            #
            # If a byte string makes it this far, convert it to unicode to
            # ensure it will make it out to the logs. Use repr() as a fallback
            # to ensure that all byte strings can be converted successfully,
            # but don't do it by default so we don't add extra quotes to ascii
            # bytestrings. This is a bit of a hacky place to do this, but
            # it's worth it since the encoding errors that would otherwise
            # result are so useless (and tornado is fond of using utf8-encoded
            # byte strings whereever possible).
            record.message = _safe_unicode(message)
        except Exception as e:
            # Formatting must never raise; fall back to a diagnostic dump.
            record.message = "Bad message (%r): %r" % (e, record.__dict__)
        record.asctime = self.formatTime(record, self.datefmt)
        if record.levelno in self._colors:
            record.color = self._colors[record.levelno]
            record.end_color = self._normal
        else:
            record.color = record.end_color = ''
        formatted = self._fmt % record.__dict__
        if record.exc_info:
            if not record.exc_text:
                record.exc_text = self.formatException(record.exc_info)
        if record.exc_text:
            # exc_text contains multiple lines. We need to _safe_unicode
            # each line separately so that non-utf8 bytes don't cause
            # all the newlines to turn into '\n'.
            lines = [formatted.rstrip()]
            lines.extend(_safe_unicode(ln) for ln in record.exc_text.split('\n'))
            formatted = '\n'.join(lines)
        # Indent continuation lines so multi-line records stay visually grouped.
        return formatted.replace("\n", "\n    ")
def enable_pretty_logging(options=None, logger=None):
    """Turns on formatted logging output as configured.
    This is called automatically by `tornado.options.parse_command_line`
    and `tornado.options.parse_config_file`.
    """
    if options is None:
        from tornado.options import options
    if options.logging is None or options.logging.lower() == 'none':
        return
    if logger is None:
        logger = logging.getLogger()
    logger.setLevel(getattr(logging, options.logging.upper()))
    if options.log_file_prefix:
        # Rotating file output (uncolored); rollover is controlled by the
        # log_file_max_size / log_file_num_backups options.
        channel = logging.handlers.RotatingFileHandler(
            filename=options.log_file_prefix,
            maxBytes=options.log_file_max_size,
            backupCount=options.log_file_num_backups)
        channel.setFormatter(LogFormatter(color=False))
        logger.addHandler(channel)
    # stderr output: explicit opt-in, or default when nothing else is set up.
    if (options.log_to_stderr or
            (options.log_to_stderr is None and not logger.handlers)):
        # Set up color if we are in a tty and curses is installed
        channel = logging.StreamHandler()
        channel.setFormatter(LogFormatter())
        logger.addHandler(channel)
def define_logging_options(options=None):
    """Add logging-related flags to ``options``.
    These options are present automatically on the default options instance;
    this method is only necessary if you have created your own `.OptionParser`.
    .. versionadded:: 4.2
       This function existed in prior versions but was broken and undocumented until 4.2.
    """
    if options is None:
        # late import to prevent cycle
        from tornado.options import options
    options.define("logging", default="info",
                   help=("Set the Python log level. If 'none', tornado won't touch the "
                         "logging configuration."),
                   metavar="debug|info|warning|error|none")
    options.define("log_to_stderr", type=bool, default=None,
                   help=("Send log output to stderr (colorized if possible). "
                         "By default use stderr if --log_file_prefix is not set and "
                         "no other logging is configured."))
    options.define("log_file_prefix", type=str, default=None, metavar="PATH",
                   help=("Path prefix for log files. "
                         "Note that if you are running multiple tornado processes, "
                         "log_file_prefix must be different for each of them (e.g. "
                         "include the port number)"))
    options.define("log_file_max_size", type=int, default=100 * 1000 * 1000,
                   help="max size of log files before rollover")
    options.define("log_file_num_backups", type=int, default=10,
                   help="number of log files to keep")
    # Re-apply the configuration whenever the options are (re)parsed.
    options.add_parse_callback(lambda: enable_pretty_logging(options))
| gpl-3.0 |
Thraxis/pymedusa | lib/unidecode/x08e.py | 252 | 4659 | data = (
'Chu ', # 0x00
'Jing ', # 0x01
'Nie ', # 0x02
'Xiao ', # 0x03
'Bo ', # 0x04
'Chi ', # 0x05
'Qun ', # 0x06
'Mou ', # 0x07
'Shu ', # 0x08
'Lang ', # 0x09
'Yong ', # 0x0a
'Jiao ', # 0x0b
'Chou ', # 0x0c
'Qiao ', # 0x0d
'[?] ', # 0x0e
'Ta ', # 0x0f
'Jian ', # 0x10
'Qi ', # 0x11
'Wo ', # 0x12
'Wei ', # 0x13
'Zhuo ', # 0x14
'Jie ', # 0x15
'Ji ', # 0x16
'Nie ', # 0x17
'Ju ', # 0x18
'Ju ', # 0x19
'Lun ', # 0x1a
'Lu ', # 0x1b
'Leng ', # 0x1c
'Huai ', # 0x1d
'Ju ', # 0x1e
'Chi ', # 0x1f
'Wan ', # 0x20
'Quan ', # 0x21
'Ti ', # 0x22
'Bo ', # 0x23
'Zu ', # 0x24
'Qie ', # 0x25
'Ji ', # 0x26
'Cu ', # 0x27
'Zong ', # 0x28
'Cai ', # 0x29
'Zong ', # 0x2a
'Peng ', # 0x2b
'Zhi ', # 0x2c
'Zheng ', # 0x2d
'Dian ', # 0x2e
'Zhi ', # 0x2f
'Yu ', # 0x30
'Duo ', # 0x31
'Dun ', # 0x32
'Chun ', # 0x33
'Yong ', # 0x34
'Zhong ', # 0x35
'Di ', # 0x36
'Zhe ', # 0x37
'Chen ', # 0x38
'Chuai ', # 0x39
'Jian ', # 0x3a
'Gua ', # 0x3b
'Tang ', # 0x3c
'Ju ', # 0x3d
'Fu ', # 0x3e
'Zu ', # 0x3f
'Die ', # 0x40
'Pian ', # 0x41
'Rou ', # 0x42
'Nuo ', # 0x43
'Ti ', # 0x44
'Cha ', # 0x45
'Tui ', # 0x46
'Jian ', # 0x47
'Dao ', # 0x48
'Cuo ', # 0x49
'Xi ', # 0x4a
'Ta ', # 0x4b
'Qiang ', # 0x4c
'Zhan ', # 0x4d
'Dian ', # 0x4e
'Ti ', # 0x4f
'Ji ', # 0x50
'Nie ', # 0x51
'Man ', # 0x52
'Liu ', # 0x53
'Zhan ', # 0x54
'Bi ', # 0x55
'Chong ', # 0x56
'Lu ', # 0x57
'Liao ', # 0x58
'Cu ', # 0x59
'Tang ', # 0x5a
'Dai ', # 0x5b
'Suo ', # 0x5c
'Xi ', # 0x5d
'Kui ', # 0x5e
'Ji ', # 0x5f
'Zhi ', # 0x60
'Qiang ', # 0x61
'Di ', # 0x62
'Man ', # 0x63
'Zong ', # 0x64
'Lian ', # 0x65
'Beng ', # 0x66
'Zao ', # 0x67
'Nian ', # 0x68
'Bie ', # 0x69
'Tui ', # 0x6a
'Ju ', # 0x6b
'Deng ', # 0x6c
'Ceng ', # 0x6d
'Xian ', # 0x6e
'Fan ', # 0x6f
'Chu ', # 0x70
'Zhong ', # 0x71
'Dun ', # 0x72
'Bo ', # 0x73
'Cu ', # 0x74
'Zu ', # 0x75
'Jue ', # 0x76
'Jue ', # 0x77
'Lin ', # 0x78
'Ta ', # 0x79
'Qiao ', # 0x7a
'Qiao ', # 0x7b
'Pu ', # 0x7c
'Liao ', # 0x7d
'Dun ', # 0x7e
'Cuan ', # 0x7f
'Kuang ', # 0x80
'Zao ', # 0x81
'Ta ', # 0x82
'Bi ', # 0x83
'Bi ', # 0x84
'Zhu ', # 0x85
'Ju ', # 0x86
'Chu ', # 0x87
'Qiao ', # 0x88
'Dun ', # 0x89
'Chou ', # 0x8a
'Ji ', # 0x8b
'Wu ', # 0x8c
'Yue ', # 0x8d
'Nian ', # 0x8e
'Lin ', # 0x8f
'Lie ', # 0x90
'Zhi ', # 0x91
'Li ', # 0x92
'Zhi ', # 0x93
'Chan ', # 0x94
'Chu ', # 0x95
'Duan ', # 0x96
'Wei ', # 0x97
'Long ', # 0x98
'Lin ', # 0x99
'Xian ', # 0x9a
'Wei ', # 0x9b
'Zuan ', # 0x9c
'Lan ', # 0x9d
'Xie ', # 0x9e
'Rang ', # 0x9f
'Xie ', # 0xa0
'Nie ', # 0xa1
'Ta ', # 0xa2
'Qu ', # 0xa3
'Jie ', # 0xa4
'Cuan ', # 0xa5
'Zuan ', # 0xa6
'Xi ', # 0xa7
'Kui ', # 0xa8
'Jue ', # 0xa9
'Lin ', # 0xaa
'Shen ', # 0xab
'Gong ', # 0xac
'Dan ', # 0xad
'Segare ', # 0xae
'Qu ', # 0xaf
'Ti ', # 0xb0
'Duo ', # 0xb1
'Duo ', # 0xb2
'Gong ', # 0xb3
'Lang ', # 0xb4
'Nerau ', # 0xb5
'Luo ', # 0xb6
'Ai ', # 0xb7
'Ji ', # 0xb8
'Ju ', # 0xb9
'Tang ', # 0xba
'Utsuke ', # 0xbb
'[?] ', # 0xbc
'Yan ', # 0xbd
'Shitsuke ', # 0xbe
'Kang ', # 0xbf
'Qu ', # 0xc0
'Lou ', # 0xc1
'Lao ', # 0xc2
'Tuo ', # 0xc3
'Zhi ', # 0xc4
'Yagate ', # 0xc5
'Ti ', # 0xc6
'Dao ', # 0xc7
'Yagate ', # 0xc8
'Yu ', # 0xc9
'Che ', # 0xca
'Ya ', # 0xcb
'Gui ', # 0xcc
'Jun ', # 0xcd
'Wei ', # 0xce
'Yue ', # 0xcf
'Xin ', # 0xd0
'Di ', # 0xd1
'Xuan ', # 0xd2
'Fan ', # 0xd3
'Ren ', # 0xd4
'Shan ', # 0xd5
'Qiang ', # 0xd6
'Shu ', # 0xd7
'Tun ', # 0xd8
'Chen ', # 0xd9
'Dai ', # 0xda
'E ', # 0xdb
'Na ', # 0xdc
'Qi ', # 0xdd
'Mao ', # 0xde
'Ruan ', # 0xdf
'Ren ', # 0xe0
'Fan ', # 0xe1
'Zhuan ', # 0xe2
'Hong ', # 0xe3
'Hu ', # 0xe4
'Qu ', # 0xe5
'Huang ', # 0xe6
'Di ', # 0xe7
'Ling ', # 0xe8
'Dai ', # 0xe9
'Ao ', # 0xea
'Zhen ', # 0xeb
'Fan ', # 0xec
'Kuang ', # 0xed
'Ang ', # 0xee
'Peng ', # 0xef
'Bei ', # 0xf0
'Gu ', # 0xf1
'Ku ', # 0xf2
'Pao ', # 0xf3
'Zhu ', # 0xf4
'Rong ', # 0xf5
'E ', # 0xf6
'Ba ', # 0xf7
'Zhou ', # 0xf8
'Zhi ', # 0xf9
'Yao ', # 0xfa
'Ke ', # 0xfb
'Yi ', # 0xfc
'Qing ', # 0xfd
'Shi ', # 0xfe
'Ping ', # 0xff
)
| gpl-3.0 |
zijistark/ck2utils | esc/check_titles.py | 1 | 8077 | #!/usr/bin/env python3
from collections import defaultdict
import csv
from operator import attrgetter
import pathlib
import pprint
import re
import sys
import shutil
import tempfile
import ck2parser
from ck2parser import (rootpath, vanilladir, is_codename, Obj, csv_rows,
get_province_id_name_map, SimpleParser)
from print_time import print_time
VANILLA_HISTORY_WARN = True
results = {True: defaultdict(list),
False: defaultdict(list)}
def check_title(parser, v, path, titles, lhs=False, line=None):
    """Record a reference to an undefined title codename.

    Returns True when *v* is fine (not a codename, or a defined title);
    otherwise appends the offending source line to ``results[lhs][path]``
    and returns False.
    """
    title = v if isinstance(v, str) else v.val
    if not (is_codename(title) and title not in titles):
        return True
    if line is None:
        reported = '<file>'
    else:
        # Report the first non-comment line of the offending statement.
        candidates = line.inline_str(parser)[0].splitlines()
        reported = next((l for l in candidates if not re.match(r'\s*#', l)),
                        candidates[0])
    results[lhs][path].append(reported)
    return False
def check_titles(parser, path, titles):
    # Walk the whole parse tree of `path` and report every title codename
    # that is not in `titles`.  Keys whose value is an Obj are checked as
    # scopes (lhs=True) and recursed into; everything else is checked as a
    # plain reference.
    def recurse(tree):
        if tree.has_pairs:
            for p in tree:
                n, v = p
                v_is_obj = isinstance(v, Obj)
                check_title(parser, n, path, titles, v_is_obj, p)
                if v_is_obj:
                    recurse(v)
                else:
                    check_title(parser, v, path, titles, line=p)
        else:
            for v in tree:
                check_title(parser, v, path, titles, line=v)
    try:
        recurse(parser.parse_file(path))
    except:
        # Name the file before re-raising so parse failures are locatable.
        print(path)
        raise
def check_regions(parser, titles, duchies_de_jure):
    # Validate map/geographical_region.txt.  Returns a pair:
    #   bad_titles       - titular (non-de-jure) duchies referenced by regions
    #   missing_duchies  - de jure duchies never covered by any "world_" region
    bad_titles = []
    missing_duchies = list(duchies_de_jure)
    region_duchies = defaultdict(list)
    path, tree = next(parser.parse_files('map/geographical_region.txt'))
    for n, v in tree:
        world = n.val.startswith('world_')
        for n2, v2 in v:
            if n2.val == 'regions':
                # A region composed of other regions inherits their duchies
                # (relies on referenced regions appearing earlier in the file).
                for v3 in v2:
                    for duchy in region_duchies.get(v3.val, []):
                        try:
                            missing_duchies.remove(duchy)
                        except ValueError:
                            pass
                        region_duchies[n.val].append(duchy)
            elif n2.val == 'duchies':
                for v3 in v2:
                    if is_codename(v3.val):
                        check_title(parser, v3, path, titles, line=v3)
                        region_duchies[n.val].append(v3.val)
                        if v3.val in titles and v3.val not in duchies_de_jure:
                            bad_titles.append(v3.val)
                        elif world and v3.val in missing_duchies:
                            missing_duchies.remove(v3.val)
    return bad_titles, missing_duchies
def check_province_history(parser, titles):
    """Check title references in every province history file whose filename
    matches the canonical "<id> - <name>" province mapping."""
    expected_names = get_province_id_name_map(parser)
    for path in parser.files('history/provinces/*.txt'):
        prov_id, prov_name = path.stem.split(' - ')
        if expected_names.get(int(prov_id)) == prov_name:
            check_titles(parser, path, titles)
def process_landed_titles(parser):
    # Depth-first scan of all landed_titles files.  Returns a 4-tuple:
    #   titles_list       - every title codename, in first-seen order
    #   title_liege_map   - title -> its de jure liege
    #   title_vassals_map - liege -> set of de jure vassals
    #   misogyny          - titles defining `title` but not `title_female`
    titles_list = []
    title_liege_map = {}
    title_vassals_map = defaultdict(set)
    misogyny = []
    for path, tree in parser.parse_files('common/landed_titles/*.txt'):
        try:
            # Explicit stack, reversed so titles are visited in file order.
            dfs = list(reversed(tree))
            while dfs:
                n, v = dfs.pop()
                if is_codename(n.val):
                    if n.val not in titles_list:
                        titles_list.append(n.val)
                    if v.get('title') and not v.get('title_female'):
                        misogyny.append(n.val)
                    for n2, v2 in v:
                        if is_codename(n2.val):
                            title_liege_map[n2.val] = n.val
                            title_vassals_map[n.val].add(n2.val)
                    dfs.extend(reversed(v))
        except:
            # Name the file before re-raising so parse failures are locatable.
            print(path)
            raise
    return titles_list, title_liege_map, title_vassals_map, misogyny
@print_time
def main():
    """Check all title references across the mod and write a report to
    check_titles.txt."""
    # import pdb
    parser = SimpleParser()
    parser.moddirs = [rootpath / 'SWMH-BETA/SWMH']
    # parser.moddirs.extend([rootpath / 'EMF/EMF', rootpath / 'EMF/EMF+SWMH'])
    titles_list, title_liege_map, title_vassals_map, misogyny = (
        process_landed_titles(parser))
    titles = set(titles_list)
    check_province_history(parser, titles)
    start_date = parser.parse_file('common/defines.txt')['start_date'].val
    for path, tree in parser.parse_files('history/titles/*.txt',
                                         memcache=True):
        if tree.contents:
            title = path.stem
            good = check_title(parser, title, path, titles)
            if (VANILLA_HISTORY_WARN and not good and
                    not any(d in path.parents for d in parser.moddirs)):
                # Vanilla history for a title the mod removed: suggest
                # masking it with an empty override file.
                # newpath = parser.moddirs[0] / 'history/titles' / path.name
                # newpath.open('w').close()
                print('Should override {} with blank file'.format(
                    '<vanilla>' / path.relative_to(vanilladir)))
            else:
                check_titles(parser, path, titles)
            # update de jure changed before start_date
            for n, v in sorted(tree, key=attrgetter('key.val')):
                if n.val > start_date:
                    break
                for n2, v2 in v:
                    if n2.val == 'de_jure_liege':
                        old_liege = title_liege_map.get(title)
                        if old_liege:
                            title_vassals_map[old_liege].discard(title)
                        title_liege_map[title] = v2.val
                        title_vassals_map[v2.val].add(title)
        parser.flush(path)
    # De jure duchies are duchy-tier titles that still have vassals at start.
    duchies_de_jure = [t for t, v in title_vassals_map.items()
                       if t[0] == 'd' and v]
    bad_region_titles, missing_duchies = check_regions(parser, titles,
                                                       duchies_de_jure)
    for _ in parser.parse_files('history/characters/*.txt'):
        pass  # just parse it to see if it parses
    globs = [
        'events/*.txt',
        'decisions/*.txt',
        'common/laws/*.txt',
        'common/objectives/*.txt',
        'common/minor_titles/*.txt',
        'common/job_titles/*.txt',
        'common/job_actions/*.txt',
        'common/religious_titles/*.txt',
        'common/cb_types/*.txt',
        'common/scripted_triggers/*.txt',
        'common/scripted_effects/*.txt',
        'common/achievements.txt'
    ]
    for glob in globs:
        for path in parser.files(glob):
            check_titles(parser, path, titles)
    # Write the accumulated findings (including the module-level `results`
    # populated by check_title) to the report file.
    with (rootpath / 'check_titles.txt').open('w') as fp:
        if bad_region_titles:
            print('Titular titles in regions:\n\t', end='', file=fp)
            print(*bad_region_titles, sep=' ', file=fp)
        if missing_duchies:
            print('De jure duchies not found in "world_" regions:\n\t', end='',
                  file=fp)
            print(*missing_duchies, sep=' ', file=fp)
        for lhs in [True, False]:
            if results[lhs]:
                if lhs:
                    print('Undefined references as SCOPE:', file=fp)
                else:
                    print('Undefined references:', file=fp)
                for path, titles in sorted(results[lhs].items()):
                    if titles:
                        for modpath in parser.moddirs:
                            if modpath in path.parents:
                                rel_path = ('<{}>'.format(modpath.name) /
                                            path.relative_to(modpath))
                                break
                        else:
                            rel_path = '<vanilla>' / path.relative_to(vanilladir)
                        print('\t' + str(rel_path), *titles, sep='\n\t\t', file=fp)
        if misogyny:
            print('Title defines title but not title_female:\n\t', end='',
                  file=fp)
            print(*misogyny, sep=' ', file=fp)
if __name__ == '__main__':
    main()
| gpl-2.0 |
pmishra02138/conference-app-scalable | Lesson_4/00_Conference_Central/utils.py | 384 | 1576 | import json
import os
import time
import uuid
from google.appengine.api import urlfetch
from models import Profile
def getUserId(user, id_type="email"):
    """Return a stable user id for `user`.

    id_type selects the strategy:
      - "email":  the user's email address (default).
      - "oauth":  look the id up via Google's tokeninfo endpoint
                  (workaround implementation).
      - "custom": reuse an existing Profile's datastore id, otherwise
                  mint a fresh uuid.
    """
    if id_type == "email":
        return user.email()
    if id_type == "oauth":
        """A workaround implementation for getting userid."""
        auth = os.getenv('HTTP_AUTHORIZATION')
        bearer, token = auth.split()
        token_type = 'id_token'
        if 'OAUTH_USER_ID' in os.environ:
            token_type = 'access_token'
        url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
               % (token_type, token))
        user = {}
        wait = 1
        # Retry up to 3 times with a growing backoff; an invalid_token
        # response switches the query to the access_token form instead.
        for i in range(3):
            resp = urlfetch.fetch(url)
            if resp.status_code == 200:
                user = json.loads(resp.content)
                break
            elif resp.status_code == 400 and 'invalid_token' in resp.content:
                url = ('https://www.googleapis.com/oauth2/v1/tokeninfo?%s=%s'
                       % ('access_token', token))
            else:
                time.sleep(wait)
                wait = wait + i
        return user.get('user_id', '')
    if id_type == "custom":
        # implement your own user_id creation and getting algorythm
        # this is just a sample that queries datastore for an existing profile
        # and generates an id if profile does not exist for an email.
        # Fixed from the original, which referenced the undefined name
        # `Conference` (only Profile is imported) and truthiness-tested the
        # Query object itself (always true); fetch a single entity instead.
        # TODO(review): confirm Profile declares a `mainEmail` property.
        profile = Profile.query(Profile.mainEmail == user.email()).get()
        if profile:
            return profile.key.id()
        else:
            return str(uuid.uuid1().get_hex())
| gpl-3.0 |
OriHoch/Open-Knesset | lobbyists/migrations/0002_auto__add_lobbyistcorporationdata__add_lobbyistcorporation__add_field_.py | 14 | 22496 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'LobbyistCorporationData'
db.create_table(u'lobbyists_lobbyistcorporationdata', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('corporation', self.gf('django.db.models.fields.related.ForeignKey')(blank=True, related_name='data', null=True, to=orm['lobbyists.LobbyistCorporation'])),
('scrape_time', self.gf('django.db.models.fields.DateTimeField')(null=True, blank=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('source_id', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
))
db.send_create_signal(u'lobbyists', ['LobbyistCorporationData'])
# Adding M2M table for field lobbyists on 'LobbyistCorporationData'
m2m_table_name = db.shorten_name(u'lobbyists_lobbyistcorporationdata_lobbyists')
db.create_table(m2m_table_name, (
('id', models.AutoField(verbose_name='ID', primary_key=True, auto_created=True)),
('lobbyistcorporationdata', models.ForeignKey(orm[u'lobbyists.lobbyistcorporationdata'], null=False)),
('lobbyist', models.ForeignKey(orm[u'lobbyists.lobbyist'], null=False))
))
db.create_unique(m2m_table_name, ['lobbyistcorporationdata_id', 'lobbyist_id'])
# Adding model 'LobbyistCorporation'
db.create_table(u'lobbyists_lobbyistcorporation', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('name', self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True)),
('source_id', self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True)),
))
db.send_create_signal(u'lobbyists', ['LobbyistCorporation'])
# Adding field 'LobbyistData.source_id'
db.add_column(u'lobbyists_lobbyistdata', 'source_id',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
# Adding field 'LobbyistRepresent.name'
db.add_column(u'lobbyists_lobbyistrepresent', 'name',
self.gf('django.db.models.fields.CharField')(max_length=100, null=True, blank=True),
keep_default=False)
# Adding field 'LobbyistRepresentData.source_id'
db.add_column(u'lobbyists_lobbyistrepresentdata', 'source_id',
self.gf('django.db.models.fields.CharField')(max_length=20, null=True, blank=True),
keep_default=False)
def backwards(self, orm):
    """Revert this migration: drop every table and column added by forwards().

    Operations run in reverse dependency order (dependent data table first,
    then the M2M join table, then the parent table, then the added columns).
    """
    # Deleting model 'LobbyistCorporationData'
    db.delete_table(u'lobbyists_lobbyistcorporationdata')
    # Removing M2M table for field lobbyists on 'LobbyistCorporationData'
    db.delete_table(db.shorten_name(u'lobbyists_lobbyistcorporationdata_lobbyists'))
    # Deleting model 'LobbyistCorporation'
    db.delete_table(u'lobbyists_lobbyistcorporation')
    # Deleting field 'LobbyistData.source_id'
    db.delete_column(u'lobbyists_lobbyistdata', 'source_id')
    # Deleting field 'LobbyistRepresent.name'
    db.delete_column(u'lobbyists_lobbyistrepresent', 'name')
    # Deleting field 'LobbyistRepresentData.source_id'
    db.delete_column(u'lobbyists_lobbyistrepresentdata', 'source_id')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'lobbyists.lobbyist': {
'Meta': {'object_name': 'Lobbyist'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'person': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'lobbyist'", 'null': 'True', 'to': u"orm['persons.Person']"}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyistcorporation': {
'Meta': {'object_name': 'LobbyistCorporation'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyistcorporationdata': {
'Meta': {'object_name': 'LobbyistCorporationData'},
'corporation': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'data'", 'null': 'True', 'to': u"orm['lobbyists.LobbyistCorporation']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyists': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lobbyists.Lobbyist']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scrape_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyistdata': {
'Meta': {'object_name': 'LobbyistData'},
'corporation_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'corporation_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'faction_member': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'faction_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'family_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyist': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'data'", 'null': 'True', 'to': u"orm['lobbyists.Lobbyist']"}),
'permit_type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'profession': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'represents': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['lobbyists.LobbyistRepresent']", 'symmetrical': 'False'}),
'scrape_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyisthistory': {
'Meta': {'object_name': 'LobbyistHistory'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyists': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'histories'", 'symmetrical': 'False', 'to': u"orm['lobbyists.Lobbyist']"}),
'scrape_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyistrepresent': {
'Meta': {'object_name': 'LobbyistRepresent'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'})
},
u'lobbyists.lobbyistrepresentdata': {
'Meta': {'object_name': 'LobbyistRepresentData'},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'lobbyist_represent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'data'", 'null': 'True', 'to': u"orm['lobbyists.LobbyistRepresent']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'scrape_time': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'source_id': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'})
},
u'mks.knesset': {
'Meta': {'object_name': 'Knesset'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'number': ('django.db.models.fields.IntegerField', [], {'primary_key': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.member': {
'Meta': {'ordering': "['name']", 'object_name': 'Member'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'average_monthly_committee_presence': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'average_weekly_presence_hours': ('django.db.models.fields.FloatField', [], {'null': 'True', 'blank': 'True'}),
'backlinks_enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'bills_stats_approved': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_first': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_pre': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'bills_stats_proposed': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'blog': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['planet.Blog']", 'unique': 'True', 'null': 'True', 'blank': 'True'}),
'current_party': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'members'", 'null': 'True', 'to': u"orm['mks.Party']"}),
'current_position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'current_role_descriptions': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'is_current': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'parties': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'all_members'", 'symmetrical': 'False', 'through': u"orm['mks.Membership']", 'to': u"orm['mks.Party']"}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'website': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.membership': {
'Meta': {'object_name': 'Membership'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'member': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Member']"}),
'party': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['mks.Party']"}),
'position': ('django.db.models.fields.PositiveIntegerField', [], {'default': '999', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'mks.party': {
'Meta': {'ordering': "('-number_of_seats',)", 'unique_together': "(('knesset', 'name'),)", 'object_name': 'Party'},
'end_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_coalition': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'knesset': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'parties'", 'null': 'True', 'to': u"orm['mks.Knesset']"}),
'logo': ('django.db.models.fields.files.ImageField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_members': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'number_of_seats': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'start_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'})
},
u'persons.person': {
'Meta': {'ordering': "('name',)", 'object_name': 'Person'},
'area_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'date_of_birth': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_of_death': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'null': 'True', 'blank': 'True'}),
'family_status': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'}),
'fax': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'gender': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'img_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'blank': 'True'}),
'mk': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'person'", 'null': 'True', 'to': u"orm['mks.Member']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'number_of_children': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'phone': ('django.db.models.fields.CharField', [], {'max_length': '20', 'null': 'True', 'blank': 'True'}),
'place_of_birth': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lat': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'place_of_residence_lon': ('django.db.models.fields.CharField', [], {'max_length': '16', 'null': 'True', 'blank': 'True'}),
'residence_centrality': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'residence_economy': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'titles': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'persons'", 'null': 'True', 'symmetrical': 'False', 'to': u"orm['persons.Title']"}),
'year_of_aliyah': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
u'persons.title': {
'Meta': {'object_name': 'Title'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
u'planet.blog': {
'Meta': {'ordering': "('title', 'url')", 'object_name': 'Blog'},
'date_created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '255', 'blank': 'True'}),
'url': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '1024', 'db_index': 'True'})
}
}
complete_apps = ['lobbyists']
#!python
"""Bootstrap setuptools installation
If you want to use setuptools in your package's setup.py, just include this
file in the same directory with it, and add this to the top of your setup.py::
from ez_setup import use_setuptools
use_setuptools()
If you want to require a specific version of setuptools, set a download
mirror, or use an alternate download directory, you can do so by supplying
the appropriate options to ``use_setuptools()``.
This file can also be run as a script to install or upgrade setuptools.
"""
import sys
# Version of setuptools fetched when the caller does not request one.
DEFAULT_VERSION = "0.6c11"
# Base URL for egg downloads; interpolates the running "major.minor" Python version.
DEFAULT_URL = "http://pypi.python.org/packages/%s/s/setuptools/" % sys.version[:3]
# Known-good MD5 digests for released setuptools eggs, keyed by egg filename.
# Consulted by _validate_md5() to detect corrupt/tampered downloads and
# regenerated in place by update_md5() (run this script with --md5update).
md5_data = {
    'setuptools-0.6b1-py2.3.egg': '8822caf901250d848b996b7f25c6e6ca',
    'setuptools-0.6b1-py2.4.egg': 'b79a8a403e4502fbb85ee3f1941735cb',
    'setuptools-0.6b2-py2.3.egg': '5657759d8a6d8fc44070a9d07272d99b',
    'setuptools-0.6b2-py2.4.egg': '4996a8d169d2be661fa32a6e52e4f82a',
    'setuptools-0.6b3-py2.3.egg': 'bb31c0fc7399a63579975cad9f5a0618',
    'setuptools-0.6b3-py2.4.egg': '38a8c6b3d6ecd22247f179f7da669fac',
    'setuptools-0.6b4-py2.3.egg': '62045a24ed4e1ebc77fe039aa4e6f7e5',
    'setuptools-0.6b4-py2.4.egg': '4cb2a185d228dacffb2d17f103b3b1c4',
    'setuptools-0.6c1-py2.3.egg': 'b3f2b5539d65cb7f74ad79127f1a908c',
    'setuptools-0.6c1-py2.4.egg': 'b45adeda0667d2d2ffe14009364f2a4b',
    'setuptools-0.6c10-py2.3.egg': 'ce1e2ab5d3a0256456d9fc13800a7090',
    'setuptools-0.6c10-py2.4.egg': '57d6d9d6e9b80772c59a53a8433a5dd4',
    'setuptools-0.6c10-py2.5.egg': 'de46ac8b1c97c895572e5e8596aeb8c7',
    'setuptools-0.6c10-py2.6.egg': '58ea40aef06da02ce641495523a0b7f5',
    'setuptools-0.6c11-py2.3.egg': '2baeac6e13d414a9d28e7ba5b5a596de',
    'setuptools-0.6c11-py2.4.egg': 'bd639f9b0eac4c42497034dec2ec0c2b',
    'setuptools-0.6c11-py2.5.egg': '64c94f3bf7a72a13ec83e0b24f2749b2',
    'setuptools-0.6c11-py2.6.egg': 'bfa92100bd772d5a213eedd356d64086',
    'setuptools-0.6c2-py2.3.egg': 'f0064bf6aa2b7d0f3ba0b43f20817c27',
    'setuptools-0.6c2-py2.4.egg': '616192eec35f47e8ea16cd6a122b7277',
    'setuptools-0.6c3-py2.3.egg': 'f181fa125dfe85a259c9cd6f1d7b78fa',
    'setuptools-0.6c3-py2.4.egg': 'e0ed74682c998bfb73bf803a50e7b71e',
    'setuptools-0.6c3-py2.5.egg': 'abef16fdd61955514841c7c6bd98965e',
    'setuptools-0.6c4-py2.3.egg': 'b0b9131acab32022bfac7f44c5d7971f',
    'setuptools-0.6c4-py2.4.egg': '2a1f9656d4fbf3c97bf946c0a124e6e2',
    'setuptools-0.6c4-py2.5.egg': '8f5a052e32cdb9c72bcf4b5526f28afc',
    'setuptools-0.6c5-py2.3.egg': 'ee9fd80965da04f2f3e6b3576e9d8167',
    'setuptools-0.6c5-py2.4.egg': 'afe2adf1c01701ee841761f5bcd8aa64',
    'setuptools-0.6c5-py2.5.egg': 'a8d3f61494ccaa8714dfed37bccd3d5d',
    'setuptools-0.6c6-py2.3.egg': '35686b78116a668847237b69d549ec20',
    'setuptools-0.6c6-py2.4.egg': '3c56af57be3225019260a644430065ab',
    'setuptools-0.6c6-py2.5.egg': 'b2f8a7520709a5b34f80946de5f02f53',
    'setuptools-0.6c7-py2.3.egg': '209fdf9adc3a615e5115b725658e13e2',
    'setuptools-0.6c7-py2.4.egg': '5a8f954807d46a0fb67cf1f26c55a82e',
    'setuptools-0.6c7-py2.5.egg': '45d2ad28f9750e7434111fde831e8372',
    'setuptools-0.6c8-py2.3.egg': '50759d29b349db8cfd807ba8303f1902',
    'setuptools-0.6c8-py2.4.egg': 'cba38d74f7d483c06e9daa6070cce6de',
    'setuptools-0.6c8-py2.5.egg': '1721747ee329dc150590a58b3e1ac95b',
    'setuptools-0.6c9-py2.3.egg': 'a83c4020414807b496e4cfbe08507c03',
    'setuptools-0.6c9-py2.4.egg': '260a2be2e5388d66bdaee06abec6342a',
    'setuptools-0.6c9-py2.5.egg': 'fe67c3e5a17b12c0e7c541b7ea43a8e6',
    'setuptools-0.6c9-py2.6.egg': 'ca37b1ff16fa2ede6e19383e7b59245a',
}
import sys, os
try: from hashlib import md5
except ImportError: from md5 import md5
def _validate_md5(egg_name, data):
    """Return *data* unchanged, aborting the process if its MD5 digest
    does not match the known-good digest recorded for *egg_name*.

    Eggs with no recorded digest are passed through unverified.
    """
    expected = md5_data.get(egg_name)
    if expected is not None and md5(data).hexdigest() != expected:
        sys.stderr.write(
            ("md5 validation of %s failed! (Possible download problem?)"
             % egg_name) + "\n")
        sys.exit(2)
    return data
def use_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    download_delay=15
):
    """Automatically find/download setuptools and make it available on sys.path

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end with
    a '/'). `to_dir` is the directory where setuptools will be downloaded, if
    it is not already available. If `download_delay` is specified, it should
    be the number of seconds that will be paused before initiating a download,
    should one be required. If an older version of setuptools is installed,
    this routine will print a message to ``sys.stderr`` and raise SystemExit in
    an attempt to abort the calling script.
    """
    # If setuptools/pkg_resources was already imported we cannot swap it out
    # in-process; a version conflict below then becomes a hard error.
    was_imported = 'pkg_resources' in sys.modules or 'setuptools' in sys.modules
    def do_download():
        # Fetch the egg and make it importable for the remainder of this run.
        egg = download_setuptools(version, download_base, to_dir, download_delay)
        sys.path.insert(0, egg)
        import setuptools; setuptools.bootstrap_install_from = egg
    try:
        import pkg_resources
    except ImportError:
        # No setuptools at all: bootstrap from scratch.
        return do_download()
    try:
        pkg_resources.require("setuptools>="+version); return
    except pkg_resources.VersionConflict, e:
        if was_imported:
            print >>sys.stderr, (
            "The required version of setuptools (>=%s) is not available, and\n"
            "can't be installed while this script is running. Please install\n"
            " a more recent version first, using 'easy_install -U setuptools'."
            "\n\n(Currently using %r)"
            ) % (version, e.args[0])
            sys.exit(2)
    except pkg_resources.DistributionNotFound:
        pass
    # An unusable (too old) setuptools is importable but not yet imported:
    # drop the stale module and bootstrap a fresh egg instead.
    del pkg_resources, sys.modules['pkg_resources'] # reload ok
    return do_download()
def download_setuptools(
    version=DEFAULT_VERSION, download_base=DEFAULT_URL, to_dir=os.curdir,
    delay = 15
):
    """Download setuptools from a specified location and return its filename

    `version` should be a valid setuptools version number that is available
    as an egg for download under the `download_base` URL (which should end
    with a '/'). `to_dir` is the directory where the egg will be downloaded.
    `delay` is the number of seconds to pause before an actual download attempt.
    """
    import urllib2, shutil
    egg_name = "setuptools-%s-py%s.egg" % (version,sys.version[:3])
    url = download_base + egg_name
    saveto = os.path.join(to_dir, egg_name)
    # Pre-declare handles so the finally block can test them even when
    # urlopen()/open() never ran.
    src = dst = None
    if not os.path.exists(saveto): # Avoid repeated downloads
        try:
            from distutils import log
            if delay:
                # Warn the user (with a grace period) before touching the network.
                log.warn("""
---------------------------------------------------------------------------
This script requires setuptools version %s to run (even to display
help). I will attempt to download it for you (from
%s), but
you may need to enable firewall access for this script first.
I will start the download in %d seconds.
(Note: if this machine does not have network access, please obtain the file
%s
and place it in this directory before rerunning this script.)
---------------------------------------------------------------------------""",
                version, download_base, delay, url
                ); from time import sleep; sleep(delay)
            log.warn("Downloading %s", url)
            src = urllib2.urlopen(url)
            # Read/write all in one block, so we don't create a corrupt file
            # if the download is interrupted.
            data = _validate_md5(egg_name, src.read())
            dst = open(saveto,"wb"); dst.write(data)
        finally:
            if src: src.close()
            if dst: dst.close()
    return os.path.realpath(saveto)
def main(argv, version=DEFAULT_VERSION):
    """Install or upgrade setuptools and EasyInstall"""
    try:
        import setuptools
    except ImportError:
        # No setuptools present: download a temporary egg, use it to install
        # itself, then remove the temporary file.
        egg = None
        try:
            egg = download_setuptools(version, delay=0)
            sys.path.insert(0,egg)
            from setuptools.command.easy_install import main
            return main(list(argv)+[egg]) # we're done here
        finally:
            if egg and os.path.exists(egg):
                os.unlink(egg)
    else:
        # 0.0.1 is the version reported by the long-obsolete predecessor
        # package; it cannot be upgraded in place.
        if setuptools.__version__ == '0.0.1':
            print >>sys.stderr, (
            "You have an obsolete version of setuptools installed. Please\n"
            "remove it from your system entirely before rerunning this script."
            )
            sys.exit(2)
    req = "setuptools>="+version
    import pkg_resources
    try:
        pkg_resources.require(req)
    except pkg_resources.VersionConflict:
        # Installed setuptools is too old: upgrade it via easy_install,
        # bootstrapping easy_install itself from a fresh egg if needed.
        try:
            from setuptools.command.easy_install import main
        except ImportError:
            from easy_install import main
        main(list(argv)+[download_setuptools(delay=0)])
        sys.exit(0) # try to force an exit
    else:
        if argv:
            from setuptools.command.easy_install import main
            main(argv)
        else:
            print "Setuptools version",version,"or greater has been installed."
            print '(Run "ez_setup.py -U setuptools" to reinstall or upgrade.)'
def update_md5(filenames):
    """Update our built-in md5 registry"""
    # NOTE: this rewrites THIS script in place, replacing the body of the
    # module-level ``md5_data`` dict with fresh digests for *filenames*.
    import re
    for name in filenames:
        base = os.path.basename(name)
        f = open(name,'rb')
        md5_data[base] = md5(f.read()).hexdigest()
        f.close()
    # Render the dict entries as source lines, sorted for a stable diff.
    data = [" %r: %r,\n" % it for it in md5_data.items()]
    data.sort()
    repl = "".join(data)
    import inspect
    srcfile = inspect.getsourcefile(sys.modules[__name__])
    f = open(srcfile, 'rb'); src = f.read(); f.close()
    # Locate the literal dict body between "md5_data = {" and its closing brace.
    match = re.search("\nmd5_data = {\n([^}]+)}", src)
    if not match:
        print >>sys.stderr, "Internal error!"
        sys.exit(2)
    src = src[:match.start(1)] + repl + src[match.end(1):]
    f = open(srcfile,'w')
    f.write(src)
    f.close()
if __name__=='__main__':
    # ``script.py --md5update egg...`` refreshes the digest table embedded in
    # this file; any other invocation is forwarded to main() to
    # install/upgrade setuptools.
    if len(sys.argv)>2 and sys.argv[1]=='--md5update':
        update_md5(sys.argv[2:])
    else:
        main(sys.argv[1:])
# License: Apache License 2.0
########################################################################
#
# (C) 2015, Brian Coca <bcoca@ansible.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
########################################################################
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import datetime
import os
import tarfile
import tempfile
import yaml
from distutils.version import LooseVersion
from shutil import rmtree
import ansible.constants as C
from ansible.errors import AnsibleError
from ansible.module_utils.urls import open_url
from ansible.playbook.role.requirement import RoleRequirement
from ansible.galaxy.api import GalaxyAPI
try:
from __main__ import display
except ImportError:
from ansible.utils.display import Display
display = Display()
class GalaxyRole(object):
SUPPORTED_SCMS = set(['git', 'hg'])
META_MAIN = os.path.join('meta', 'main.yml')
META_INSTALL = os.path.join('meta', '.galaxy_install_info')
ROLE_DIRS = ('defaults','files','handlers','meta','tasks','templates','vars','tests')
def __init__(self, galaxy, name, src=None, version=None, scm=None, path=None):
    """Describe a single Galaxy role and resolve where it lives on disk."""
    self._metadata = None
    self._install_info = None
    self._validate_certs = not galaxy.options.ignore_certs
    display.debug('Validate TLS certificates: %s' % self._validate_certs)
    self.options = galaxy.options
    self.galaxy = galaxy
    self.name = name
    self.version = version
    self.src = src or name
    self.scm = scm
    if path is not None:
        # Honor the explicit path, appending the role name when it is absent.
        self.path = path if self.name in path else os.path.join(path, self.name)
    else:
        # The first configured roles path that already holds this role wins;
        # fall back to the first configured path when none do.
        candidates = [os.path.join(d, self.name) for d in galaxy.roles_paths]
        existing = [c for c in candidates if os.path.exists(c)]
        self.path = existing[0] if existing else candidates[0]
    # Every location this role could occupy, one per configured roles path.
    self.paths = [os.path.join(d, self.name) for d in galaxy.roles_paths]
def __repr__(self):
"""
Returns "rolename (version)" if version is not null
Returns "rolename" otherwise
"""
if self.version:
return "%s (%s)" % (self.name, self.version)
else:
return self.name
def __eq__(self, other):
return self.name == other.name
@property
def metadata(self):
    """
    Returns role metadata

    Lazily loads ``meta/main.yml`` from the role path on first access and
    caches it; returns False if the file cannot be read or parsed.
    """
    if self._metadata is None:
        meta_path = os.path.join(self.path, self.META_MAIN)
        if os.path.isfile(meta_path):
            try:
                # 'with' guarantees the handle is closed and — unlike the
                # previous open()/finally pair — cannot hit an unbound 'f'
                # (UnboundLocalError) when open() itself fails.
                with open(meta_path, 'r') as f:
                    self._metadata = yaml.safe_load(f)
            except Exception:
                display.vvvvv("Unable to load metadata for %s" % self.name)
                return False
    return self._metadata
@property
def install_info(self):
    """
    Returns role install info

    Lazily loads ``meta/.galaxy_install_info`` from the role path on first
    access and caches it; returns False if the file cannot be read or parsed.
    """
    if self._install_info is None:
        info_path = os.path.join(self.path, self.META_INSTALL)
        if os.path.isfile(info_path):
            try:
                # 'with' guarantees the handle is closed and — unlike the
                # previous open()/finally pair — cannot hit an unbound 'f'
                # (UnboundLocalError) when open() itself fails.
                with open(info_path, 'r') as f:
                    self._install_info = yaml.safe_load(f)
            except Exception:
                display.vvvvv("Unable to load Galaxy install info for %s" % self.name)
                return False
    return self._install_info
def _write_galaxy_install_info(self):
    """
    Writes a YAML-formatted file to the role's meta/ directory
    (named .galaxy_install_info) which contains some information
    we can use later for commands like 'list' and 'info'.

    :return: True on success, False if the info could not be serialized.
    """
    info = dict(
        version=self.version,
        install_date=datetime.datetime.utcnow().strftime("%c"),
    )
    # Compute the meta directory once instead of joining the path twice.
    meta_dir = os.path.join(self.path, 'meta')
    if not os.path.exists(meta_dir):
        os.makedirs(meta_dir)
    info_path = os.path.join(self.path, self.META_INSTALL)
    with open(info_path, 'w+') as f:
        try:
            # safe_dump() writes to f and returns None, which also resets the
            # cached install info so the next access re-reads the file.
            self._install_info = yaml.safe_dump(info, f)
        except yaml.YAMLError:  # was a bare 'except' that hid unrelated errors
            return False
    return True
def remove(self):
"""
Removes the specified role from the roles path.
There is a sanity check to make sure there's a meta/main.yml file at this
path so the user doesn't blow away random directories.
"""
if self.metadata:
try:
rmtree(self.path)
return True
except:
pass
return False
def fetch(self, role_data):
    """
    Downloads the archived role from github to a temp location

    :param role_data: role info mapping from the Galaxy API (or a plain URL
        in self.src when github coordinates are absent); falsy input is a
        no-op returning None.
    :return: path to the downloaded archive on success, False on failure.
    """
    if role_data:
        # first grab the file and save it to a temp location
        if "github_user" in role_data and "github_repo" in role_data:
            archive_url = 'https://github.com/%s/%s/archive/%s.tar.gz' % (role_data["github_user"], role_data["github_repo"], self.version)
        else:
            archive_url = self.src
        display.display("- downloading role from %s" % archive_url)
        url_file = None
        try:
            url_file = open_url(archive_url, validate_certs=self._validate_certs)
            temp_file = tempfile.NamedTemporaryFile(delete=False)
            try:
                data = url_file.read()
                while data:
                    temp_file.write(data)
                    data = url_file.read()
            finally:
                temp_file.close()
            return temp_file.name
        except Exception as e:
            display.error("failed to download the file: %s" % str(e))
            return False
        finally:
            # the HTTP response object was previously leaked on every call
            if url_file is not None:
                url_file.close()
def install(self):
    """
    Resolve, download and extract the role into self.path.

    The role source may be an SCM URL (archived via RoleRequirement), a
    local tar.gz, a direct URL, or a Galaxy name looked up through the
    API (in which case a version is chosen when none was given). The
    archive's meta/main.yml is validated, and only regular files and
    symlinks are extracted, with '..'/'~'/'$' path components stripped
    for safety. On a permission error the next configured roles path is
    tried. Returns True on success, False otherwise.
    """
    # the file is a tar, so open it that way and extract it
    # to the specified (or default) roles directory
    local_file = False

    if self.scm:
        # create tar file from scm url
        tmp_file = RoleRequirement.scm_archive_role(**self.spec)
    elif self.src:
        if os.path.isfile(self.src):
            # installing a local tar.gz
            local_file = True
            tmp_file = self.src
        elif '://' in self.src:
            role_data = self.src
            tmp_file = self.fetch(role_data)
        else:
            api = GalaxyAPI(self.galaxy)
            role_data = api.lookup_role_by_name(self.src)
            if not role_data:
                raise AnsibleError("- sorry, %s was not found on %s." % (self.src, api.api_server))

            if role_data.get('role_type') == 'CON' and not os.environ.get('ANSIBLE_CONTAINER'):
                # Container Enabled, running outside of a container
                display.warning("%s is a Container Enabled role and should only be installed using "
                                "Ansible Container" % self.name)

            if role_data.get('role_type') == 'APP':
                # Container Role
                display.warning("%s is a Container App role and should only be installed using Ansible "
                                "Container" % self.name)

            role_versions = api.fetch_role_related('versions', role_data['id'])
            if not self.version:
                # convert the version names to LooseVersion objects
                # and sort them to get the latest version. If there
                # are no versions in the list, we'll grab the head
                # of the master branch
                if len(role_versions) > 0:
                    loose_versions = [LooseVersion(a.get('name', None)) for a in role_versions]
                    loose_versions.sort()
                    self.version = str(loose_versions[-1])
                elif role_data.get('github_branch', None):
                    self.version = role_data['github_branch']
                else:
                    self.version = 'master'
            elif self.version != 'master':
                if role_versions and str(self.version) not in [a.get('name', None) for a in role_versions]:
                    raise AnsibleError("- the specified version (%s) of %s was not found in the list of available versions (%s)." % (self.version,
                                                                                                                                     self.name,
                                                                                                                                     role_versions))

            tmp_file = self.fetch(role_data)
    else:
        raise AnsibleError("No valid role data found")

    if tmp_file:
        display.debug("installing from %s" % tmp_file)

        if not tarfile.is_tarfile(tmp_file):
            raise AnsibleError("the file downloaded was not a tar.gz")
        else:
            if tmp_file.endswith('.gz'):
                role_tar_file = tarfile.open(tmp_file, "r:gz")
            else:
                role_tar_file = tarfile.open(tmp_file, "r")
            # verify the role's meta file
            meta_file = None
            members = role_tar_file.getmembers()
            # next find the metadata file
            for member in members:
                if self.META_MAIN in member.name:
                    # Look for parent of meta/main.yml
                    # Due to possibility of sub roles each containing meta/main.yml
                    # look for shortest length parent
                    meta_parent_dir = os.path.dirname(os.path.dirname(member.name))
                    if not meta_file:
                        archive_parent_dir = meta_parent_dir
                        meta_file = member
                    else:
                        if len(meta_parent_dir) < len(archive_parent_dir):
                            archive_parent_dir = meta_parent_dir
                            meta_file = member
            if not meta_file:
                raise AnsibleError("this role does not appear to have a meta/main.yml file.")
            else:
                try:
                    self._metadata = yaml.safe_load(role_tar_file.extractfile(meta_file))
                except Exception:
                    # broad on purpose: any parse/extract failure means the
                    # archive's metadata is unusable (was a bare except)
                    raise AnsibleError("this role does not appear to have a valid meta/main.yml file.")

            # we strip off any higher-level directories for all of the files contained within
            # the tar file here. The default is 'github_repo-target'. Gerrit instances, on the other
            # hand, does not have a parent directory at all.
            installed = False
            while not installed:
                display.display("- extracting %s to %s" % (self.name, self.path))
                try:
                    if os.path.exists(self.path):
                        if not os.path.isdir(self.path):
                            raise AnsibleError("the specified roles path exists and is not a directory.")
                        elif not getattr(self.options, "force", False):
                            raise AnsibleError("the specified role %s appears to already exist. Use --force to replace it." % self.name)
                        else:
                            # using --force, remove the old path
                            if not self.remove():
                                raise AnsibleError("%s doesn't appear to contain a role.\n  please remove this directory manually if you really "
                                                   "want to put the role here." % self.path)
                    else:
                        os.makedirs(self.path)

                    # now we do the actual extraction to the path
                    for member in members:
                        # we only extract files, and remove any relative path
                        # bits that might be in the file for security purposes
                        # and drop any containing directory, as mentioned above
                        if member.isreg() or member.issym():
                            parts = member.name.replace(archive_parent_dir, "").split(os.sep)
                            final_parts = []
                            for part in parts:
                                if part != '..' and '~' not in part and '$' not in part:
                                    final_parts.append(part)
                            member.name = os.path.join(*final_parts)
                            role_tar_file.extract(member, self.path)

                    # write out the install info file for later use
                    self._write_galaxy_install_info()
                    installed = True
                except OSError as e:
                    error = True
                    # EACCES (13): this roles path isn't writable; try the next
                    # configured path, if one remains. (Was ``e[0] == 13``,
                    # which raises TypeError on Python 3.)
                    if e.errno == 13 and len(self.paths) > 1:
                        current = self.paths.index(self.path)
                        nextidx = current + 1
                        # Guard the actual next index; the old check
                        # ``len(self.paths) >= current`` was always true and
                        # could IndexError on the last path.
                        if nextidx < len(self.paths):
                            self.path = self.paths[nextidx]
                            error = False
                    if error:
                        raise AnsibleError("Could not update files in %s: %s" % (self.path, str(e)))

            # return the parsed yaml metadata
            display.display("- %s was installed successfully" % str(self))
            if not local_file:
                try:
                    os.unlink(tmp_file)
                except (OSError, IOError) as e:
                    display.warning("Unable to remove tmp file (%s): %s" % (tmp_file, str(e)))
            return True

    return False
@property
def spec(self):
    """
    Role spec info as a plain dict, e.g.::

        {
            'scm': 'git',
            'src': 'http://git.example.com/repos/repo.git',
            'version': 'v1.0',
            'name': 'repo'
        }
    """
    return {
        'scm': self.scm,
        'src': self.src,
        'version': self.version,
        'name': self.name,
    }
| gpl-3.0 |
knowsis/django | tests/staticfiles_tests/tests.py | 46 | 33062 | # -*- encoding: utf-8 -*-
from __future__ import unicode_literals
import codecs
import os
import posixpath
import shutil
import sys
import tempfile
from django.template import loader, Context
from django.conf import settings
from django.core.cache.backends.base import BaseCache
from django.core.exceptions import ImproperlyConfigured
from django.core.management import call_command
from django.test import TestCase
from django.test.utils import override_settings
from django.utils.encoding import force_text
from django.utils.functional import empty
from django.utils._os import rmtree_errorhandler, upath
from django.utils import six
from django.contrib.staticfiles import finders, storage
from django.contrib.staticfiles.management.commands import collectstatic
from .storage import DummyStorage
# Absolute path of the directory containing this test module; anchor for
# all fixture paths referenced below.
TEST_ROOT = os.path.dirname(upath(__file__))

# Baseline settings applied (via override_settings) for the staticfiles
# tests: fixture-backed STATIC/MEDIA roots plus the default trio of
# staticfiles finders.
TEST_SETTINGS = {
    'DEBUG': True,
    'MEDIA_URL': '/media/',
    'STATIC_URL': '/static/',
    'MEDIA_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'media'),
    'STATIC_ROOT': os.path.join(TEST_ROOT, 'project', 'site_media', 'static'),
    'STATICFILES_DIRS': (
        os.path.join(TEST_ROOT, 'project', 'documents'),
        # (prefix, path) pair: these files are found under 'prefix/...'
        ('prefix', os.path.join(TEST_ROOT, 'project', 'prefixed')),
    ),
    'STATICFILES_FINDERS': (
        'django.contrib.staticfiles.finders.FileSystemFinder',
        'django.contrib.staticfiles.finders.AppDirectoriesFinder',
        'django.contrib.staticfiles.finders.DefaultStorageFinder',
    ),
}
from django.contrib.staticfiles.management.commands.collectstatic import Command as CollectstaticCommand
class BaseStaticFilesTestCase(object):
    """
    Test case mixin with a couple of utility assertions around static
    file lookup and ``{% static %}`` template rendering.
    """
    def setUp(self):
        # Clear the cached staticfiles_storage out, this is because when it first
        # gets accessed (by some other test), it evaluates settings.STATIC_ROOT,
        # since we're planning on changing that we need to clear out the cache.
        storage.staticfiles_storage._wrapped = empty
        # Clear the cached staticfile finders, so they are reinitialized every
        # run and pick up changes in settings.STATICFILES_DIRS.
        finders._finders.clear()

        testfiles_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test')
        # To make sure SVN doesn't hangs itself with the non-ASCII characters
        # during checkout, we actually create one file dynamically.
        self._nonascii_filepath = os.path.join(testfiles_path, '\u2297.txt')
        with codecs.open(self._nonascii_filepath, 'w', 'utf-8') as f:
            f.write("\u2297 in the app dir")
        # And also create the stupid hidden file to dwarf the setup.py's
        # package data handling.
        self._hidden_filepath = os.path.join(testfiles_path, '.hidden')
        with codecs.open(self._hidden_filepath, 'w', 'utf-8') as f:
            f.write("should be ignored")
        self._backup_filepath = os.path.join(
            TEST_ROOT, 'project', 'documents', 'test', 'backup~')
        with codecs.open(self._backup_filepath, 'w', 'utf-8') as f:
            f.write("should be ignored")

    def tearDown(self):
        # Remove the files created dynamically in setUp().
        os.unlink(self._nonascii_filepath)
        os.unlink(self._hidden_filepath)
        os.unlink(self._backup_filepath)

    def assertFileContains(self, filepath, text):
        """Assert the located static file contains ``text``."""
        self.assertIn(text, self._get_file(force_text(filepath)),
                      "'%s' not in '%s'" % (text, filepath))

    def assertFileNotFound(self, filepath):
        """Assert no static file can be located at ``filepath``."""
        self.assertRaises(IOError, self._get_file, filepath)

    def render_template(self, template, **kwargs):
        if isinstance(template, six.string_types):
            template = loader.get_template_from_string(template)
        return template.render(Context(kwargs)).strip()

    def static_template_snippet(self, path, asvar=False):
        if asvar:
            return "{%% load static from staticfiles %%}{%% static '%s' as var %%}{{ var }}" % path
        return "{%% load static from staticfiles %%}{%% static '%s' %%}" % path

    def assertStaticRenders(self, path, result, asvar=False, **kwargs):
        template = self.static_template_snippet(path, asvar)
        self.assertEqual(self.render_template(template, **kwargs), result)

    def assertStaticRaises(self, exc, path, result, asvar=False, **kwargs):
        # Forward ``asvar`` so the failing render is exercised in the mode the
        # caller requested (it was previously accepted but silently dropped).
        self.assertRaises(exc, self.assertStaticRenders, path, result, asvar=asvar, **kwargs)
@override_settings(**TEST_SETTINGS)
class StaticFilesTestCase(BaseStaticFilesTestCase, TestCase):
    # Concrete test case running under the baseline TEST_SETTINGS.
    pass


class BaseCollectionTestCase(BaseStaticFilesTestCase):
    """
    Tests shared by all file finding features (collectstatic,
    findstatic, and static serve view).

    This relies on the asserts defined in BaseStaticFilesTestCase, but
    is separated because some test cases need those asserts without
    all these tests.
    """
    def setUp(self):
        super(BaseCollectionTestCase, self).setUp()
        self.old_root = settings.STATIC_ROOT
        # Collect into a throwaway STATIC_ROOT so each test run is isolated.
        settings.STATIC_ROOT = tempfile.mkdtemp(dir=os.environ['DJANGO_TEST_TEMP_DIR'])
        self.run_collectstatic()
        # Use our own error handler that can handle .svn dirs on Windows
        self.addCleanup(shutil.rmtree, settings.STATIC_ROOT,
                        ignore_errors=True, onerror=rmtree_errorhandler)

    def tearDown(self):
        settings.STATIC_ROOT = self.old_root
        super(BaseCollectionTestCase, self).tearDown()

    def run_collectstatic(self, **kwargs):
        # Run collectstatic quietly; subclasses override to add options.
        call_command('collectstatic', interactive=False, verbosity='0',
                     ignore_patterns=['*.ignoreme'], **kwargs)

    def _get_file(self, filepath):
        # Read a collected file out of STATIC_ROOT as UTF-8 text.
        assert filepath, 'filepath is empty.'
        filepath = os.path.join(settings.STATIC_ROOT, filepath)
        with codecs.open(filepath, "r", "utf-8") as f:
            return f.read()


class CollectionTestCase(BaseCollectionTestCase, StaticFilesTestCase):
    # Collection tests run against the baseline settings.
    pass
class TestDefaults(object):
    """
    A few standard test cases, mixed into several collection/finder/serve
    suites to verify the shared file-resolution semantics.
    """
    def test_staticfiles_dirs(self):
        """
        Can find a file in a STATICFILES_DIRS directory.
        """
        self.assertFileContains('test.txt', 'Can we find')
        self.assertFileContains(os.path.join('prefix', 'test.txt'), 'Prefix')

    def test_staticfiles_dirs_subdir(self):
        """
        Can find a file in a subdirectory of a STATICFILES_DIRS
        directory.
        """
        self.assertFileContains('subdir/test.txt', 'Can we find')

    def test_staticfiles_dirs_priority(self):
        """
        File in STATICFILES_DIRS has priority over file in app.
        """
        self.assertFileContains('test/file.txt', 'STATICFILES_DIRS')

    def test_app_files(self):
        """
        Can find a file in an app static/ directory.
        """
        self.assertFileContains('test/file1.txt', 'file1 in the app dir')

    def test_nonascii_filenames(self):
        """
        Can find a file with non-ASCII character in an app static/ directory.
        """
        self.assertFileContains('test/⊗.txt', '⊗ in the app dir')

    def test_camelcase_filenames(self):
        """
        Can find a file with capital letters.
        """
        self.assertFileContains('test/camelCase.txt', 'camelCase')
class TestFindStatic(CollectionTestCase, TestDefaults):
    """
    Test ``findstatic`` management command.
    """
    def _get_file(self, filepath):
        # findstatic prints the absolute path of the first match; read the
        # file it points at back in as UTF-8 text.
        out = six.StringIO()
        call_command('findstatic', filepath, all=False, verbosity=0, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        with codecs.open(force_text(lines[0].strip()), "r", "utf-8") as f:
            return f.read()

    def test_all_files(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v1.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=1, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        self.assertEqual(len(lines), 3)  # three because there is also the "Found <file> here" line
        self.assertIn('project', force_text(lines[1]))
        self.assertIn('apps', force_text(lines[2]))

    def test_all_files_less_verbose(self):
        """
        Test that findstatic returns all candidate files if run without --first and -v0.
        """
        out = six.StringIO()
        call_command('findstatic', 'test/file.txt', verbosity=0, stdout=out)
        out.seek(0)
        lines = [l.strip() for l in out.readlines()]
        # at verbosity 0 there is no header line, just the two matches
        self.assertEqual(len(lines), 2)
        self.assertIn('project', force_text(lines[0]))
        self.assertIn('apps', force_text(lines[1]))
class TestConfiguration(StaticFilesTestCase):
    """Configuration sanity checks for collectstatic."""

    def test_location_empty(self):
        # collectstatic must refuse to run when STATIC_ROOT is empty or None.
        err = six.StringIO()
        for root in ['', None]:
            with override_settings(STATIC_ROOT=root):
                with six.assertRaisesRegex(
                        self, ImproperlyConfigured,
                        'without having set the STATIC_ROOT setting to a filesystem path'):
                    call_command('collectstatic', interactive=False, verbosity=0, stderr=err)

    def test_local_storage_detection_helper(self):
        # is_local_storage() must reflect the configured storage backend,
        # whether set via settings or assigned directly on the module.
        staticfiles_storage = storage.staticfiles_storage
        try:
            storage.staticfiles_storage._wrapped = empty
            with override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage'):
                command = collectstatic.Command()
                self.assertTrue(command.is_local_storage())

            storage.staticfiles_storage._wrapped = empty
            with override_settings(STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage'):
                command = collectstatic.Command()
                self.assertFalse(command.is_local_storage())

            storage.staticfiles_storage = storage.FileSystemStorage()
            command = collectstatic.Command()
            self.assertTrue(command.is_local_storage())

            storage.staticfiles_storage = DummyStorage()
            command = collectstatic.Command()
            self.assertFalse(command.is_local_storage())
        finally:
            # Restore the module-level storage object for subsequent tests.
            storage.staticfiles_storage = staticfiles_storage
class TestCollection(CollectionTestCase, TestDefaults):
    """
    Test ``collectstatic`` management command.
    """
    def test_ignore(self):
        """
        Test that -i patterns are ignored.
        """
        # setUp runs collectstatic with ignore_patterns=['*.ignoreme']
        self.assertFileNotFound('test/test.ignoreme')

    def test_common_ignore_patterns(self):
        """
        Common ignore patterns (*~, .*, CVS) are ignored.
        """
        self.assertFileNotFound('test/.hidden')
        self.assertFileNotFound('test/backup~')
        self.assertFileNotFound('test/CVS')
class TestCollectionClear(CollectionTestCase):
    """
    Test the ``--clear`` option of the ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        # Seed STATIC_ROOT with a stale file that --clear should wipe.
        clear_filepath = os.path.join(settings.STATIC_ROOT, 'cleared.txt')
        with open(clear_filepath, 'w') as f:
            f.write('should be cleared')
        # Forward **kwargs (previously accepted but silently dropped) so
        # callers can still pass extra collectstatic options.
        super(TestCollectionClear, self).run_collectstatic(clear=True, **kwargs)

    def test_cleared_not_found(self):
        self.assertFileNotFound('cleared.txt')
class TestCollectionExcludeNoDefaultIgnore(CollectionTestCase, TestDefaults):
    """
    Test ``--exclude-dirs`` and ``--no-default-ignore`` options of the
    ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        # Accept and forward **kwargs for consistency with the base class
        # signature (previously no extra options could be passed through).
        super(TestCollectionExcludeNoDefaultIgnore, self).run_collectstatic(
            use_default_ignore_patterns=False, **kwargs)

    def test_no_common_ignore_patterns(self):
        """
        With --no-default-ignore, common ignore patterns (*~, .*, CVS)
        are not ignored.
        """
        self.assertFileContains('test/.hidden', 'should be ignored')
        self.assertFileContains('test/backup~', 'should be ignored')
        self.assertFileContains('test/CVS', 'should be ignored')
class TestNoFilesCreated(object):
    # Mixin asserting that a collectstatic run left STATIC_ROOT empty.

    def test_no_files_created(self):
        """
        Make sure no files were created in the destination directory.
        """
        self.assertEqual(os.listdir(settings.STATIC_ROOT), [])
class TestCollectionDryRun(CollectionTestCase, TestNoFilesCreated):
    """
    Test ``--dry-run`` option for ``collectstatic`` management command.
    """
    def run_collectstatic(self, **kwargs):
        # Accept **kwargs for signature consistency with the base class.
        super(TestCollectionDryRun, self).run_collectstatic(dry_run=True, **kwargs)
class TestCollectionFilesOverride(CollectionTestCase):
    """
    Test overriding duplicated files by ``collectstatic`` management command.

    Check for proper handling of apps order in INSTALLED_APPS even if file modification
    dates are in different order:
        'staticfiles_tests.apps.test',
        'staticfiles_tests.apps.no_label',
    """
    def setUp(self):
        self.orig_path = os.path.join(TEST_ROOT, 'apps', 'no_label', 'static', 'file2.txt')
        # get modification and access times for no_label/static/file2.txt
        self.orig_mtime = os.path.getmtime(self.orig_path)
        self.orig_atime = os.path.getatime(self.orig_path)

        # prepare duplicate of file2.txt from no_label app
        # this file will have modification time older than no_label/static/file2.txt
        # anyway it should be taken to STATIC_ROOT because 'test' app is before
        # 'no_label' app in INSTALLED_APPS
        self.testfile_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'file2.txt')
        with open(self.testfile_path, 'w+') as f:
            f.write('duplicate of file2.txt')
        os.utime(self.testfile_path, (self.orig_atime - 1, self.orig_mtime - 1))
        super(TestCollectionFilesOverride, self).setUp()

    def tearDown(self):
        # remove the dynamically created duplicate
        if os.path.exists(self.testfile_path):
            os.unlink(self.testfile_path)
        # set back original modification time
        os.utime(self.orig_path, (self.orig_atime, self.orig_mtime))
        super(TestCollectionFilesOverride, self).tearDown()

    def test_ordering_override(self):
        """
        Test if collectstatic takes files in proper order
        """
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')

        # and now change modification time of no_label/static/file2.txt
        # test app is first in INSTALLED_APPS so file2.txt should remain unmodified
        mtime = os.path.getmtime(self.testfile_path)
        atime = os.path.getatime(self.testfile_path)
        os.utime(self.orig_path, (mtime + 1, atime + 1))

        # run collectstatic again
        self.run_collectstatic()
        self.assertFileContains('file2.txt', 'duplicate of file2.txt')


@override_settings(
    STATICFILES_STORAGE='staticfiles_tests.storage.DummyStorage',
)
class TestCollectionNonLocalStorage(CollectionTestCase, TestNoFilesCreated):
    """
    Tests for #15035: collecting into a non-local storage backend must not
    create any files in the local STATIC_ROOT.
    """
    pass
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(TEST_SETTINGS,
    STATICFILES_STORAGE='django.contrib.staticfiles.storage.CachedStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionCachedStorage(BaseCollectionTestCase,
                                  BaseStaticFilesTestCase, TestCase):
    """
    Tests for the Cache busting storage
    """
    def cached_file_path(self, path):
        # Render {% static %} for ``path`` and strip the STATIC_URL prefix,
        # leaving the hashed relative path.
        fullpath = self.render_template(self.static_template_snippet(path))
        return fullpath.replace(settings.STATIC_URL, '')

    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        self.assertStaticRaises(ValueError,
                                "does/not/exist.png",
                                "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt",
                                 "/static/test/file.dad0999e4f8f.txt")
        self.assertStaticRenders("test/file.txt",
                                 "/static/test/file.dad0999e4f8f.txt", asvar=True)
        self.assertStaticRenders("cached/styles.css",
                                 "/static/cached/styles.93b1147e8552.css")
        self.assertStaticRenders("path/",
                                 "/static/path/")
        self.assertStaticRenders("path/?query",
                                 "/static/path/?query")

    def test_template_tag_simple_content(self):
        # references inside a hashed file are rewritten to hashed names
        relpath = self.cached_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.93b1147e8552.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_ignored_completely(self):
        # fragment-only, scheme-prefixed, data: and protocol-relative URLs
        # must be left untouched by the post-processor
        relpath = self.cached_file_path("cached/css/ignored.css")
        self.assertEqual(relpath, "cached/css/ignored.6c77f2643390.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'#foobar', content)
            self.assertIn(b'http:foobar', content)
            self.assertIn(b'https:foobar', content)
            self.assertIn(b'data:foobar', content)
            self.assertIn(b'//foobar', content)

    def test_path_with_querystring(self):
        relpath = self.cached_file_path("cached/styles.css?spam=eggs")
        self.assertEqual(relpath,
                         "cached/styles.93b1147e8552.css?spam=eggs")
        with storage.staticfiles_storage.open(
                "cached/styles.93b1147e8552.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_with_fragment(self):
        relpath = self.cached_file_path("cached/styles.css#eggs")
        self.assertEqual(relpath, "cached/styles.93b1147e8552.css#eggs")
        with storage.staticfiles_storage.open(
                "cached/styles.93b1147e8552.css") as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    def test_path_with_querystring_and_fragment(self):
        relpath = self.cached_file_path("cached/css/fragments.css")
        self.assertEqual(relpath, "cached/css/fragments.75433540b096.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertIn(b'fonts/font.a4b0478549d0.eot?#iefix', content)
            self.assertIn(b'fonts/font.b8d603e42714.svg#webfontIyfZbseF', content)
            self.assertIn(b'data:font/woff;charset=utf-8;base64,d09GRgABAAAAADJoAA0AAAAAR2QAAQAAAAAAAAAAAAA', content)
            self.assertIn(b'#default#VML', content)

    def test_template_tag_absolute(self):
        relpath = self.cached_file_path("cached/absolute.css")
        self.assertEqual(relpath, "cached/absolute.23f087ad823a.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"/static/cached/styles.css", content)
            self.assertIn(b"/static/cached/styles.93b1147e8552.css", content)
            self.assertIn(b'/static/cached/img/relative.acae32e4532b.png', content)

    def test_template_tag_denorm(self):
        # denormalized (../, //) references are normalized while hashing
        relpath = self.cached_file_path("cached/denorm.css")
        self.assertEqual(relpath, "cached/denorm.c5bd139ad821.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"..//cached///styles.css", content)
            self.assertIn(b"../cached/styles.93b1147e8552.css", content)
            self.assertNotIn(b"url(img/relative.png )", content)
            self.assertIn(b'url("img/relative.acae32e4532b.png', content)

    def test_template_tag_relative(self):
        relpath = self.cached_file_path("cached/relative.css")
        self.assertEqual(relpath, "cached/relative.2217ea7273c2.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"../cached/styles.css", content)
            self.assertNotIn(b'@import "styles.css"', content)
            self.assertNotIn(b'url(img/relative.png)', content)
            self.assertIn(b'url("img/relative.acae32e4532b.png")', content)
            self.assertIn(b"../cached/styles.93b1147e8552.css", content)

    def test_import_replacement(self):
        "See #18050"
        relpath = self.cached_file_path("cached/import.css")
        self.assertEqual(relpath, "cached/import.2b1d40b0bbd4.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"""import url("styles.93b1147e8552.css")""", relfile.read())

    def test_template_tag_deep_relative(self):
        relpath = self.cached_file_path("cached/css/window.css")
        self.assertEqual(relpath, "cached/css/window.9db38d5169f3.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b'url(img/window.png)', content)
            self.assertIn(b'url("img/window.acae32e4532b.png")', content)

    def test_template_tag_url(self):
        # fully-qualified URLs are preserved as-is
        relpath = self.cached_file_path("cached/url.css")
        self.assertEqual(relpath, "cached/url.615e21601e4b.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            self.assertIn(b"https://", relfile.read())

    def test_cache_invalidation(self):
        name = "cached/styles.css"
        hashed_name = "cached/styles.93b1147e8552.css"
        # check if the cache is filled correctly as expected
        cache_key = storage.staticfiles_storage.cache_key(name)
        cached_name = storage.staticfiles_storage.cache.get(cache_key)
        self.assertEqual(self.cached_file_path(name), cached_name)
        # clearing the cache to make sure we re-set it correctly in the url method
        storage.staticfiles_storage.cache.clear()
        cached_name = storage.staticfiles_storage.cache.get(cache_key)
        self.assertEqual(cached_name, None)
        self.assertEqual(self.cached_file_path(name), hashed_name)
        cached_name = storage.staticfiles_storage.cache.get(cache_key)
        self.assertEqual(cached_name, hashed_name)

    def test_post_processing(self):
        """Test that post_processing behaves correctly.

        Files that are alterable should always be post-processed; files that
        aren't should be skipped.

        collectstatic has already been called once in setUp() for this testcase,
        therefore we check by verifying behavior on a second run.
        """
        collectstatic_args = {
            'interactive': False,
            'verbosity': '0',
            'link': False,
            'clear': False,
            'dry_run': False,
            'post_process': True,
            'use_default_ignore_patterns': True,
            'ignore_patterns': ['*.ignoreme'],
        }

        collectstatic_cmd = CollectstaticCommand()
        collectstatic_cmd.set_options(**collectstatic_args)
        stats = collectstatic_cmd.collect()
        self.assertIn(os.path.join('cached', 'css', 'window.css'), stats['post_processed'])
        self.assertIn(os.path.join('cached', 'css', 'img', 'window.png'), stats['unmodified'])
        self.assertIn(os.path.join('test', 'nonascii.css'), stats['post_processed'])

    def test_cache_key_memcache_validation(self):
        """
        Handle cache key creation correctly, see #17861.
        """
        name = "/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/long filename/ with spaces Here and ?#%#$/other/stuff/some crazy/" + "\x16" + "\xb4"
        cache_key = storage.staticfiles_storage.cache_key(name)
        cache_validator = BaseCache({})
        # must not raise despite spaces/control chars in the original name
        cache_validator.validate_key(cache_key)
        self.assertEqual(cache_key, 'staticfiles:821ea71ef36f95b3922a77f7364670e7')

    def test_css_import_case_insensitive(self):
        relpath = self.cached_file_path("cached/styles_insensitive.css")
        self.assertEqual(relpath, "cached/styles_insensitive.2f0151cca872.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.d41d8cd98f00.css", content)

    @override_settings(
        STATICFILES_DIRS=(os.path.join(TEST_ROOT, 'project', 'faulty'),),
        STATICFILES_FINDERS=('django.contrib.staticfiles.finders.FileSystemFinder',),
    )
    def test_post_processing_failure(self):
        """
        Test that post_processing indicates the origin of the error when it
        fails. Regression test for #18986.
        """
        finders._finders.clear()
        err = six.StringIO()
        with self.assertRaises(Exception) as cm:
            call_command('collectstatic', interactive=False, verbosity=0, stderr=err)
        self.assertEqual("Post-processing 'faulty.css' failed!\n\n", err.getvalue())
# we set DEBUG to False here since the template tag wouldn't work otherwise
@override_settings(**dict(TEST_SETTINGS,
    STATICFILES_STORAGE='staticfiles_tests.storage.SimpleCachedStaticFilesStorage',
    DEBUG=False,
))
class TestCollectionSimpleCachedStorage(BaseCollectionTestCase,
                                        BaseStaticFilesTestCase, TestCase):
    """
    Tests for the Cache busting storage
    """
    def cached_file_path(self, path):
        # Render {% static %} for ``path`` and strip the STATIC_URL prefix.
        fullpath = self.render_template(self.static_template_snippet(path))
        return fullpath.replace(settings.STATIC_URL, '')

    def test_template_tag_return(self):
        """
        Test the CachedStaticFilesStorage backend.
        """
        # the dummy storage appends a fixed 'deploy12345' token instead of
        # a content hash
        self.assertStaticRaises(ValueError,
                                "does/not/exist.png",
                                "/static/does/not/exist.png")
        self.assertStaticRenders("test/file.txt",
                                 "/static/test/file.deploy12345.txt")
        self.assertStaticRenders("cached/styles.css",
                                 "/static/cached/styles.deploy12345.css")
        self.assertStaticRenders("path/",
                                 "/static/path/")
        self.assertStaticRenders("path/?query",
                                 "/static/path/?query")

    def test_template_tag_simple_content(self):
        relpath = self.cached_file_path("cached/styles.css")
        self.assertEqual(relpath, "cached/styles.deploy12345.css")
        with storage.staticfiles_storage.open(relpath) as relfile:
            content = relfile.read()
            self.assertNotIn(b"cached/other.css", content)
            self.assertIn(b"other.deploy12345.css", content)
# Symlink support is unreliable on win32, so only define these tests elsewhere.
if sys.platform != 'win32':

    class TestCollectionLinks(CollectionTestCase, TestDefaults):
        """
        Test ``--link`` option for ``collectstatic`` management command.

        Note that by inheriting ``TestDefaults`` we repeat all
        the standard file resolving tests here, to make sure using
        ``--link`` does not change the file-selection semantics.
        """
        def run_collectstatic(self):
            super(TestCollectionLinks, self).run_collectstatic(link=True)

        def test_links_created(self):
            """
            With ``--link``, symbolic links are created.
            """
            self.assertTrue(os.path.islink(os.path.join(settings.STATIC_ROOT, 'test.txt')))
class TestServeStatic(StaticFilesTestCase):
    """
    Test static asset serving view.
    """
    urls = 'staticfiles_tests.urls.default'

    def _response(self, filepath):
        # GET the file through the serving view under STATIC_URL.
        return self.client.get(
            posixpath.join(settings.STATIC_URL, filepath))

    def assertFileContains(self, filepath, text):
        self.assertContains(self._response(filepath), text)

    def assertFileNotFound(self, filepath):
        self.assertEqual(self._response(filepath).status_code, 404)


class TestServeDisabled(TestServeStatic):
    """
    Test serving static files disabled when DEBUG is False.
    """
    def setUp(self):
        super(TestServeDisabled, self).setUp()
        settings.DEBUG = False

    def test_disabled_serving(self):
        six.assertRaisesRegex(self, ImproperlyConfigured, 'The staticfiles view '
            'can only be used in debug mode ', self._response, 'test.txt')


class TestServeStaticWithDefaultURL(TestServeStatic, TestDefaults):
    """
    Test static asset serving view with manually configured URLconf.
    """
    pass


class TestServeStaticWithURLHelper(TestServeStatic, TestDefaults):
    """
    Test static asset serving view with staticfiles_urlpatterns helper.
    """
    urls = 'staticfiles_tests.urls.helper'


class TestServeAdminMedia(TestServeStatic):
    """
    Test serving media from django.contrib.admin.
    """
    def _response(self, filepath):
        # admin assets live under the 'admin/' prefix of STATIC_URL
        return self.client.get(
            posixpath.join(settings.STATIC_URL, 'admin/', filepath))

    def test_serve_admin_media(self):
        self.assertFileContains('css/base.css', 'body')
class FinderTestCase(object):
    """
    Base finder test mixin.

    Subclasses set ``self.finder`` plus ``self.find_first``/``self.find_all``
    (requested path, expected result) pairs. On Windows, the case of the path
    we ask the finders for and the path(s) they find can sometimes differ, so
    everything is compared through os.path.normcase() to avoid false
    negatives.
    """
    def test_find_first(self):
        requested, expected = self.find_first
        located = self.finder.find(requested)
        self.assertEqual(os.path.normcase(located), os.path.normcase(expected))

    def test_find_all(self):
        requested, expected = self.find_all
        normalize = os.path.normcase
        located = [normalize(path) for path in self.finder.find(requested, all=True)]
        self.assertEqual(located, [normalize(path) for path in expected])
class TestFileSystemFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test FileSystemFinder.
    """
    def setUp(self):
        super(TestFileSystemFinder, self).setUp()
        self.finder = finders.FileSystemFinder()
        test_file_path = os.path.join(TEST_ROOT, 'project', 'documents', 'test', 'file.txt')
        # (requested relative path, expected absolute match) for the mixin
        self.find_first = (os.path.join('test', 'file.txt'), test_file_path)
        self.find_all = (os.path.join('test', 'file.txt'), [test_file_path])


class TestAppDirectoriesFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test AppDirectoriesFinder.
    """
    def setUp(self):
        super(TestAppDirectoriesFinder, self).setUp()
        self.finder = finders.AppDirectoriesFinder()
        test_file_path = os.path.join(TEST_ROOT, 'apps', 'test', 'static', 'test', 'file1.txt')
        # (requested relative path, expected absolute match) for the mixin
        self.find_first = (os.path.join('test', 'file1.txt'), test_file_path)
        self.find_all = (os.path.join('test', 'file1.txt'), [test_file_path])


class TestDefaultStorageFinder(StaticFilesTestCase, FinderTestCase):
    """
    Test DefaultStorageFinder.
    """
    def setUp(self):
        super(TestDefaultStorageFinder, self).setUp()
        self.finder = finders.DefaultStorageFinder(
            storage=storage.StaticFilesStorage(location=settings.MEDIA_ROOT))
        test_file_path = os.path.join(settings.MEDIA_ROOT, 'media-file.txt')
        # (requested relative path, expected absolute match) for the mixin
        self.find_first = ('media-file.txt', test_file_path)
        self.find_all = ('media-file.txt', [test_file_path])
class TestMiscFinder(TestCase):
    """
    A few misc finder tests.
    """
    def test_get_finder(self):
        # get_finder() resolves a dotted path to a finder instance.
        finder = finders.get_finder(
            'django.contrib.staticfiles.finders.FileSystemFinder')
        self.assertIsInstance(finder, finders.FileSystemFinder)

    def test_get_finder_bad_classname(self):
        with self.assertRaises(ImproperlyConfigured):
            finders.get_finder(
                'django.contrib.staticfiles.finders.FooBarFinder')

    def test_get_finder_bad_module(self):
        with self.assertRaises(ImproperlyConfigured):
            finders.get_finder('foo.bar.FooBarFinder')

    @override_settings(STATICFILES_DIRS='a string')
    def test_non_tuple_raises_exception(self):
        """
        We can't determine if STATICFILES_DIRS is set correctly just by
        looking at the type, but we can determine if it's definitely wrong.
        """
        with self.assertRaises(ImproperlyConfigured):
            finders.FileSystemFinder()

    @override_settings(MEDIA_ROOT='')
    def test_location_empty(self):
        with self.assertRaises(ImproperlyConfigured):
            finders.DefaultStorageFinder()
class TestTemplateTag(StaticFilesTestCase):

    def test_template_tag(self):
        """The static template helper prefixes relative paths with the static URL."""
        # No existence check is performed — a nonexistent path still renders.
        self.assertStaticRenders("does/not/exist.png",
                                 "/static/does/not/exist.png")
        self.assertStaticRenders("testfile.txt", "/static/testfile.txt")
| bsd-3-clause |
agilgur5/LTLMoP | src/lib/handlers/nao/NaoLocomotionCommandHandler.py | 7 | 1046 | #!/usr/bin/env python
"""
==================================================================
naoLocomotionCommand.py - Nao Locomotion Command Handler
==================================================================
Send forward, side, and angular velocity commands to the Nao.
"""
import lib.handlers.handlerTemplates as handlerTemplates
class NaoLocomotionCommandHandler(handlerTemplates.LocomotionCommandHandler):
    """Drives the Nao base by forwarding velocity commands to ALMotion."""

    def __init__(self, executor, shared_data):
        """
        Locomotion Command handler for NAO robot.
        """
        # Reuse the connection owned by the init handler to obtain an
        # ALMotion proxy instead of opening a second one.
        self.naoInitHandler = shared_data['NAO_INIT_HANDLER']
        self.movProxy = self.naoInitHandler.createProxy('ALMotion')

    def sendCommand(self, cmd):
        """Send a movement command to the Nao.

        Uses the built-in walk controller from the Aldebaran toolkit.
        Expects ``cmd`` to be [vel_x, vel_y, vel_ang, freq_step].
        """
        vel_x, vel_y, vel_ang, step_freq = cmd[0], cmd[1], cmd[2], cmd[3]
        self.movProxy.setWalkTargetVelocity(vel_x, vel_y, vel_ang, step_freq)
| gpl-3.0 |
atlarge-research/opendc-web-server | opendc/api/v1/simulations/simulationId/paths/endpoint.py | 1 | 1243 | from opendc.models.path import Path
from opendc.models.simulation import Simulation
from opendc.util import exceptions
from opendc.util.rest import Response
def GET(request):
    """Get this Simulation's Paths."""
    # Validate the required path parameter before touching the database.
    try:
        request.check_required_parameters(path={'simulationId': 'int'})
    except exceptions.ParameterError as e:
        return Response(400, e.message)

    # Load the Simulation and bail out early if it does not exist.
    simulation = Simulation.from_primary_key((request.params_path['simulationId'],))
    if not simulation.exists():
        return Response(404, '{} not found.'.format(simulation))

    # The requester needs at least VIEW rights on this Simulation.
    if not simulation.google_id_has_at_least(request.google_id, 'VIEW'):
        return Response(403, 'Forbidden from viewing Paths for {}.'.format(simulation))

    # Fetch the Paths and serialize them for the response body.
    paths = Path.query('simulation_id', request.params_path['simulationId'])
    return Response(
        200,
        'Successfully retrieved Paths for {}.'.format(simulation),
        [path.to_JSON() for path in paths],
    )
| mit |
mojoboss/scikit-learn | sklearn/decomposition/tests/test_truncated_svd.py | 240 | 6055 | """Test truncated SVD transformer."""
import numpy as np
import scipy.sparse as sp
from sklearn.decomposition import TruncatedSVD
from sklearn.utils import check_random_state
from sklearn.utils.testing import (assert_array_almost_equal, assert_equal,
assert_raises, assert_greater,
assert_array_less)
# Make an X that looks somewhat like a small tf-idf matrix.
# XXX newer versions of SciPy have scipy.sparse.rand for this.
shape = 60, 55
n_samples, n_features = shape
# check_random_state(42) is just a seeded RandomState; use numpy directly so
# the fixture does not depend on sklearn utilities.
rng = np.random.RandomState(42)
# BUG FIX: np.product was deprecated and removed in NumPy 2.0; np.prod is the
# supported spelling (identical result).
X = rng.randint(-100, 20, np.prod(shape)).reshape(shape)
X = sp.csr_matrix(np.maximum(X, 0), dtype=np.float64)
X.data[:] = 1 + np.log(X.data)
# Explicit toarray() instead of the deprecated .A shorthand.
Xdense = X.toarray()
def test_algorithms():
    """arpack and randomized solvers agree on the leading components."""
    arpack_svd = TruncatedSVD(30, algorithm="arpack")
    randomized_svd = TruncatedSVD(30, algorithm="randomized", random_state=42)

    X_arpack = arpack_svd.fit_transform(X)[:, :6]
    X_randomized = randomized_svd.fit_transform(X)[:, :6]
    assert_array_almost_equal(X_arpack, X_randomized)

    comp_arpack = np.abs(arpack_svd.components_)
    comp_randomized = np.abs(randomized_svd.components_)
    # All elements are equal, but some elements are more equal than others:
    # the trailing components match only to lower precision.
    assert_array_almost_equal(comp_arpack[:9], comp_randomized[:9])
    assert_array_almost_equal(comp_arpack[9:], comp_randomized[9:], decimal=3)
def test_attributes():
    """Fitted models expose n_components and a matching components_ shape."""
    for k in (10, 25, 41):
        model = TruncatedSVD(k).fit(X)
        assert_equal(model.n_components, k)
        assert_equal(model.components_.shape, (k, n_features))
def test_too_many_components():
    """Requesting n_components >= n_features must raise ValueError."""
    for algorithm in ("arpack", "randomized"):
        for k in (n_features, n_features + 1):
            model = TruncatedSVD(n_components=k, algorithm=algorithm)
            assert_raises(ValueError, model.fit, X)
def test_sparse_formats():
    """fit_transform/transform accept a dense array and common sparse formats."""
    # BUG FIX: the loop iterates over "array" but the old code compared
    # ``fmt == "dense"``, so the Xdense branch was dead code and "array" only
    # worked because getattr(X, "toarray") happens to exist.  Compare against
    # "array" so the precomputed dense matrix is used intentionally.
    for fmt in ("array", "csr", "csc", "coo", "lil"):
        Xfmt = Xdense if fmt == "array" else getattr(X, "to" + fmt)()
        tsvd = TruncatedSVD(n_components=11)
        Xtrans = tsvd.fit_transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
        Xtrans = tsvd.transform(Xfmt)
        assert_equal(Xtrans.shape, (n_samples, 11))
def test_inverse_transform():
    """inverse_transform(fit_transform(X)) approximately reconstructs X."""
    for algo in ("arpack", "randomized"):
        # We need a lot of components for the reconstruction to be "almost
        # equal" in all positions. XXX Test means or sums instead?
        # BUG FIX: the loop variable was never used — both iterations ran the
        # default algorithm.  Pass it through so both solvers are exercised.
        tsvd = TruncatedSVD(n_components=52, random_state=42, algorithm=algo)
        Xt = tsvd.fit_transform(X)
        Xinv = tsvd.inverse_transform(Xt)
        assert_array_almost_equal(Xinv, Xdense, decimal=1)
def test_integers():
    """Integer input matrices are accepted and transformed."""
    X_int = X.astype(np.int64)
    model = TruncatedSVD(n_components=6)
    transformed = model.fit_transform(X_int)
    assert_equal(transformed.shape, (n_samples, model.n_components))
def test_explained_variance():
    """explained_variance_ratio_ is consistent across algorithms, component
    counts and sparse/dense input, and matches its definition
    var(transformed component) / var(total)."""
    # Test sparse data
    svd_a_10_sp = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_sp = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_sp = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_sp = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_sp = svd_a_10_sp.fit_transform(X)
    X_trans_r_10_sp = svd_r_10_sp.fit_transform(X)
    X_trans_a_20_sp = svd_a_20_sp.fit_transform(X)
    X_trans_r_20_sp = svd_r_20_sp.fit_transform(X)
    # Test dense data
    svd_a_10_de = TruncatedSVD(10, algorithm="arpack")
    svd_r_10_de = TruncatedSVD(10, algorithm="randomized", random_state=42)
    svd_a_20_de = TruncatedSVD(20, algorithm="arpack")
    svd_r_20_de = TruncatedSVD(20, algorithm="randomized", random_state=42)
    X_trans_a_10_de = svd_a_10_de.fit_transform(X.toarray())
    X_trans_r_10_de = svd_r_10_de.fit_transform(X.toarray())
    X_trans_a_20_de = svd_a_20_de.fit_transform(X.toarray())
    X_trans_r_20_de = svd_r_20_de.fit_transform(X.toarray())
    # helper arrays for tests below
    svds = (svd_a_10_sp, svd_r_10_sp, svd_a_20_sp, svd_r_20_sp, svd_a_10_de,
            svd_r_10_de, svd_a_20_de, svd_r_20_de)
    svds_trans = (
        (svd_a_10_sp, X_trans_a_10_sp),
        (svd_r_10_sp, X_trans_r_10_sp),
        (svd_a_20_sp, X_trans_a_20_sp),
        (svd_r_20_sp, X_trans_r_20_sp),
        (svd_a_10_de, X_trans_a_10_de),
        (svd_r_10_de, X_trans_r_10_de),
        (svd_a_20_de, X_trans_a_20_de),
        (svd_r_20_de, X_trans_r_20_de),
    )
    svds_10_v_20 = (
        (svd_a_10_sp, svd_a_20_sp),
        (svd_r_10_sp, svd_r_20_sp),
        (svd_a_10_de, svd_a_20_de),
        (svd_r_10_de, svd_r_20_de),
    )
    svds_sparse_v_dense = (
        (svd_a_10_sp, svd_a_10_de),
        (svd_a_20_sp, svd_a_20_de),
        (svd_r_10_sp, svd_r_10_de),
        (svd_r_20_sp, svd_r_20_de),
    )
    # Assert that the first 10 explained-variance ratios agree between the
    # 10-component and the 20-component fit (same algorithm, same data).
    for svd_10, svd_20 in svds_10_v_20:
        assert_array_almost_equal(
            svd_10.explained_variance_ratio_,
            svd_20.explained_variance_ratio_[:10],
            decimal=5,
        )
    # Assert that 20 components has higher explained variance than 10
    for svd_10, svd_20 in svds_10_v_20:
        assert_greater(
            svd_20.explained_variance_ratio_.sum(),
            svd_10.explained_variance_ratio_.sum(),
        )
    # Assert that all the values are greater than 0
    for svd in svds:
        assert_array_less(0.0, svd.explained_variance_ratio_)
    # Assert that total explained variance is less than 1
    for svd in svds:
        assert_array_less(svd.explained_variance_ratio_.sum(), 1.0)
    # Compare sparse vs. dense
    for svd_sparse, svd_dense in svds_sparse_v_dense:
        assert_array_almost_equal(svd_sparse.explained_variance_ratio_,
                                  svd_dense.explained_variance_ratio_)
    # Test that explained_variance is correct
    for svd, transformed in svds_trans:
        total_variance = np.var(X.toarray(), axis=0).sum()
        variances = np.var(transformed, axis=0)
        true_explained_variance_ratio = variances / total_variance
        assert_array_almost_equal(
            svd.explained_variance_ratio_,
            true_explained_variance_ratio,
        )
| bsd-3-clause |
JorgeCoock/django | tests/multiple_database/tests.py | 107 | 93762 | from __future__ import unicode_literals
import datetime
import pickle
import warnings
from operator import attrgetter
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.core import management
from django.db import DEFAULT_DB_ALIAS, connections, router, transaction
from django.db.models import signals
from django.db.utils import ConnectionRouter
from django.test import SimpleTestCase, TestCase, override_settings
from django.utils.encoding import force_text
from django.utils.six import StringIO
from .models import Book, Person, Pet, Review, UserProfile
from .routers import AuthRouter, TestRouter, WriteRouter
class QueryTestCase(TestCase):
multi_db = True
def test_db_selection(self):
    """Querysets use the default database unless told otherwise."""
    # Plain manager access and derived querysets stay on the default alias.
    self.assertEqual(Book.objects.db, DEFAULT_DB_ALIAS)
    self.assertEqual(Book.objects.all().db, DEFAULT_DB_ALIAS)
    # using() and db_manager() both redirect to the named alias, and the
    # redirection sticks on querysets derived from them.
    other_manager = Book.objects.db_manager('other')
    self.assertEqual(Book.objects.using('other').db, 'other')
    self.assertEqual(other_manager.db, 'other')
    self.assertEqual(other_manager.all().db, 'other')
def test_default_creation(self):
    """Objects created on the default database don't leak onto other databases."""
    # Create a book on the default database using create()
    Book.objects.create(title="Pro Django",
                        published=datetime.date(2008, 12, 16))
    # Create a book on the default database using a save
    dive = Book()
    dive.title = "Dive into Python"
    dive.published = datetime.date(2009, 5, 4)
    dive.save()
    # Check that book exists on the default database, but not on other database
    try:
        # Both the implicit default and an explicit using('default') succeed.
        Book.objects.get(title="Pro Django")
        Book.objects.using('default').get(title="Pro Django")
    except Book.DoesNotExist:
        self.fail('"Pro Django" should exist on default database')
    self.assertRaises(
        Book.DoesNotExist,
        Book.objects.using('other').get,
        title="Pro Django"
    )
    try:
        Book.objects.get(title="Dive into Python")
        Book.objects.using('default').get(title="Dive into Python")
    except Book.DoesNotExist:
        self.fail('"Dive into Python" should exist on default database')
    self.assertRaises(
        Book.DoesNotExist,
        Book.objects.using('other').get,
        title="Dive into Python"
    )
def test_other_creation(self):
    """Objects created on another database don't leak onto the default database."""
    # Create a book on the second database
    Book.objects.using('other').create(title="Pro Django",
                                       published=datetime.date(2008, 12, 16))
    # Create a book on the other database using a save with an explicit using
    dive = Book()
    dive.title = "Dive into Python"
    dive.published = datetime.date(2009, 5, 4)
    dive.save(using='other')
    # Check that book exists on the other database, but not on the default database
    try:
        Book.objects.using('other').get(title="Pro Django")
    except Book.DoesNotExist:
        self.fail('"Pro Django" should exist on other database')
    self.assertRaises(
        Book.DoesNotExist,
        Book.objects.get,
        title="Pro Django"
    )
    self.assertRaises(
        Book.DoesNotExist,
        Book.objects.using('default').get,
        title="Pro Django"
    )
    try:
        Book.objects.using('other').get(title="Dive into Python")
    except Book.DoesNotExist:
        self.fail('"Dive into Python" should exist on other database')
    self.assertRaises(
        Book.DoesNotExist,
        Book.objects.get,
        title="Dive into Python"
    )
    self.assertRaises(
        Book.DoesNotExist,
        Book.objects.using('default').get,
        title="Dive into Python"
    )
def test_refresh(self):
    """refresh_from_db() pulls state from the instance's own database unless
    an explicit ``using`` is given.

    BUG FIX: the original body contained copy-pasted duplicates — the object
    was constructed twice and saved twice; the redundant statements are
    removed (the net database state is identical: one row on 'other').
    """
    dive = Book()
    dive.title = "Dive into Python"
    dive.published = datetime.date(2009, 5, 4)
    dive.save(using='other')
    # Write a conflicting title for the same book to the default database.
    dive2 = Book.objects.using('other').get()
    dive2.title = "Dive into Python (on default)"
    dive2.save(using='default')
    # A plain refresh reads from the database the instance is bound to.
    dive.refresh_from_db()
    self.assertEqual(dive.title, "Dive into Python")
    # An explicit using= refreshes from that database and rebinds the
    # instance's database state.
    dive.refresh_from_db(using='default')
    self.assertEqual(dive.title, "Dive into Python (on default)")
    self.assertEqual(dive._state.db, "default")
def test_basic_queries(self):
    """Queries are constrained to a single database."""
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    # Each lookup type (exact, icontains, iexact, date lookups) only sees
    # rows on the database named in using().
    dive = Book.objects.using('other').get(published=datetime.date(2009, 5, 4))
    self.assertEqual(dive.title, "Dive into Python")
    self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published=datetime.date(2009, 5, 4))
    dive = Book.objects.using('other').get(title__icontains="dive")
    self.assertEqual(dive.title, "Dive into Python")
    self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__icontains="dive")
    dive = Book.objects.using('other').get(title__iexact="dive INTO python")
    self.assertEqual(dive.title, "Dive into Python")
    self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, title__iexact="dive INTO python")
    dive = Book.objects.using('other').get(published__year=2009)
    self.assertEqual(dive.title, "Dive into Python")
    self.assertEqual(dive.published, datetime.date(2009, 5, 4))
    self.assertRaises(Book.DoesNotExist, Book.objects.using('default').get, published__year=2009)
    # dates() honours the database routing as well.
    years = Book.objects.using('other').dates('published', 'year')
    self.assertEqual([o.year for o in years], [2009])
    years = Book.objects.using('default').dates('published', 'year')
    self.assertEqual([o.year for o in years], [])
    months = Book.objects.using('other').dates('published', 'month')
    self.assertEqual([o.month for o in months], [5])
    months = Book.objects.using('default').dates('published', 'month')
    self.assertEqual([o.month for o in months], [])
def test_m2m_separation(self):
    """M2M fields are constrained to a single database."""
    # Create a book and author on the default database
    pro = Book.objects.create(title="Pro Django",
                              published=datetime.date(2008, 12, 16))
    marty = Person.objects.create(name="Marty Alchin")
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    # Save the author relations
    pro.authors = [marty]
    dive.authors = [mark]
    # Inspect the m2m tables directly.
    # There should be 1 entry in each database
    self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
    # Check that queries work across m2m joins
    self.assertEqual(list(Book.objects.using('default').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
                     ['Pro Django'])
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Marty Alchin').values_list('title', flat=True)),
                     [])
    self.assertEqual(list(Book.objects.using('default').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
                     [])
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
                     ['Dive into Python'])
    # Reget the objects to clear caches
    dive = Book.objects.using('other').get(title="Dive into Python")
    mark = Person.objects.using('other').get(name="Mark Pilgrim")
    # Retrieve related object by descriptor. Related objects should be database-bound
    self.assertEqual(list(dive.authors.all().values_list('name', flat=True)),
                     ['Mark Pilgrim'])
    self.assertEqual(list(mark.book_set.all().values_list('title', flat=True)),
                     ['Dive into Python'])
def test_m2m_forward_operations(self):
    """M2M forward manipulations are all constrained to a single DB."""
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    # Save the author relations
    dive.authors = [mark]
    # Add a second author
    john = Person.objects.using('other').create(name="John Smith")
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
                     [])
    dive.authors.add(john)
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
                     ['Dive into Python'])
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
                     ['Dive into Python'])
    # Remove the second author
    dive.authors.remove(john)
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
                     ['Dive into Python'])
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
                     [])
    # Clear all authors
    dive.authors.clear()
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
                     [])
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='John Smith').values_list('title', flat=True)),
                     [])
    # Create an author through the m2m interface
    dive.authors.create(name='Jane Brown')
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Mark Pilgrim').values_list('title', flat=True)),
                     [])
    self.assertEqual(list(Book.objects.using('other').filter(authors__name='Jane Brown').values_list('title', flat=True)),
                     ['Dive into Python'])
def test_m2m_reverse_operations(self):
    """M2M reverse manipulations are all constrained to a single DB."""
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    # Save the author relations
    dive.authors = [mark]
    # Create a second book on the other database
    grease = Book.objects.using('other').create(title="Greasemonkey Hacks",
                                                published=datetime.date(2005, 11, 1))
    # Add a book to the m2m via the reverse accessor
    mark.book_set.add(grease)
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                     ['Mark Pilgrim'])
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
                     ['Mark Pilgrim'])
    # Remove a book from the m2m
    mark.book_set.remove(grease)
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                     ['Mark Pilgrim'])
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
                     [])
    # Clear the books associated with mark
    mark.book_set.clear()
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Greasemonkey Hacks').values_list('name', flat=True)),
                     [])
    # Create a book through the m2m interface
    mark.book_set.create(title="Dive into HTML5", published=datetime.date(2020, 1, 1))
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into Python').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('other').filter(book__title='Dive into HTML5').values_list('name', flat=True)),
                     ['Mark Pilgrim'])
def test_m2m_cross_database_protection(self):
    """Operations that involve sharing M2M objects across databases raise an error."""
    # Create a book and author on the default database
    pro = Book.objects.create(title="Pro Django",
                              published=datetime.date(2008, 12, 16))
    marty = Person.objects.create(name="Marty Alchin")
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    # Each mixed-database assignment below must raise ValueError; the
    # atomic() blocks keep the failed writes from leaking partial state.
    # Set a foreign key set with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='default'):
            marty.edited = [pro, dive]
    # Add to an m2m with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='default'):
            marty.book_set.add(dive)
    # Set a m2m with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='default'):
            marty.book_set = [pro, dive]
    # Add to a reverse m2m with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='other'):
            dive.authors.add(marty)
    # Set a reverse m2m with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='other'):
            dive.authors = [mark, marty]
def test_m2m_deletion(self):
    """Cascaded deletions of m2m relations issue queries on the right database."""
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    dive.authors = [mark]
    # Check the initial state
    self.assertEqual(Person.objects.using('default').count(), 0)
    self.assertEqual(Book.objects.using('default').count(), 0)
    self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
    self.assertEqual(Person.objects.using('other').count(), 1)
    self.assertEqual(Book.objects.using('other').count(), 1)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
    # Delete the object on the other database
    dive.delete(using='other')
    self.assertEqual(Person.objects.using('default').count(), 0)
    self.assertEqual(Book.objects.using('default').count(), 0)
    self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
    # The person still exists ...
    self.assertEqual(Person.objects.using('other').count(), 1)
    # ... but the book has been deleted
    self.assertEqual(Book.objects.using('other').count(), 0)
    # ... and the relationship object has also been deleted.
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
    # Now try deletion in the reverse direction. Set up the relation again
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    dive.authors = [mark]
    # Check the initial state
    self.assertEqual(Person.objects.using('default').count(), 0)
    self.assertEqual(Book.objects.using('default').count(), 0)
    self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
    self.assertEqual(Person.objects.using('other').count(), 1)
    self.assertEqual(Book.objects.using('other').count(), 1)
    self.assertEqual(Book.authors.through.objects.using('other').count(), 1)
    # Delete the object on the other database
    mark.delete(using='other')
    self.assertEqual(Person.objects.using('default').count(), 0)
    self.assertEqual(Book.objects.using('default').count(), 0)
    self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
    # The person has been deleted ...
    self.assertEqual(Person.objects.using('other').count(), 0)
    # ... but the book still exists
    self.assertEqual(Book.objects.using('other').count(), 1)
    # ... and the relationship object has been deleted.
    self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
def test_foreign_key_separation(self):
    """FK fields are constrained to a single database."""
    # Create a book and author on the default database
    pro = Book.objects.create(title="Pro Django",
                              published=datetime.date(2008, 12, 16))
    george = Person.objects.create(name="George Vilches")
    # Create a book and author on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    chris = Person.objects.using('other').create(name="Chris Mills")
    # Assign each book's editor within its own database
    pro.editor = george
    pro.save()
    dive.editor = chris
    dive.save()
    pro = Book.objects.using('default').get(title="Pro Django")
    self.assertEqual(pro.editor.name, "George Vilches")
    dive = Book.objects.using('other').get(title="Dive into Python")
    self.assertEqual(dive.editor.name, "Chris Mills")
    # Check that queries work across foreign key joins
    self.assertEqual(list(Person.objects.using('default').filter(edited__title='Pro Django').values_list('name', flat=True)),
                     ['George Vilches'])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Pro Django').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('default').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                     ['Chris Mills'])
    # Reget the objects to clear caches
    chris = Person.objects.using('other').get(name="Chris Mills")
    dive = Book.objects.using('other').get(title="Dive into Python")
    # Retrieve related object by descriptor. Related objects should be database-bound
    self.assertEqual(list(chris.edited.values_list('title', flat=True)),
                     ['Dive into Python'])
def test_foreign_key_reverse_operations(self):
    """FK reverse manipulations are all constrained to a single DB."""
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    chris = Person.objects.using('other').create(name="Chris Mills")
    # Save the editor relation
    dive.editor = chris
    dive.save()
    # Add a second book edited by chris
    html5 = Book.objects.using('other').create(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                     [])
    chris.edited.add(html5)
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                     ['Chris Mills'])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                     ['Chris Mills'])
    # Remove the second edited book
    chris.edited.remove(html5)
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                     ['Chris Mills'])
    # Clear all edited books
    chris.edited.clear()
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                     [])
    # Create a book through the reverse FK interface
    chris.edited.create(title='Dive into Water', published=datetime.date(2010, 3, 15))
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into HTML5').values_list('name', flat=True)),
                     [])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Water').values_list('name', flat=True)),
                     ['Chris Mills'])
    self.assertEqual(list(Person.objects.using('other').filter(edited__title='Dive into Python').values_list('name', flat=True)),
                     [])
def test_foreign_key_cross_database_protection(self):
    """Operations that involve sharing FK objects across databases raise an error."""
    # Create a book and author on the default database
    pro = Book.objects.create(title="Pro Django",
                              published=datetime.date(2008, 12, 16))
    marty = Person.objects.create(name="Marty Alchin")
    # Create a book on the other database
    dive = Book.objects.using('other').create(title="Dive into Python",
                                              published=datetime.date(2009, 5, 4))
    # Set a foreign key with an object from a different database
    with self.assertRaises(ValueError):
        dive.editor = marty
    # Set a foreign key set with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='default'):
            marty.edited = [pro, dive]
    # Add to a foreign key set with an object from a different database
    with self.assertRaises(ValueError):
        with transaction.atomic(using='default'):
            marty.edited.add(dive)
def test_foreign_key_deletion(self):
    """Cascaded deletions of Foreign Key relations issue queries on the right database."""
    mark = Person.objects.using('other').create(name="Mark Pilgrim")
    Pet.objects.using('other').create(name="Fido", owner=mark)

    # Initial state: one person and one pet on 'other', nothing on 'default'.
    for model in (Person, Pet):
        self.assertEqual(model.objects.using('default').count(), 0)
        self.assertEqual(model.objects.using('other').count(), 1)

    # Deleting the owner cascades onto the pet, on 'other' only.
    mark.delete(using='other')
    for model in (Person, Pet):
        self.assertEqual(model.objects.using('default').count(), 0)
        self.assertEqual(model.objects.using('other').count(), 0)
def test_foreign_key_validation(self):
    """ForeignKey.validate() uses the correct database."""
    # Both the owner and the pet live on 'other'; full_clean() must
    # validate the FK against that database and return None on success.
    owner = Person.objects.using('other').create(name="Mickey")
    pet = Pet.objects.using('other').create(name="Pluto", owner=owner)
    self.assertIsNone(pet.full_clean())
def test_o2o_separation(self):
    """OneToOne fields are constrained to a single database."""
    # Create a user and profile on the default database
    alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
    alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
    # Create a user and profile on the other database
    bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
    bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
    # Retrieve related objects; queries should be database constrained
    alice = User.objects.using('default').get(username="alice")
    self.assertEqual(alice.userprofile.flavor, "chocolate")
    bob = User.objects.using('other').get(username="bob")
    self.assertEqual(bob.userprofile.flavor, "crunchy frog")
    # Check that queries work across joins
    self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
                     ['alice'])
    self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='chocolate').values_list('username', flat=True)),
                     [])
    self.assertEqual(list(User.objects.using('default').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
                     [])
    self.assertEqual(list(User.objects.using('other').filter(userprofile__flavor='crunchy frog').values_list('username', flat=True)),
                     ['bob'])
    # Reget the objects to clear caches
    alice_profile = UserProfile.objects.using('default').get(flavor='chocolate')
    bob_profile = UserProfile.objects.using('other').get(flavor='crunchy frog')
    # Retrieve related object by descriptor. Related objects should be database-bound
    self.assertEqual(alice_profile.user.username, 'alice')
    self.assertEqual(bob_profile.user.username, 'bob')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.using('default').create(user=alice, flavor='chocolate')
with self.assertRaises(ValueError):
bob.userprofile = alice_profile
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
bob_profile = UserProfile.objects.using('other').create(user=bob, flavor='crunchy frog')
new_bob_profile = UserProfile(flavor="spring surprise")
# assigning a profile requires an explicit pk as the object isn't saved
charlie = User(pk=51, username='charlie', email='charlie@example.com')
charlie.set_unusable_password()
# initially, no db assigned
self.assertEqual(new_bob_profile._state.db, None)
self.assertEqual(charlie._state.db, None)
# old object comes from 'other', so the new object is set to use 'other'...
new_bob_profile.user = bob
charlie.userprofile = bob_profile
self.assertEqual(new_bob_profile._state.db, 'other')
self.assertEqual(charlie._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog'])
# When saved (no using required), new objects goes to 'other'
charlie.save()
bob_profile.save()
new_bob_profile.save()
self.assertEqual(list(User.objects.using('default').values_list('username', flat=True)),
['alice'])
self.assertEqual(list(User.objects.using('other').values_list('username', flat=True)),
['bob', 'charlie'])
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# This also works if you assign the O2O relation in the constructor
denise = User.objects.db_manager('other').create_user('denise', 'denise@example.com')
denise_profile = UserProfile(flavor="tofu", user=denise)
self.assertEqual(denise_profile._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise'])
# When saved, the new profile goes to 'other'
denise_profile.save()
self.assertEqual(list(UserProfile.objects.using('default').values_list('flavor', flat=True)),
['chocolate'])
self.assertEqual(list(UserProfile.objects.using('other').values_list('flavor', flat=True)),
['crunchy frog', 'spring surprise', 'tofu'])
def test_generic_key_separation(self):
"Generic fields are constrained to a single database"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
review2 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review1 = Review.objects.using('default').get(source="Python Monthly")
self.assertEqual(review1.content_object.title, "Pro Django")
review2 = Review.objects.using('other').get(source="Python Weekly")
self.assertEqual(review2.content_object.title, "Dive into Python")
# Reget the objects to clear caches
dive = Book.objects.using('other').get(title="Dive into Python")
# Retrieve related object by descriptor. Related objects should be database-bound
self.assertEqual(list(dive.reviews.all().values_list('source', flat=True)),
['Python Weekly'])
def test_generic_key_reverse_operations(self):
"Generic reverse manipulations are all constrained to a single DB"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
temp = Book.objects.using('other').create(title="Temp",
published=datetime.date(2009, 5, 4))
review1 = Review.objects.using('other').create(source="Python Weekly", content_object=dive)
review2 = Review.objects.using('other').create(source="Python Monthly", content_object=temp)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly'])
# Add a second review
dive.reviews.add(review2)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly', 'Python Weekly'])
# Remove the second author
dive.reviews.remove(review1)
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Monthly'])
# Clear all reviews
dive.reviews.clear()
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
# Create an author through the generic interface
dive.reviews.create(source='Python Daily')
self.assertEqual(list(Review.objects.using('default').filter(object_id=dive.pk).values_list('source', flat=True)),
[])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily'])
def test_generic_key_cross_database_protection(self):
"Operations that involve sharing generic key objects across databases raise an error"
# Create a book and author on the default database
pro = Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
review1 = Review.objects.create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Set a foreign key with an object from a different database
with self.assertRaises(ValueError):
review1.content_object = dive
# Add to a foreign key set with an object from a different database
with self.assertRaises(ValueError):
with transaction.atomic(using='other'):
dive.reviews.add(review1)
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'other')
# ... but it isn't saved yet
self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly'])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Weekly'])
# When saved, John goes to 'other'
review3.save()
self.assertEqual(list(Review.objects.using('default').filter(object_id=pro.pk).values_list('source', flat=True)),
['Python Monthly'])
self.assertEqual(list(Review.objects.using('other').filter(object_id=dive.pk).values_list('source', flat=True)),
['Python Daily', 'Python Weekly'])
def test_generic_key_deletion(self):
"Cascaded deletions of Generic Key relations issue queries on the right database"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
Review.objects.using('other').create(source="Python Weekly", content_object=dive)
# Check the initial state
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
self.assertEqual(Book.objects.using('other').count(), 1)
self.assertEqual(Review.objects.using('other').count(), 1)
# Delete the Book object, which will cascade onto the pet
dive.delete(using='other')
self.assertEqual(Book.objects.using('default').count(), 0)
self.assertEqual(Review.objects.using('default').count(), 0)
# Both the pet and the person have been deleted from the right database
self.assertEqual(Book.objects.using('other').count(), 0)
self.assertEqual(Review.objects.using('other').count(), 0)
def test_ordering(self):
"get_next_by_XXX commands stick to a single database"
Book.objects.create(title="Pro Django",
published=datetime.date(2008, 12, 16))
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
learn = Book.objects.using('other').create(title="Learning Python",
published=datetime.date(2008, 7, 16))
self.assertEqual(learn.get_next_by_published().title, "Dive into Python")
self.assertEqual(dive.get_previous_by_published().title, "Learning Python")
def test_raw(self):
"test the raw() method across databases"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
val = Book.objects.db_manager("other").raw('SELECT id FROM multiple_database_book')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
val = Book.objects.raw('SELECT id FROM multiple_database_book').using('other')
self.assertQuerysetEqual(val, [dive.pk], attrgetter("pk"))
def test_select_related(self):
"Database assignment is retained if an object is retrieved with select_related()"
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
# Retrieve the Person using select_related()
book = Book.objects.using('other').select_related('editor').get(title="Dive into Python")
# The editor instance should have a db state
self.assertEqual(book.editor._state.db, 'other')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
sub = Person.objects.using('other').filter(name='fff')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. If the subquery explicitly uses a
# different database, an error should be raised.
self.assertRaises(ValueError, str, qs.query)
# Evaluating the query shouldn't work, either
with self.assertRaises(ValueError):
for obj in qs:
pass
def test_related_manager(self):
"Related managers return managers, not querysets"
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# extra_arg is removed by the BookManager's implementation of
# create(); but the BookManager's implementation won't get called
# unless edited returns a Manager, not a queryset
mark.book_set.create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.book_set.get_or_create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
mark.edited.get_or_create(title="Dive into Water",
published=datetime.date(2009, 5, 4),
extra_arg=True)
class ConnectionRouterTestCase(SimpleTestCase):
    """ConnectionRouter resolves routers from settings or from an explicit
    list, accepting both dotted paths and ready-made instances."""

    @override_settings(DATABASE_ROUTERS=[
        'multiple_database.tests.TestRouter',
        'multiple_database.tests.WriteRouter'])
    def test_router_init_default(self):
        # With no argument, the router list is taken from settings.
        names = [r.__class__.__name__ for r in ConnectionRouter().routers]
        self.assertListEqual(names, ['TestRouter', 'WriteRouter'])

    def test_router_init_arg(self):
        # Dotted paths passed explicitly are imported and instantiated...
        connection_router = ConnectionRouter([
            'multiple_database.tests.TestRouter',
            'multiple_database.tests.WriteRouter'
        ])
        names = [r.__class__.__name__ for r in connection_router.routers]
        self.assertListEqual(names, ['TestRouter', 'WriteRouter'])
        # ...and pre-built instances are accepted as-is.
        connection_router = ConnectionRouter([TestRouter(), WriteRouter()])
        names = [r.__class__.__name__ for r in connection_router.routers]
        self.assertListEqual(names, ['TestRouter', 'WriteRouter'])
# Make the 'other' database appear to be a replica of the 'default'
@override_settings(DATABASE_ROUTERS=[TestRouter()])
class RouterTestCase(TestCase):
multi_db = True
def test_db_selection(self):
"Check that querysets obey the router for db suggestions"
self.assertEqual(Book.objects.db, 'other')
self.assertEqual(Book.objects.all().db, 'other')
self.assertEqual(Book.objects.using('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').db, 'default')
self.assertEqual(Book.objects.db_manager('default').all().db, 'default')
def test_migrate_selection(self):
"Synchronization behavior is predictable"
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[TestRouter(), AuthRouter()]):
# Add the auth router to the chain. TestRouter is a universal
# synchronizer, so it should have no effect.
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
with override_settings(DATABASE_ROUTERS=[AuthRouter(), TestRouter()]):
# Now check what happens if the router order is reversed.
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
self.assertTrue(router.allow_migrate_model('other', User))
self.assertTrue(router.allow_migrate_model('other', Book))
    def test_migrate_legacy_router(self):
        """A router using the pre-1.8 allow_migrate(db, model) signature keeps
        working, but every call through it emits RemovedInDjango110Warning."""
        class LegacyRouter(object):
            def allow_migrate(self, db, model):
                """
                Deprecated allow_migrate signature should trigger
                RemovedInDjango110Warning.
                """
                assert db == 'default'
                assert model is User
                return True
        with override_settings(DATABASE_ROUTERS=[LegacyRouter()]):
            with warnings.catch_warnings(record=True) as recorded:
                warnings.filterwarnings('always')
                # The exact deprecation text the router shim must emit.
                msg = (
                    "The signature of allow_migrate has changed from "
                    "allow_migrate(self, db, model) to "
                    "allow_migrate(self, db, app_label, model_name=None, **hints). "
                    "Support for the old signature will be removed in Django 1.10."
                )
                # Model-based entry point: permitted, with exactly one warning
                # (pop() empties `recorded`, which the next check verifies).
                self.assertTrue(router.allow_migrate_model('default', User))
                self.assertEqual(force_text(recorded.pop().message), msg)
                self.assertEqual(recorded, [])
                # App-label entry point warns with the same message.
                self.assertTrue(router.allow_migrate('default', 'app_label'))
                self.assertEqual(force_text(recorded.pop().message), msg)
def test_partial_router(self):
"A router can choose to implement a subset of methods"
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# First check the baseline behavior.
self.assertEqual(router.db_for_read(User), 'other')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'default')
self.assertEqual(router.db_for_write(Book), 'default')
self.assertTrue(router.allow_relation(dive, dive))
self.assertTrue(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
with override_settings(DATABASE_ROUTERS=[WriteRouter(), AuthRouter(), TestRouter()]):
self.assertEqual(router.db_for_read(User), 'default')
self.assertEqual(router.db_for_read(Book), 'other')
self.assertEqual(router.db_for_write(User), 'writer')
self.assertEqual(router.db_for_write(Book), 'writer')
self.assertTrue(router.allow_relation(dive, dive))
self.assertFalse(router.allow_migrate_model('default', User))
self.assertTrue(router.allow_migrate_model('default', Book))
def test_database_routing(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
pro.authors = [marty]
# Create a book and author on the other database
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
# An update query will be routed to the default database
Book.objects.filter(title='Pro Django').update(pages=200)
with self.assertRaises(Book.DoesNotExist):
# By default, the get query will be directed to 'other'
Book.objects.get(title='Pro Django')
# But the same query issued explicitly at a database will work.
pro = Book.objects.using('default').get(title='Pro Django')
# Check that the update worked.
self.assertEqual(pro.pages, 200)
# An update query with an explicit using clause will be routed
# to the requested database.
Book.objects.using('other').filter(title='Dive into Python').update(pages=300)
self.assertEqual(Book.objects.get(title='Dive into Python').pages, 300)
# Related object queries stick to the same database
# as the original object, regardless of the router
self.assertEqual(list(pro.authors.values_list('name', flat=True)), ['Marty Alchin'])
self.assertEqual(pro.editor.name, 'Marty Alchin')
# get_or_create is a special case. The get needs to be targeted at
# the write database in order to avoid potential transaction
# consistency problems
book, created = Book.objects.get_or_create(title="Pro Django")
self.assertFalse(created)
book, created = Book.objects.get_or_create(title="Dive Into Python",
defaults={'published': datetime.date(2009, 5, 4)})
self.assertTrue(created)
# Check the head count of objects
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 1)
# If a database isn't specified, the read database is used
self.assertEqual(Book.objects.count(), 1)
# A delete query will also be routed to the default database
Book.objects.filter(pages__gt=150).delete()
# The default database has lost the book.
self.assertEqual(Book.objects.using('default').count(), 1)
self.assertEqual(Book.objects.using('other').count(), 1)
def test_invalid_set_foreign_key_assignment(self):
marty = Person.objects.using('default').create(name="Marty Alchin")
dive = Book.objects.using('other').create(
title="Dive into Python",
published=datetime.date(2009, 5, 4),
)
# Set a foreign key set with an object from a different database
msg = "<Book: Dive into Python> instance isn't saved. Use bulk=False or save the object first."
with self.assertRaisesMessage(ValueError, msg):
marty.edited.set([dive])
def test_foreign_key_cross_database_protection(self):
"Foreign keys can cross databases if they two databases have a common source"
# Create a book and author on the default database
pro = Book.objects.using('default').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('default').create(name="Marty Alchin")
# Create a book and author on the other database
dive = Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('other').create(name="Mark Pilgrim")
# Set a foreign key with an object from a different database
try:
dive.editor = marty
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Set a foreign key set with an object from a different database
try:
marty.edited.set([pro, dive], bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Assignment implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a foreign key set with an object from a different database
try:
marty.edited.add(dive, bulk=False)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Add implies a save, so database assignments of original objects have changed...
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
self.assertEqual(mark._state.db, 'other')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
# If you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
chris = Person(name="Chris Mills")
html5 = Book(title="Dive into HTML5", published=datetime.date(2010, 3, 15))
# initially, no db assigned
self.assertEqual(chris._state.db, None)
self.assertEqual(html5._state.db, None)
# old object comes from 'other', so the new object is set to use the
# source of 'other'...
self.assertEqual(dive._state.db, 'other')
chris.save()
dive.editor = chris
html5.editor = mark
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
self.assertEqual(chris._state.db, 'default')
self.assertEqual(html5._state.db, 'default')
# This also works if you assign the FK in the constructor
water = Book(title="Dive into Water", published=datetime.date(2001, 1, 1), editor=mark)
self.assertEqual(water._state.db, 'default')
# For the remainder of this test, create a copy of 'mark' in the
# 'default' database to prevent integrity errors on backends that
# don't defer constraints checks until the end of the transaction
mark.save(using='default')
# This moved 'mark' in the 'default' database, move it back in 'other'
mark.save(using='other')
self.assertEqual(mark._state.db, 'other')
# If you create an object through a FK relation, it will be
# written to the write database, even if the original object
# was on the read database
cheesecake = mark.edited.create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
cheesecake, created = mark.edited.get_or_create(title='Dive into Cheesecake', published=datetime.date(2010, 3, 15))
self.assertEqual(cheesecake._state.db, 'default')
puddles, created = mark.edited.get_or_create(title='Dive into Puddles', published=datetime.date(2010, 3, 15))
self.assertEqual(puddles._state.db, 'default')
def test_m2m_cross_database_protection(self):
"M2M relations can cross databases if the database share a source"
# Create books and authors on the inverse to the usual database
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
dive = Book.objects.using('default').create(pk=2, title="Dive into Python",
published=datetime.date(2009, 5, 4))
mark = Person.objects.using('default').create(pk=2, name="Mark Pilgrim")
# Now save back onto the usual database.
# This simulates primary/replica - the objects exist on both database,
# but the _state.db is as it is for all other tests.
pro.save(using='default')
marty.save(using='default')
dive.save(using='other')
mark.save(using='other')
# Check that we have 2 of both types of object on both databases
self.assertEqual(Book.objects.using('default').count(), 2)
self.assertEqual(Book.objects.using('other').count(), 2)
self.assertEqual(Person.objects.using('default').count(), 2)
self.assertEqual(Person.objects.using('other').count(), 2)
# Set a m2m set with an object from a different database
try:
marty.book_set = [pro, dive]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Add to an m2m with an object from a different database
try:
marty.book_set.add(dive)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
# Set a reverse m2m with an object from a different database
try:
dive.authors = [mark, marty]
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 2)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Reset relations
Book.authors.through.objects.using('default').delete()
self.assertEqual(Book.authors.through.objects.using('default').count(), 0)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# Add to a reverse m2m with an object from a different database
try:
dive.authors.add(marty)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments don't change
self.assertEqual(marty._state.db, 'default')
self.assertEqual(pro._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(mark._state.db, 'other')
# All m2m relations should be saved on the default database
self.assertEqual(Book.authors.through.objects.using('default').count(), 1)
self.assertEqual(Book.authors.through.objects.using('other').count(), 0)
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
alice = dive.authors.create(name='Alice')
self.assertEqual(alice._state.db, 'default')
# Same goes for get_or_create, regardless of whether getting or creating
alice, created = dive.authors.get_or_create(name='Alice')
self.assertEqual(alice._state.db, 'default')
bob, created = dive.authors.get_or_create(name='Bob')
self.assertEqual(bob._state.db, 'default')
def test_o2o_cross_database_protection(self):
"Operations that involve sharing FK objects across databases raise an error"
# Create a user and profile on the default database
alice = User.objects.db_manager('default').create_user('alice', 'alice@example.com')
# Create a user and profile on the other database
bob = User.objects.db_manager('other').create_user('bob', 'bob@example.com')
# Set a one-to-one relation with an object from a different database
alice_profile = UserProfile.objects.create(user=alice, flavor='chocolate')
try:
bob.userprofile = alice_profile
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(alice._state.db, 'default')
self.assertEqual(alice_profile._state.db, 'default')
self.assertEqual(bob._state.db, 'other')
# ... but they will when the affected object is saved.
bob.save()
self.assertEqual(bob._state.db, 'default')
def test_generic_key_cross_database_protection(self):
"Generic Key operations can span databases if they share a source"
# Create a book and author on the default database
pro = Book.objects.using(
'default').create(title="Pro Django", published=datetime.date(2008, 12, 16))
review1 = Review.objects.using(
'default').create(source="Python Monthly", content_object=pro)
# Create a book and author on the other database
dive = Book.objects.using(
'other').create(title="Dive into Python", published=datetime.date(2009, 5, 4))
review2 = Review.objects.using(
'other').create(source="Python Weekly", content_object=dive)
# Set a generic foreign key with an object from a different database
try:
review1.content_object = dive
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# This isn't a real primary/replica database, so restore the original from other
dive = Book.objects.using('other').get(title='Dive into Python')
self.assertEqual(dive._state.db, 'other')
# Add to a generic foreign key set with an object from a different database
try:
dive.reviews.add(review1)
except ValueError:
self.fail("Assignment across primary/replica databases with a common source should be ok")
# Database assignments of original objects haven't changed...
self.assertEqual(pro._state.db, 'default')
self.assertEqual(review1._state.db, 'default')
self.assertEqual(dive._state.db, 'other')
self.assertEqual(review2._state.db, 'other')
# ... but they will when the affected object is saved.
dive.save()
self.assertEqual(dive._state.db, 'default')
# ...and the source database now has a copy of any object saved
try:
Book.objects.using('default').get(title='Dive into Python').delete()
except Book.DoesNotExist:
self.fail('Source database should have a copy of saved object')
# BUT! if you assign a FK object when the base object hasn't
# been saved yet, you implicitly assign the database for the
# base object.
review3 = Review(source="Python Daily")
# initially, no db assigned
self.assertEqual(review3._state.db, None)
# Dive comes from 'other', so review3 is set to use the source of 'other'...
review3.content_object = dive
self.assertEqual(review3._state.db, 'default')
# If you create an object through a M2M relation, it will be
# written to the write database, even if the original object
# was on the read database
dive = Book.objects.using('other').get(title='Dive into Python')
nyt = dive.reviews.create(source="New York Times", content_object=dive)
self.assertEqual(nyt._state.db, 'default')
def test_m2m_managers(self):
"M2M relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16))
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
self.assertEqual(pro.authors.db, 'other')
self.assertEqual(pro.authors.db_manager('default').db, 'default')
self.assertEqual(pro.authors.db_manager('default').all().db, 'default')
self.assertEqual(marty.book_set.db, 'other')
self.assertEqual(marty.book_set.db_manager('default').db, 'default')
self.assertEqual(marty.book_set.db_manager('default').all().db, 'default')
def test_foreign_key_managers(self):
"FK reverse relations are represented by managers, and can be controlled like managers"
marty = Person.objects.using('other').create(pk=1, name="Marty Alchin")
Book.objects.using('other').create(pk=1, title="Pro Django",
published=datetime.date(2008, 12, 16),
editor=marty)
self.assertEqual(marty.edited.db, 'other')
self.assertEqual(marty.edited.db_manager('default').db, 'default')
self.assertEqual(marty.edited.db_manager('default').all().db, 'default')
def test_generic_key_managers(self):
"Generic key relations are represented by managers, and can be controlled like managers"
pro = Book.objects.using('other').create(title="Pro Django",
published=datetime.date(2008, 12, 16))
Review.objects.using('other').create(source="Python Monthly",
content_object=pro)
self.assertEqual(pro.reviews.db, 'other')
self.assertEqual(pro.reviews.db_manager('default').db, 'default')
self.assertEqual(pro.reviews.db_manager('default').all().db, 'default')
def test_subquery(self):
"""Make sure as_sql works with subqueries and primary/replica."""
# Create a book and author on the other database
mark = Person.objects.using('other').create(name="Mark Pilgrim")
Book.objects.using('other').create(title="Dive into Python",
published=datetime.date(2009, 5, 4),
editor=mark)
sub = Person.objects.filter(name='Mark Pilgrim')
qs = Book.objects.filter(editor__in=sub)
# When you call __str__ on the query object, it doesn't know about using
# so it falls back to the default. Don't let routing instructions
# force the subquery to an incompatible database.
str(qs.query)
# If you evaluate the query, it should work, running on 'other'
self.assertEqual(list(qs.values_list('title', flat=True)), ['Dive into Python'])
    def test_deferred_models(self):
        """Instances with deferred fields save correctly across databases."""
        mark_def = Person.objects.using('default').create(name="Mark Pilgrim")
        mark_other = Person.objects.using('other').create(name="Mark Pilgrim")
        orig_b = Book.objects.using('other').create(title="Dive into Python",
                                                    published=datetime.date(2009, 5, 4),
                                                    editor=mark_other)
        # only('title') defers 'published'; touching it must trigger a load
        # from the instance's own database ('other'), not the default.
        b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
        self.assertEqual(b.published, datetime.date(2009, 5, 4))
        # Re-fetch a deferred instance from 'other', repoint the FK at the
        # 'default' editor and save to 'default': the deferred 'published'
        # value must survive the cross-database save (asserted below).
        b = Book.objects.using('other').only('title').get(pk=orig_b.pk)
        b.editor = mark_def
        b.save(using='default')
        self.assertEqual(Book.objects.using('default').get(pk=b.pk).published,
                         datetime.date(2009, 5, 4))
@override_settings(DATABASE_ROUTERS=[AuthRouter()])
class AuthTestCase(TestCase):
    """Routing of contrib.auth manager operations through AuthRouter."""

    multi_db = True

    def test_auth_manager(self):
        "The methods on the auth manager obey database hints"
        # Create one user using default allocation policy (the router sends
        # it to 'other', as asserted below)
        User.objects.create_user('alice', 'alice@example.com')
        # Create another user, explicitly specifying the database
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')
        # The first user only exists on the 'other' database
        alice = User.objects.using('other').get(username='alice')
        self.assertEqual(alice.username, 'alice')
        self.assertEqual(alice._state.db, 'other')
        self.assertRaises(User.DoesNotExist, User.objects.using('default').get, username='alice')
        # The second user only exists on the default database
        bob = User.objects.using('default').get(username='bob')
        self.assertEqual(bob.username, 'bob')
        self.assertEqual(bob._state.db, 'default')
        self.assertRaises(User.DoesNotExist, User.objects.using('other').get, username='bob')
        # That is... there is one user on each database
        self.assertEqual(User.objects.using('default').count(), 1)
        self.assertEqual(User.objects.using('other').count(), 1)

    def test_dumpdata(self):
        "Check that dumpdata honors allow_migrate restrictions on the router"
        User.objects.create_user('alice', 'alice@example.com')
        User.objects.db_manager('default').create_user('bob', 'bob@example.com')
        # Check that dumping the default database doesn't try to include auth
        # because allow_migrate prohibits auth on default
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json', database='default', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertEqual(command_output, '[]')
        # Check that dumping the other database does include auth
        new_io = StringIO()
        management.call_command('dumpdata', 'auth', format='json', database='other', stdout=new_io)
        command_output = new_io.getvalue().strip()
        self.assertIn('"email": "alice@example.com"', command_output)
class AntiPetRouter(object):
    """
    Router with an opinion only on migrations: the 'pet' model belongs on
    the 'other' database, and every other model belongs anywhere but there.
    """
    def allow_migrate(self, db, app_label, model_name=None, **hints):
        # 'pet' migrates exactly when targeting 'other'; i.e. the answer is
        # (db is 'other') XNOR (model is 'pet').
        return (db == 'other') == (model_name == 'pet')
class FixtureTestCase(TestCase):
    """Loading of multi-database fixtures, with and without router limits."""

    multi_db = True
    fixtures = ['multidb-common', 'multidb']

    @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
    def test_fixture_loading(self):
        "Multi-db fixtures are loaded correctly"
        # Check that "Pro Django" exists on the default database, but not on other database
        try:
            Book.objects.get(title="Pro Django")
            Book.objects.using('default').get(title="Pro Django")
        except Book.DoesNotExist:
            self.fail('"Pro Django" should exist on default database')
        self.assertRaises(
            Book.DoesNotExist,
            Book.objects.using('other').get,
            title="Pro Django"
        )
        # Check that "Dive into Python" exists on the other database, but not on default
        try:
            Book.objects.using('other').get(title="Dive into Python")
        except Book.DoesNotExist:
            self.fail('"Dive into Python" should exist on other database')
        self.assertRaises(
            Book.DoesNotExist,
            Book.objects.get,
            title="Dive into Python"
        )
        self.assertRaises(
            Book.DoesNotExist,
            Book.objects.using('default').get,
            title="Dive into Python"
        )
        # Check that "Definitive Guide" exists on both databases
        try:
            Book.objects.get(title="The Definitive Guide to Django")
            Book.objects.using('default').get(title="The Definitive Guide to Django")
            Book.objects.using('other').get(title="The Definitive Guide to Django")
        except Book.DoesNotExist:
            self.fail('"The Definitive Guide to Django" should exist on both databases')

    @override_settings(DATABASE_ROUTERS=[AntiPetRouter()])
    def test_pseudo_empty_fixtures(self):
        "A fixture can contain entries, but lead to nothing in the database; this shouldn't raise an error (ref #14068)"
        new_io = StringIO()
        management.call_command('loaddata', 'pets', stdout=new_io, stderr=new_io)
        command_output = new_io.getvalue().strip()
        # No objects will actually be loaded (the router diverts them all)
        self.assertEqual(command_output, "Installed 0 object(s) (of 2) from 1 fixture(s)")
class PickleQuerySetTestCase(TestCase):
    """Pickling a queryset must round-trip its database alias."""

    multi_db = True

    def test_pickling(self):
        for alias in connections:
            Book.objects.using(alias).create(title='Dive into Python',
                                             published=datetime.date(2009, 5, 4))
            queryset = Book.objects.all()
            restored = pickle.loads(pickle.dumps(queryset))
            self.assertEqual(queryset.db, restored.db)
class DatabaseReceiver(object):
    """
    Signal receiver that records the database alias (the ``using`` kwarg)
    it was last invoked with. Used by the database-argument signal tests
    (#13552).
    """
    def __call__(self, signal, sender, **kwargs):
        # Remember which database the signal reported firing for.
        reported_alias = kwargs['using']
        self._database = reported_alias
class WriteToOtherRouter(object):
    """Routes every single write to the 'other' database."""
    def db_for_write(self, model, **hints):
        # Unconditional: the model and hints are irrelevant here.
        return "other"
class SignalTests(TestCase):
    """The save/delete and m2m_changed signals must report the database
    they fired for via their ``using`` argument (#13552)."""

    multi_db = True

    def override_router(self):
        # Context manager: route all writes to 'other' for its duration.
        return override_settings(DATABASE_ROUTERS=[WriteToOtherRouter()])

    def test_database_arg_save_and_delete(self):
        """
        Tests that the pre/post_save signal contains the correct database.
        (#13552)
        """
        # Make some signal receivers
        pre_save_receiver = DatabaseReceiver()
        post_save_receiver = DatabaseReceiver()
        pre_delete_receiver = DatabaseReceiver()
        post_delete_receiver = DatabaseReceiver()
        # Make model and connect receivers
        signals.pre_save.connect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.connect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.connect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.connect(sender=Person, receiver=post_delete_receiver)
        p = Person.objects.create(name='Darth Vader')
        # Save and test receivers got calls
        p.save()
        self.assertEqual(pre_save_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_save_receiver._database, DEFAULT_DB_ALIAS)
        # Delete, and test
        p.delete()
        self.assertEqual(pre_delete_receiver._database, DEFAULT_DB_ALIAS)
        self.assertEqual(post_delete_receiver._database, DEFAULT_DB_ALIAS)
        # Save again to a different database
        p.save(using="other")
        self.assertEqual(pre_save_receiver._database, "other")
        self.assertEqual(post_save_receiver._database, "other")
        # Delete, and test
        p.delete(using="other")
        self.assertEqual(pre_delete_receiver._database, "other")
        self.assertEqual(post_delete_receiver._database, "other")
        # Disconnect so the receivers don't leak into other tests
        signals.pre_save.disconnect(sender=Person, receiver=pre_save_receiver)
        signals.post_save.disconnect(sender=Person, receiver=post_save_receiver)
        signals.pre_delete.disconnect(sender=Person, receiver=pre_delete_receiver)
        signals.post_delete.disconnect(sender=Person, receiver=post_delete_receiver)

    def test_database_arg_m2m(self):
        """
        Test that the m2m_changed signal has a correct database arg (#13552)
        """
        # Make a receiver
        receiver = DatabaseReceiver()
        # Connect it
        signals.m2m_changed.connect(receiver=receiver)
        # Create the models that will be used for the tests
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        # Create a copy of the models on the 'other' database to prevent
        # integrity errors on backends that don't defer constraints checks
        Book.objects.using('other').create(pk=b.pk, title=b.title,
                                           published=b.published)
        Person.objects.using('other').create(pk=p.pk, name=p.name)
        # Each operation is exercised twice: once on the default database,
        # once with the write-to-'other' router installed.
        # Test addition
        b.authors.add(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            b.authors.add(p)
        self.assertEqual(receiver._database, "other")
        # Test removal
        b.authors.remove(p)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            b.authors.remove(p)
        self.assertEqual(receiver._database, "other")
        # Test addition in reverse
        p.book_set.add(b)
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            p.book_set.add(b)
        self.assertEqual(receiver._database, "other")
        # Test clearing
        b.authors.clear()
        self.assertEqual(receiver._database, DEFAULT_DB_ALIAS)
        with self.override_router():
            b.authors.clear()
        self.assertEqual(receiver._database, "other")
class AttributeErrorRouter(object):
    """
    Deliberately broken router used to verify that ConnectionRouter lets
    an AttributeError raised inside a router bubble up to the caller.
    """
    def db_for_read(self, model, **hints):
        # Simulate a faulty router implementation.
        raise AttributeError

    def db_for_write(self, model, **hints):
        # Same failure on the write path.
        raise AttributeError
class RouterAttributeErrorTestCase(TestCase):
    """An AttributeError raised by a router must not be swallowed by
    ConnectionRouter; it must propagate out of the ORM operation."""

    multi_db = True

    def override_router(self):
        # Context manager installing the deliberately-broken router.
        return override_settings(DATABASE_ROUTERS=[AttributeErrorRouter()])

    def test_attribute_error_read(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        with self.override_router():
            self.assertRaises(AttributeError, Book.objects.get, pk=b.pk)

    def test_attribute_error_save(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        dive = Book()
        dive.title = "Dive into Python"
        dive.published = datetime.date(2009, 5, 4)
        with self.override_router():
            self.assertRaises(AttributeError, dive.save)

    def test_attribute_error_delete(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        # Relations are assigned while the normal router is still active;
        # only the delete() below runs under the broken router.
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        b.authors = [p]
        b.editor = p
        with self.override_router():
            self.assertRaises(AttributeError, b.delete)

    def test_attribute_error_m2m(self):
        "Check that the AttributeError from AttributeErrorRouter bubbles up"
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        with self.override_router():
            self.assertRaises(AttributeError, setattr, b, 'authors', [p])
class ModelMetaRouter(object):
    """Router asserting the ORM always hands it a real model class."""
    def db_for_write(self, model, **hints):
        # Genuine model classes always carry ``_meta``; anything else means
        # a non-model leaked into the routing machinery.
        if hasattr(model, '_meta'):
            return None
        raise ValueError
@override_settings(DATABASE_ROUTERS=[ModelMetaRouter()])
class RouterModelArgumentTestCase(TestCase):
    """With ModelMetaRouter installed, any write whose model argument is
    not a real model class raises ValueError — so these operations merely
    completing proves the router always receives a model class."""

    multi_db = True

    def test_m2m_collection(self):
        b = Book.objects.create(title="Pro Django",
                                published=datetime.date(2008, 12, 16))
        p = Person.objects.create(name="Marty Alchin")
        # test add
        b.authors.add(p)
        # test remove
        b.authors.remove(p)
        # test clear
        b.authors.clear()
        # test setattr
        b.authors = [p]
        # test delete (cascades through the M2M collection)
        b.delete()

    def test_foreignkey_collection(self):
        person = Person.objects.create(name='Bob')
        Pet.objects.create(owner=person, name='Wart')
        # test related FK collection
        person.delete()
class SyncOnlyDefaultDatabaseRouter(object):
    """Permits migrations on the default database alias only."""
    def allow_migrate(self, db, app_label, **hints):
        # Any alias other than the default is off limits for migrations.
        return DEFAULT_DB_ALIAS == db
class MigrateTestCase(TestCase):
    """'migrate --database' must target the named database and respect
    router allow_migrate() decisions."""

    available_apps = [
        'multiple_database',
        'django.contrib.auth',
        'django.contrib.contenttypes'
    ]
    multi_db = True

    def test_migrate_to_other_database(self):
        """Regression test for #16039: migrate with --database option."""
        cts = ContentType.objects.using('other').filter(app_label='multiple_database')
        count = cts.count()
        self.assertGreater(count, 0)
        # Delete the content types on 'other'; migrating 'other' restores
        # them (asserted below).
        cts.delete()
        management.call_command('migrate', verbosity=0, interactive=False, database='other')
        self.assertEqual(cts.count(), count)

    def test_migrate_to_other_database_with_router(self):
        """Regression test for #16039: migrate with --database option."""
        cts = ContentType.objects.using('other').filter(app_label='multiple_database')
        cts.delete()
        with override_settings(DATABASE_ROUTERS=[SyncOnlyDefaultDatabaseRouter()]):
            management.call_command('migrate', verbosity=0, interactive=False, database='other')
        # The router forbids migrating to 'other', so nothing reappears there.
        self.assertEqual(cts.count(), 0)
class RouterUsed(Exception):
    """
    Raised by the checking router in RouteForWriteTestCase to capture how
    the router was consulted: the access mode, the model argument and the
    hints dict.
    """
    WRITE = 'write'

    def __init__(self, mode, model, hints):
        # BaseException.__new__ already records the constructor args, so no
        # explicit superclass __init__ call is needed here.
        self.mode, self.model, self.hints = mode, model, hints
class RouteForWriteTestCase(TestCase):
    """
    Every write operation (FK and M2M add/remove/clear/update/delete/
    get_or_create, in both directions) must consult db_for_write() on the
    router, passing the model being written plus an ``instance`` hint.

    Every test follows the same pattern: run the operation with
    WriteCheckRouter installed, expect RouterUsed to be raised (self.fail
    fires if it is not), then check the mode, model and hints the router
    was consulted with.
    """

    multi_db = True

    class WriteCheckRouter(object):
        # Aborts every write by raising, capturing the router's arguments.
        def db_for_write(self, model, **hints):
            raise RouterUsed(mode=RouterUsed.WRITE, model=model, hints=hints)

    def override_router(self):
        # Context manager installing the write-checking router.
        return override_settings(DATABASE_ROUTERS=[RouteForWriteTestCase.WriteCheckRouter()])

    def test_fk_delete(self):
        owner = Person.objects.create(name='Someone')
        pet = Pet.objects.create(name='fido', owner=owner)
        try:
            with self.override_router():
                pet.owner.delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_delete(self):
        owner = Person.objects.create(name='Someone')
        to_del_qs = owner.pet_set.all()
        try:
            with self.override_router():
                to_del_qs.delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Pet)
            self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_get_or_create(self):
        owner = Person.objects.create(name='Someone')
        try:
            with self.override_router():
                owner.pet_set.get_or_create(name='fido')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Pet)
            self.assertEqual(e.hints, {'instance': owner})

    def test_reverse_fk_update(self):
        owner = Person.objects.create(name='Someone')
        Pet.objects.create(name='fido', owner=owner)
        try:
            with self.override_router():
                owner.pet_set.update(name='max')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Pet)
            self.assertEqual(e.hints, {'instance': owner})

    def test_m2m_add(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                book.authors.add(auth)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_clear(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.clear()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_delete(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.all().delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_get_or_create(self):
        Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                book.authors.get_or_create(name='Someone else')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_remove(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.remove(auth)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': book})

    def test_m2m_update(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                book.authors.all().update(name='Different')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': book})

    def test_reverse_m2m_add(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                auth.book_set.add(book)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_clear(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.clear()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_delete(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.all().delete()
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_get_or_create(self):
        auth = Person.objects.create(name='Someone')
        Book.objects.create(title="Pro Django",
                            published=datetime.date(2008, 12, 16))
        try:
            with self.override_router():
                auth.book_set.get_or_create(title="New Book", published=datetime.datetime.now())
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Person)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_remove(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.remove(book)
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book.authors.through)
            self.assertEqual(e.hints, {'instance': auth})

    def test_reverse_m2m_update(self):
        auth = Person.objects.create(name='Someone')
        book = Book.objects.create(title="Pro Django",
                                   published=datetime.date(2008, 12, 16))
        book.authors.add(auth)
        try:
            with self.override_router():
                auth.book_set.all().update(title='Different')
            self.fail('db_for_write() not invoked on router')
        except RouterUsed as e:
            self.assertEqual(e.mode, RouterUsed.WRITE)
            self.assertEqual(e.model, Book)
            self.assertEqual(e.hints, {'instance': auth})
| bsd-3-clause |
decvalts/cartopy | versioneer.py | 1 | 72608 |
# Version: 0.15+dev
"""The Versioneer - like a rocketeer, but for versions.
The Versioneer
==============
* like a rocketeer, but for versions!
* https://github.com/warner/python-versioneer
* Brian Warner
* License: Public Domain
* Compatible With: python2.6, 2.7, 3.2, 3.3, 3.4, and pypy
* [![Latest Version]
(https://pypip.in/version/versioneer/badge.svg?style=flat)
](https://pypi.python.org/pypi/versioneer/)
* [![Build Status]
(https://travis-ci.org/warner/python-versioneer.png?branch=master)
](https://travis-ci.org/warner/python-versioneer)
This is a tool for managing a recorded version number in distutils-based
python projects. The goal is to remove the tedious and error-prone "update
the embedded version string" step from your release process. Making a new
release should be as easy as recording a new tag in your version-control
system, and maybe making new tarballs.
## Quick Install
* `pip install versioneer` to somewhere to your $PATH
* add a `[versioneer]` section to your setup.cfg (see below)
* run `versioneer install` in your source tree, commit the results
## Version Identifiers
Source trees come from a variety of places:
* a version-control system checkout (mostly used by developers)
* a nightly tarball, produced by build automation
* a snapshot tarball, produced by a web-based VCS browser, like github's
"tarball from tag" feature
* a release tarball, produced by "setup.py sdist", distributed through PyPI
Within each source tree, the version identifier (either a string or a number,
this tool is format-agnostic) can come from a variety of places:
* ask the VCS tool itself, e.g. "git describe" (for checkouts), which knows
about recent "tags" and an absolute revision-id
* the name of the directory into which the tarball was unpacked
* an expanded VCS keyword ($Id$, etc)
* a `_version.py` created by some earlier build step
For released software, the version identifier is closely related to a VCS
tag. Some projects use tag names that include more than just the version
string (e.g. "myproject-1.2" instead of just "1.2"), in which case the tool
needs to strip the tag prefix to extract the version identifier. For
unreleased software (between tags), the version identifier should provide
enough information to help developers recreate the same tree, while also
giving them an idea of roughly how old the tree is (after version 1.2, before
version 1.3). Many VCS systems can report a description that captures this,
for example `git describe --tags --dirty --always` reports things like
"0.7-1-g574ab98-dirty" to indicate that the checkout is one revision past the
0.7 tag, has a unique revision id of "574ab98", and is "dirty" (it has
uncommitted changes).
The version identifier is used for multiple purposes:
* to allow the module to self-identify its version: `myproject.__version__`
* to choose a name and prefix for a 'setup.py sdist' tarball
## Theory of Operation
Versioneer works by adding a special `_version.py` file into your source
tree, where your `__init__.py` can import it. This `_version.py` knows how to
dynamically ask the VCS tool for version information at import time.
`_version.py` also contains `$Revision$` markers, and the installation
process marks `_version.py` to have this marker rewritten with a tag name
during the `git archive` command. As a result, generated tarballs will
contain enough information to get the proper version.
To allow `setup.py` to compute a version too, a `versioneer.py` is added to
the top level of your source tree, next to `setup.py` and the `setup.cfg`
that configures it. This overrides several distutils/setuptools commands to
compute the version when invoked, and changes `setup.py build` and `setup.py
sdist` to replace `_version.py` with a small static file that contains just
the generated version data.
## Installation
First, decide on values for the following configuration variables:
* `VCS`: the version control system you use. Currently accepts "git".
* `style`: the style of version string to be produced. See "Styles" below for
details. Defaults to "pep440", which looks like
`TAG[+DISTANCE.gSHORTHASH[.dirty]]`.
* `versionfile_source`:
A project-relative pathname into which the generated version strings should
be written. This is usually a `_version.py` next to your project's main
`__init__.py` file, so it can be imported at runtime. If your project uses
`src/myproject/__init__.py`, this should be `src/myproject/_version.py`.
This file should be checked in to your VCS as usual: the copy created below
by `setup.py setup_versioneer` will include code that parses expanded VCS
keywords in generated tarballs. The 'build' and 'sdist' commands will
replace it with a copy that has just the calculated version string.
This must be set even if your project does not have any modules (and will
therefore never import `_version.py`), since "setup.py sdist" -based trees
still need somewhere to record the pre-calculated version strings. Anywhere
in the source tree should do. If there is a `__init__.py` next to your
`_version.py`, the `setup.py setup_versioneer` command (described below)
will append some `__version__`-setting assignments, if they aren't already
present.
* `versionfile_build`:
Like `versionfile_source`, but relative to the build directory instead of
the source directory. These will differ when your setup.py uses
'package_dir='. If you have `package_dir={'myproject': 'src/myproject'}`,
then you will probably have `versionfile_build='myproject/_version.py'` and
`versionfile_source='src/myproject/_version.py'`.
If this is set to None, then `setup.py build` will not attempt to rewrite
any `_version.py` in the built tree. If your project does not have any
libraries (e.g. if it only builds a script), then you should use
`versionfile_build = None`. To actually use the computed version string,
your `setup.py` will need to override `distutils.command.build_scripts`
with a subclass that explicitly inserts a copy of
`versioneer.get_version()` into your script file. See
`test/demoapp-script-only/setup.py` for an example.
* `tag_prefix`:
a string, like 'PROJECTNAME-', which appears at the start of all VCS tags.
If your tags look like 'myproject-1.2.0', then you should use
tag_prefix='myproject-'. If you use unprefixed tags like '1.2.0', this
should be an empty string, using either `tag_prefix=` or `tag_prefix=''`.
* `parentdir_prefix`:
an optional string, frequently the same as tag_prefix, which appears at the
start of all unpacked tarball filenames. If your tarball unpacks into
'myproject-1.2.0', this should be 'myproject-'. To disable this feature,
just omit the field from your `setup.cfg`.
This tool provides one script, named `versioneer`. That script has one mode,
"install", which writes a copy of `versioneer.py` into the current directory
and runs `versioneer.py setup` to finish the installation.
To versioneer-enable your project:
* 1: Modify your `setup.cfg`, adding a section named `[versioneer]` and
populating it with the configuration values you decided earlier (note that
the option names are not case-sensitive):
````
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
````
* 2: Run `versioneer install`. This will do the following:
* copy `versioneer.py` into the top of your source tree
* create `_version.py` in the right place (`versionfile_source`)
* modify your `__init__.py` (if one exists next to `_version.py`) to define
`__version__` (by calling a function from `_version.py`)
* modify your `MANIFEST.in` to include both `versioneer.py` and the
generated `_version.py` in sdist tarballs
`versioneer install` will complain about any problems it finds with your
`setup.py` or `setup.cfg`. Run it multiple times until you have fixed all
the problems.
* 3: add a `import versioneer` to your setup.py, and add the following
arguments to the setup() call:
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
* 4: commit these changes to your VCS. To make sure you won't forget,
`versioneer install` will mark everything it touched for addition using
`git add`. Don't forget to add `setup.py` and `setup.cfg` too.
## Post-Installation Usage
Once established, all uses of your tree from a VCS checkout should get the
current version string. All generated tarballs should include an embedded
version string (so users who unpack them will not need a VCS tool installed).
If you distribute your project through PyPI, then the release process should
boil down to two steps:
* 1: git tag 1.0
* 2: python setup.py register sdist upload
If you distribute it through github (i.e. users use github to generate
tarballs with `git archive`), the process is:
* 1: git tag 1.0
* 2: git push; git push --tags
Versioneer will report "0+untagged.NUMCOMMITS.gHASH" until your tree has at
least one tag in its history.
## Version-String Flavors
Code which uses Versioneer can learn about its version string at runtime by
importing `_version` from your main `__init__.py` file and running the
`get_versions()` function. From the "outside" (e.g. in `setup.py`), you can
import the top-level `versioneer.py` and run `get_versions()`.
Both functions return a dictionary with different flavors of version
information:
* `['version']`: A condensed version string, rendered using the selected
style. This is the most commonly used value for the project's version
string. The default "pep440" style yields strings like `0.11`,
`0.11+2.g1076c97`, or `0.11+2.g1076c97.dirty`. See the "Styles" section
below for alternative styles.
* `['full-revisionid']`: detailed revision identifier. For Git, this is the
full SHA1 commit id, e.g. "1076c978a8d3cfc70f408fe5974aa6c092c949ac".
* `['dirty']`: a boolean, True if the tree has uncommitted changes. Note that
this is only accurate if run in a VCS checkout, otherwise it is likely to
be False or None
* `['error']`: if the version string could not be computed, this will be set
to a string describing the problem, otherwise it will be None. It may be
useful to throw an exception in setup.py if this is set, to avoid e.g.
creating tarballs with a version string of "unknown".
Some variants are more useful than others. Including `full-revisionid` in a
bug report should allow developers to reconstruct the exact code being tested
(or indicate the presence of local changes that should be shared with the
developers). `version` is suitable for display in an "about" box or a CLI
`--version` output: it can be easily compared against release notes and lists
of bugs fixed in various releases.
The installer adds the following text to your `__init__.py` to place a basic
version in `YOURPROJECT.__version__`:
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
## Styles
The setup.cfg `style=` configuration controls how the VCS information is
rendered into a version string.
The default style, "pep440", produces a PEP440-compliant string, equal to the
un-prefixed tag name for actual releases, and containing an additional "local
version" section with more detail for in-between builds. For Git, this is
TAG[+DISTANCE.gHEX[.dirty]] , using information from `git describe --tags
--dirty --always`. For example "0.11+2.g1076c97.dirty" indicates that the
tree is like the "1076c97" commit but has uncommitted changes (".dirty"), and
that this commit is two revisions ("+2") beyond the "0.11" tag. For released
software (exactly equal to a known tag), the identifier will only contain the
stripped tag, e.g. "0.11".
Other styles are available. See details.md in the Versioneer source tree for
descriptions.
## Debugging
Versioneer tries to avoid fatal errors: if something goes wrong, it will tend
to return a version of "0+unknown". To investigate the problem, run `setup.py
version`, which will run the version-lookup code in a verbose mode, and will
display the full contents of `get_versions()` (including the `error` string,
which may help identify what went wrong).
## Updating Versioneer
To upgrade your project to a new release of Versioneer, do the following:
* install the new Versioneer (`pip install -U versioneer` or equivalent)
* edit `setup.cfg`, if necessary, to include any new configuration settings
indicated by the release notes
* re-run `versioneer install` in your source tree, to replace
`SRC/_version.py`
* commit any changed files
### Upgrading to 0.15
Starting with this version, Versioneer is configured with a `[versioneer]`
section in your `setup.cfg` file. Earlier versions required the `setup.py` to
set attributes on the `versioneer` module immediately after import. The new
version will refuse to run (raising an exception during import) until you
have provided the necessary `setup.cfg` section.
In addition, the Versioneer package provides an executable named
`versioneer`, and the installation process is driven by running `versioneer
install`. In 0.14 and earlier, the executable was named
`versioneer-installer` and was run without an argument.
### Upgrading to 0.14
0.14 changes the format of the version string. 0.13 and earlier used
hyphen-separated strings like "0.11-2-g1076c97-dirty". 0.14 and beyond use a
plus-separated "local version" section strings, with dot-separated
components, like "0.11+2.g1076c97". PEP440-strict tools did not like the old
format, but should be ok with the new one.
### Upgrading from 0.11 to 0.12
Nothing special.
### Upgrading from 0.10 to 0.11
You must add a `versioneer.VCS = "git"` to your `setup.py` before re-running
`setup.py setup_versioneer`. This will enable the use of additional
version-control systems (SVN, etc) in the future.
## Future Directions
This tool is designed to make it easily extended to other version-control
systems: all VCS-specific components are in separate directories like
src/git/ . The top-level `versioneer.py` script is assembled from these
components by running make-versioneer.py . In the future, make-versioneer.py
will take a VCS name as an argument, and will construct a version of
`versioneer.py` that is specific to the given VCS. It might also take the
configuration arguments that are currently provided manually during
installation by editing setup.py . Alternatively, it might go the other
direction and include code from all supported VCS systems, reducing the
number of intermediate scripts.
## License
To make Versioneer easier to embed, all its code is dedicated to the public
domain. The `_version.py` that it creates is also in the public domain.
Specifically, both are released under the Creative Commons "Public Domain
Dedication" license (CC0-1.0), as described in
https://creativecommons.org/publicdomain/zero/1.0/ .
"""
from __future__ import print_function
try:
import configparser
except ImportError:
import ConfigParser as configparser
import errno
import json
import os
import re
import subprocess
import sys
class VersioneerConfig:
    """Container for Versioneer configuration parameters."""
    # Plain attribute bag: fields (VCS, style, versionfile_source,
    # versionfile_build, tag_prefix, parentdir_prefix, verbose) are
    # assigned dynamically by get_config_from_root().
def get_root():
    """Get the project root directory.

    We require that all commands are run from the project root, i.e. the
    directory that contains setup.py, setup.cfg, and versioneer.py .
    """
    def has_project_files(candidate):
        # The root is recognized by the presence of setup.py or versioneer.py.
        return (os.path.exists(os.path.join(candidate, "setup.py"))
                or os.path.exists(os.path.join(candidate, "versioneer.py")))

    root = os.path.realpath(os.path.abspath(os.getcwd()))
    if not has_project_files(root):
        # allow 'python path/to/setup.py COMMAND': fall back to the
        # directory that holds the script being executed.
        root = os.path.dirname(os.path.realpath(os.path.abspath(sys.argv[0])))
        if not has_project_files(root):
            err = ("Versioneer was unable to run the project root directory. "
                   "Versioneer requires setup.py to be executed from "
                   "its immediate directory (like 'python setup.py COMMAND'), "
                   "or in a way that lets it use sys.argv[0] to find the root "
                   "(like 'python path/to/setup.py COMMAND').")
            raise VersioneerBadRootError(err)
    try:
        # Certain runtime workflows (setup.py install/develop in a setuptools
        # tree) execute all dependencies in a single python process, so
        # "versioneer" may be imported multiple times, and python's shared
        # module-import table will cache the first one. So we can't use
        # os.path.dirname(__file__), as that will find whichever
        # versioneer.py was first imported, even in later projects.
        me = os.path.realpath(os.path.abspath(__file__))
        versioneer_py = os.path.join(root, "versioneer.py")
        if os.path.splitext(me)[0] != os.path.splitext(versioneer_py)[0]:
            print("Warning: build in %s is using versioneer.py from %s"
                  % (os.path.dirname(me), versioneer_py))
    except NameError:
        # Frozen/embedded interpreters may not define __file__.
        pass
    return root
def get_config_from_root(root):
    """Read the project setup.cfg file to determine Versioneer config.

    Returns a populated VersioneerConfig.  This might raise
    EnvironmentError (if setup.cfg is missing), or
    configparser.NoSectionError (if it lacks a [versioneer] section), or
    configparser.NoOptionError (if it lacks "VCS="). See the docstring at
    the top of versioneer.py for instructions on writing your setup.cfg .
    """
    setup_cfg = os.path.join(root, "setup.cfg")
    # SafeConfigParser and readfp() were deprecated aliases removed in
    # Python 3.12; ConfigParser/read_file() are their replacements.
    parser = configparser.ConfigParser()
    with open(setup_cfg, "r") as f:
        if hasattr(parser, "read_file"):
            parser.read_file(f)
        else:  # Python 2 fallback
            parser.readfp(f)
    VCS = parser.get("versioneer", "VCS")  # mandatory

    def get(parser, name):
        # Optional settings come back as None when absent.
        if parser.has_option("versioneer", name):
            return parser.get("versioneer", name)
        return None
    cfg = VersioneerConfig()
    cfg.VCS = VCS
    cfg.style = get(parser, "style") or ""
    cfg.versionfile_source = get(parser, "versionfile_source")
    cfg.versionfile_build = get(parser, "versionfile_build")
    cfg.tag_prefix = get(parser, "tag_prefix")
    # A quoted empty string in setup.cfg means "no tag prefix".
    if cfg.tag_prefix in ("''", '""'):
        cfg.tag_prefix = ""
    cfg.parentdir_prefix = get(parser, "parentdir_prefix")
    cfg.verbose = get(parser, "verbose")
    return cfg
class NotThisMethod(Exception):
    """Exception raised if a method is not valid for the current scenario.

    Versioneer tries several version-discovery strategies in turn
    (expanded keywords, querying the VCS, parent-directory name); each
    strategy raises NotThisMethod to mean "try the next one" rather than
    signalling a hard failure.
    """
# these dictionaries contain VCS-specific tools: per-VCS _version.py
# templates, and per-VCS/per-method handler callables.
LONG_VERSION_PY = {}
HANDLERS = {}


def register_vcs_handler(vcs, method):  # decorator
    """Decorator to mark a method as the handler for a particular VCS."""
    def decorate(f):
        """Store f in HANDLERS[vcs][method] and return it unchanged."""
        HANDLERS.setdefault(vcs, {})[method] = f
        return f
    return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
    """Run the first launchable command from *commands* with *args*.

    Each candidate executable is tried in order; the first one that can
    be spawned wins.  Returns the process's stripped stdout as text, or
    None when no candidate could be launched or the process exited with
    a nonzero status.
    """
    assert isinstance(commands, list)
    process = None
    display = None
    for candidate in commands:
        display = str([candidate] + args)
        try:
            # remember shell=False, so use git.cmd on windows, not just git
            err_sink = subprocess.PIPE if hide_stderr else None
            process = subprocess.Popen([candidate] + args, cwd=cwd,
                                       stdout=subprocess.PIPE,
                                       stderr=err_sink)
        except EnvironmentError:
            exc = sys.exc_info()[1]
            if exc.errno == errno.ENOENT:
                # executable not found; try the next candidate
                continue
            if verbose:
                print("unable to run %s" % display)
                print(exc)
            return None
        break
    else:
        # every candidate was missing
        if verbose:
            print("unable to find command, tried %s" % (commands,))
        return None
    stdout = process.communicate()[0].strip()
    if sys.version_info[0] >= 3:
        stdout = stdout.decode()
    if process.returncode != 0:
        if verbose:
            print("unable to run %s (error)" % display)
        return None
    return stdout
LONG_VERSION_PY['git'] = r'''
# This file helps to compute a version number in source trees obtained from
# git-archive tarball (such as those provided by githubs download-from-tag
# feature). Distribution tarballs (built by setup.py sdist) and build
# directories (produced by setup.py build) will contain a much shorter file
# that just contains the computed version number.
# This file is released into the public domain. Generated by
# versioneer-0.15+dev (https://github.com/warner/python-versioneer)
"""Git implementation of _version.py."""
import errno
import os
import re
import subprocess
import sys
def get_keywords():
"""Get the keywords needed to look up the version information."""
# these strings will be replaced by git during git-archive.
# setup.py/versioneer.py will grep for the variable names, so they must
# each be defined on a line of their own. _version.py will just call
# get_keywords().
git_refnames = "%(DOLLAR)sFormat:%%d%(DOLLAR)s"
git_full = "%(DOLLAR)sFormat:%%H%(DOLLAR)s"
keywords = {"refnames": git_refnames, "full": git_full}
return keywords
class VersioneerConfig:
"""Container for Versioneer configuration parameters."""
def get_config():
"""Create, populate and return the VersioneerConfig() object."""
# these strings are filled in when 'setup.py versioneer' creates
# _version.py
cfg = VersioneerConfig()
cfg.VCS = "git"
cfg.style = "%(STYLE)s"
cfg.tag_prefix = "%(TAG_PREFIX)s"
cfg.parentdir_prefix = "%(PARENTDIR_PREFIX)s"
cfg.versionfile_source = "%(VERSIONFILE_SOURCE)s"
cfg.verbose = False
return cfg
class NotThisMethod(Exception):
"""Exception raised if a method is not valid for the current scenario."""
LONG_VERSION_PY = {}
HANDLERS = {}
def register_vcs_handler(vcs, method): # decorator
"""Decorator to mark a method as the handler for a particular VCS."""
def decorate(f):
"""Store f in HANDLERS[vcs][method]."""
if vcs not in HANDLERS:
HANDLERS[vcs] = {}
HANDLERS[vcs][method] = f
return f
return decorate
def run_command(commands, args, cwd=None, verbose=False, hide_stderr=False):
"""Call the given command(s)."""
assert isinstance(commands, list)
p = None
for c in commands:
try:
dispcmd = str([c] + args)
# remember shell=False, so use git.cmd on windows, not just git
p = subprocess.Popen([c] + args, cwd=cwd, stdout=subprocess.PIPE,
stderr=(subprocess.PIPE if hide_stderr
else None))
break
except EnvironmentError:
e = sys.exc_info()[1]
if e.errno == errno.ENOENT:
continue
if verbose:
print("unable to run %%s" %% dispcmd)
print(e)
return None
else:
if verbose:
print("unable to find command, tried %%s" %% (commands,))
return None
stdout = p.communicate()[0].strip()
if sys.version_info[0] >= 3:
stdout = stdout.decode()
if p.returncode != 0:
if verbose:
print("unable to run %%s (error)" %% dispcmd)
return None
return stdout
def versions_from_parentdir(parentdir_prefix, root, verbose):
"""Try to determine the version from the parent directory name.
Source tarballs conventionally unpack into a directory that includes
both the project name and a version string.
"""
dirname = os.path.basename(root)
if not dirname.startswith(parentdir_prefix):
if verbose:
print("guessing rootdir is '%%s', but '%%s' doesn't start with "
"prefix '%%s'" %% (root, dirname, parentdir_prefix))
raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
return {"version": dirname[len(parentdir_prefix):],
"full-revisionid": None,
"dirty": False, "error": None}
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
"""Extract version information from the given file."""
# the code embedded in _version.py can just fetch the value of these
# keywords. When used from setup.py, we don't want to import _version.py,
# so we do it with a regexp instead. This function is not used from
# _version.py.
keywords = {}
try:
f = open(versionfile_abs, "r")
for line in f.readlines():
if line.strip().startswith("git_refnames ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["refnames"] = mo.group(1)
if line.strip().startswith("git_full ="):
mo = re.search(r'=\s*"(.*)"', line)
if mo:
keywords["full"] = mo.group(1)
f.close()
except EnvironmentError:
pass
return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
"""Get version information from git keywords."""
if not keywords:
raise NotThisMethod("no keywords at all, weird")
refnames = keywords["refnames"].strip()
if refnames.startswith("$Format"):
if verbose:
print("keywords are unexpanded, not using")
raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
refs = [r.strip() for r in refnames.strip("()").split(",")]
# starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
# just "foo-1.0". If we see a "tag: " prefix, prefer those.
TAG = "tag: "
tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
if not tags:
# Either we're using git < 1.8.3, or there really are no tags. We use
# a heuristic: assume all version tags have a digit. The old git %%d
# expansion behaves like git log --decorate=short and strips out the
# refs/heads/ and refs/tags/ prefixes that would let us distinguish
# between branches and tags. By ignoring refnames without digits, we
# filter out many common branch names like "release" and
# "stabilization", as well as "HEAD" and "master".
tags = set([r for r in refs if re.search(r'\d', r)])
if verbose:
print("discarding '%%s', no digits" %% ",".join(set(refs) - tags))
if verbose:
print("likely tags: %%s" %% ",".join(sorted(tags)))
for ref in sorted(tags):
# sorting will prefer e.g. "2.0" over "2.0rc1"
if ref.startswith(tag_prefix):
r = ref[len(tag_prefix):]
if verbose:
print("picking %%s" %% r)
return {"version": r,
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": None, "branch": None
}
# no suitable tags, so version is "0+unknown", but full hex is still there
if verbose:
print("no suitable tags, using unknown + full revision id")
return {"version": "0+unknown",
"full-revisionid": keywords["full"].strip(),
"dirty": False, "error": "no suitable tags",
"branch": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
"""Get version from 'git describe' in the root of the source tree.
This only gets called if the git-archive 'subst' keywords were *not*
expanded, and _version.py hasn't already been rewritten with a short
version string, meaning we're inside a checked out source tree.
"""
if not os.path.exists(os.path.join(root, ".git")):
if verbose:
print("no .git in %%s" %% root)
raise NotThisMethod("no .git directory")
GITS = ["git"]
if sys.platform == "win32":
GITS = ["git.cmd", "git.exe"]
# if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
# if there isn't one, this yields HEX[-dirty] (no NUM). Note, for git v1.7
# and below, it is necessary to run "git update-index --refresh" first.
describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
"--always", "--long",
"--match", "%%s*" %% tag_prefix],
cwd=root)
# --long was added in git-1.5.5
if describe_out is None:
raise NotThisMethod("'git describe' failed")
describe_out = describe_out.strip()
full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
if full_out is None:
raise NotThisMethod("'git rev-parse' failed")
full_out = full_out.strip()
pieces = {}
pieces["long"] = full_out
pieces["short"] = full_out[:7] # maybe improved later
pieces["error"] = None
# abbrev-ref available with git >= 1.7
branch_name = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
cwd=root).strip()
if branch_name == 'HEAD':
branches = run_command(GITS, ["branch", "--contains"],
cwd=root).split('\n')
branches = [branch[2:] for branch in branches if branch[4:5] != '(']
if 'master' in branches:
branch_name = 'master'
elif not branches:
branch_name = None
else:
# Pick the first branch that is returned. Good or bad.
branch_name = branches[0]
pieces['branch'] = branch_name
# parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
# TAG might have hyphens.
git_describe = describe_out
# look for -dirty suffix
dirty = git_describe.endswith("-dirty")
pieces["dirty"] = dirty
if dirty:
git_describe = git_describe[:git_describe.rindex("-dirty")]
# now we have TAG-NUM-gHEX or HEX
if "-" in git_describe:
# TAG-NUM-gHEX
mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
if not mo:
# unparseable. Maybe git-describe is misbehaving?
pieces["error"] = ("unable to parse git-describe output: '%%s'"
%% describe_out)
return pieces
# tag
full_tag = mo.group(1)
if not full_tag.startswith(tag_prefix):
if verbose:
fmt = "tag '%%s' doesn't start with prefix '%%s'"
print(fmt %% (full_tag, tag_prefix))
pieces["error"] = ("tag '%%s' doesn't start with prefix '%%s'"
%% (full_tag, tag_prefix))
return pieces
pieces["closest-tag"] = full_tag[len(tag_prefix):]
# distance: number of commits since tag
pieces["distance"] = int(mo.group(2))
# commit: short hex revision ID
pieces["short"] = mo.group(3)
else:
# HEX: no tags
pieces["closest-tag"] = None
count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
cwd=root)
pieces["distance"] = int(count_out) # total number of commits
return pieces
# Default matches v1.2.x, maint/1.2.x, 1.2.x, 1.x etc.
default_maint_branch_regexp = ".*([0-9]+\.)+x$"
def plus_or_dot(pieces):
"""Return a + if we don't already have one, else return a ."""
if "+" in pieces.get("closest-tag", ""):
return "."
return "+"
def render_pep440(pieces):
"""Build up version string, with post-release "local version identifier".
Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty
Exceptions:
1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += plus_or_dot(pieces)
rendered += "%%d.g%%s" %% (pieces["distance"], pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
else:
# exception #1
rendered = "0+untagged.%%d.g%%s" %% (pieces["distance"],
pieces["short"])
if pieces["dirty"]:
rendered += ".dirty"
return rendered
def render_pep440_pre(pieces):
"""TAG[.post.devDISTANCE] -- No -dirty.
Exceptions:
1: no tags. 0.post.devDISTANCE
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post.dev%%d" %% pieces["distance"]
else:
# exception #1
rendered = "0.post.dev%%d" %% pieces["distance"]
return rendered
def render_pep440_post(pieces):
"""TAG[.postDISTANCE[.dev0]+gHEX] .
The ".dev0" means dirty. Note that .dev0 sorts backwards
(a dirty tree will appear "older" than the corresponding clean one),
but you shouldn't be releasing software with -dirty anyways.
Exceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += plus_or_dot(pieces)
rendered += "g%%s" %% pieces["short"]
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
rendered += "+g%%s" %% pieces["short"]
return rendered
def render_pep440_old(pieces):
"""TAG[.postDISTANCE[.dev0]] .
The ".dev0" means dirty.
Eexceptions:
1: no tags. 0.postDISTANCE[.dev0]
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"] or pieces["dirty"]:
rendered += ".post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
else:
# exception #1
rendered = "0.post%%d" %% pieces["distance"]
if pieces["dirty"]:
rendered += ".dev0"
return rendered
def render_git_describe(pieces):
"""TAG[-DISTANCE-gHEX][-dirty].
Like 'git describe --tags --dirty --always'.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def render_git_describe_long(pieces):
"""TAG-DISTANCE-gHEX[-dirty].
Like 'git describe --tags --dirty --always -long'.
The distance/hash is unconditional.
Exceptions:
1: no tags. HEX[-dirty] (note: no 'g' prefix)
"""
if pieces["closest-tag"]:
rendered = pieces["closest-tag"]
rendered += "-%%d-g%%s" %% (pieces["distance"], pieces["short"])
else:
# exception #1
rendered = pieces["short"]
if pieces["dirty"]:
rendered += "-dirty"
return rendered
def add_one_to_version(version_string, number_index_to_increment=-1):
"""
Add one to a version string at the given numeric indices.
>>> add_one_to_version('v1.2.3')
'v1.2.4'
"""
# Break up the tag by number groups (preserving multi-digit
# numbers as multidigit)
parts = re.split("([0-9]+)", version_string)
digit_parts = [(i, part) for i, part in enumerate(parts)
if part.isdigit()]
# Deal with negative indexing.
increment_at_index = ((number_index_to_increment + len(digit_parts))
%% len(digit_parts))
for n_seen, (i, part) in enumerate(digit_parts):
if n_seen == increment_at_index:
parts[i] = str(int(part) + 1)
elif n_seen > increment_at_index:
parts[i] = '0'
return ''.join(parts)
def render_pep440_branch_based(pieces):
# [TAG+1 of minor number][.devDISTANCE][+gHEX]. The git short is
# included for dirty.
# exceptions:
# 1: no tags. 0.0.0.devDISTANCE[+gHEX]
replacements = {' ': '.', '(': '', ')': ''}
[branch_name] = [pieces.get('branch').replace(old, new)
for old, new in replacements.items()]
master = branch_name == 'master'
maint = re.match(default_maint_branch_regexp,
branch_name or '')
# If we are on a tag, just pep440-pre it.
if pieces["closest-tag"] and not (pieces["distance"] or
pieces["dirty"]):
rendered = pieces["closest-tag"]
else:
# Put a default closest-tag in.
if not pieces["closest-tag"]:
pieces["closest-tag"] = '0.0.0'
if pieces["distance"] or pieces["dirty"]:
if maint:
rendered = pieces["closest-tag"]
if pieces["distance"]:
rendered += ".post%%d" %% pieces["distance"]
else:
rendered = add_one_to_version(pieces["closest-tag"])
if pieces["distance"]:
rendered += ".dev%%d" %% pieces["distance"]
suffix = []
# Put the branch name in if it isn't master nor a
# maintenance branch.
if not (master or maint):
suffix.append('%%s' %% (branch_name or 'unknown_branch'))
if pieces["dirty"]:
suffix.append('g%%s' %% pieces["short"])
rendered += '+%%s' %% ''.join(suffix)
else:
rendered = pieces["closest-tag"]
return rendered
STYLES = {'default': render_pep440,
'pep440': render_pep440,
'pep440-pre': render_pep440_pre,
'pep440-post': render_pep440_post,
'pep440-old': render_pep440_old,
'git-describe': render_git_describe,
'git-describe-long': render_git_describe_long,
'pep440-old': render_pep440_old,
'pep440-branch-based': render_pep440_branch_based,
}
def render(pieces, style):
"""Render the given version pieces into the requested style."""
if pieces["error"]:
return {"version": "unknown",
"full-revisionid": pieces.get("long"),
"dirty": None,
"error": pieces["error"]}
if not style:
style = 'default'
renderer = STYLES.get(style)
if not renderer:
raise ValueError("unknown style '%%s'" %% style)
rendered = renderer(pieces)
return {"version": rendered, "full-revisionid": pieces["long"],
"dirty": pieces["dirty"], "error": None}
def get_versions():
"""Get version information or return default if unable to do so."""
# I am in _version.py, which lives at ROOT/VERSIONFILE_SOURCE. If we have
# __file__, we can work backwards from there to the root. Some
# py2exe/bbfreeze/non-CPython implementations don't do __file__, in which
# case we can only use expanded keywords.
cfg = get_config()
verbose = cfg.verbose
try:
return git_versions_from_keywords(get_keywords(), cfg.tag_prefix,
verbose)
except NotThisMethod:
pass
try:
root = os.path.realpath(__file__)
# versionfile_source is the relative path from the top of the source
# tree (where the .git directory might live) to this file. Invert
# this to find the root from __file__.
for i in cfg.versionfile_source.split('/'):
root = os.path.dirname(root)
except NameError:
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to find root of source tree"}
try:
pieces = git_pieces_from_vcs(cfg.tag_prefix, root, verbose)
return render(pieces, cfg.style)
except NotThisMethod:
pass
try:
if cfg.parentdir_prefix:
return versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
except NotThisMethod:
pass
return {"version": "0+unknown", "full-revisionid": None,
"dirty": None,
"error": "unable to compute version"}
'''
@register_vcs_handler("git", "get_keywords")
def git_get_keywords(versionfile_abs):
    """Extract version information from the given file.

    Scans *versionfile_abs* for the ``git_refnames`` / ``git_full``
    assignments and returns the quoted values as a dict with "refnames"
    and "full" keys (either may be absent if not found).
    """
    # the code embedded in _version.py can just fetch the value of these
    # keywords. When used from setup.py, we don't want to import _version.py,
    # so we do it with a regexp instead. This function is not used from
    # _version.py.
    keywords = {}
    try:
        # "with" guarantees the handle is closed even if a read raises
        # part-way through (the original leaked the fd on that path).
        with open(versionfile_abs, "r") as f:
            for line in f.readlines():
                if line.strip().startswith("git_refnames ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["refnames"] = mo.group(1)
                if line.strip().startswith("git_full ="):
                    mo = re.search(r'=\s*"(.*)"', line)
                    if mo:
                        keywords["full"] = mo.group(1)
    except EnvironmentError:
        # A missing/unreadable file just means "no keywords available".
        pass
    return keywords
@register_vcs_handler("git", "keywords")
def git_versions_from_keywords(keywords, tag_prefix, verbose):
    """Get version information from git keywords.

    `keywords` holds the $Format$ strings captured at git-archive time
    (see git_get_keywords()); `tag_prefix` is stripped from candidate
    tags.  Returns a version dict, or raises NotThisMethod when the
    keywords are absent or were never expanded by git-archive.
    """
    if not keywords:
        raise NotThisMethod("no keywords at all, weird")
    refnames = keywords["refnames"].strip()
    if refnames.startswith("$Format"):
        # The $Format:%d$ marker was never substituted, so this is a plain
        # checkout rather than a git-archive tarball.
        if verbose:
            print("keywords are unexpanded, not using")
        raise NotThisMethod("unexpanded keywords, not a git-archive tarball")
    refs = [r.strip() for r in refnames.strip("()").split(",")]
    # starting in git-1.8.3, tags are listed as "tag: foo-1.0" instead of
    # just "foo-1.0". If we see a "tag: " prefix, prefer those.
    TAG = "tag: "
    tags = set([r[len(TAG):] for r in refs if r.startswith(TAG)])
    if not tags:
        # Either we're using git < 1.8.3, or there really are no tags. We use
        # a heuristic: assume all version tags have a digit. The old git %d
        # expansion behaves like git log --decorate=short and strips out the
        # refs/heads/ and refs/tags/ prefixes that would let us distinguish
        # between branches and tags. By ignoring refnames without digits, we
        # filter out many common branch names like "release" and
        # "stabilization", as well as "HEAD" and "master".
        tags = set([r for r in refs if re.search(r'\d', r)])
        if verbose:
            print("discarding '%s', no digits" % ",".join(set(refs) - tags))
    if verbose:
        print("likely tags: %s" % ",".join(sorted(tags)))
    for ref in sorted(tags):
        # sorting will prefer e.g. "2.0" over "2.0rc1"
        if ref.startswith(tag_prefix):
            r = ref[len(tag_prefix):]
            if verbose:
                print("picking %s" % r)
            # First matching tag in sorted order wins; "branch" is unknown
            # in a git-archive tarball, hence None.
            return {"version": r,
                    "full-revisionid": keywords["full"].strip(),
                    "dirty": False, "error": None, "branch": None
                    }
    # no suitable tags, so version is "0+unknown", but full hex is still there
    if verbose:
        print("no suitable tags, using unknown + full revision id")
    return {"version": "0+unknown",
            "full-revisionid": keywords["full"].strip(),
            "dirty": False, "error": "no suitable tags",
            "branch": None}
@register_vcs_handler("git", "pieces_from_vcs")
def git_pieces_from_vcs(tag_prefix, root, verbose, run_command=run_command):
    """Get version from 'git describe' in the root of the source tree.

    This only gets called if the git-archive 'subst' keywords were *not*
    expanded, and _version.py hasn't already been rewritten with a short
    version string, meaning we're inside a checked out source tree.

    Returns a "pieces" dict with keys: long, short, error, branch, dirty,
    and (on successful parse) closest-tag and distance.  Raises
    NotThisMethod if there is no .git directory or git cannot be run.
    """
    if not os.path.exists(os.path.join(root, ".git")):
        if verbose:
            print("no .git in %s" % root)
        raise NotThisMethod("no .git directory")
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    # if there is a tag matching tag_prefix, this yields TAG-NUM-gHEX[-dirty]
    # if there isn't one, this yields HEX[-dirty] (no NUM). Note, for git v1.7
    # and below, it is necessary to run "git update-index --refresh" first.
    describe_out = run_command(GITS, ["describe", "--tags", "--dirty",
                                      "--always", "--long",
                                      "--match", "%s*" % tag_prefix],
                               cwd=root)
    # --long was added in git-1.5.5
    if describe_out is None:
        raise NotThisMethod("'git describe' failed")
    describe_out = describe_out.strip()
    full_out = run_command(GITS, ["rev-parse", "HEAD"], cwd=root)
    if full_out is None:
        raise NotThisMethod("'git rev-parse' failed")
    full_out = full_out.strip()
    pieces = {}
    pieces["long"] = full_out
    pieces["short"] = full_out[:7]  # maybe improved later
    pieces["error"] = None
    # abbrev-ref available with git >= 1.7
    # NOTE(review): run_command() returns None on failure, and unlike the
    # describe/rev-parse calls above, these two calls .strip()/.split()
    # the result without a None check -- an AttributeError if git fails
    # here.  Confirm whether that can happen in practice.
    branch_name = run_command(GITS, ["rev-parse", "--abbrev-ref", "HEAD"],
                              cwd=root).strip()
    if branch_name == 'HEAD':
        # Detached HEAD: fall back to the branches containing this commit.
        branches = run_command(GITS, ["branch", "--contains"],
                               cwd=root).split('\n')
        # Drop the 2-char "* "/"  " marker; skip "(...)"-style entries.
        # NOTE(review): the [4:5] test looks off -- '(' normally lands at
        # column 2 of `git branch` output; verify against real output.
        branches = [branch[2:] for branch in branches if branch[4:5] != '(']
        if 'master' in branches:
            branch_name = 'master'
        elif not branches:
            branch_name = None
        else:
            # Pick the first branch that is returned. Good or bad.
            branch_name = branches[0]
    pieces['branch'] = branch_name
    # parse describe_out. It will be like TAG-NUM-gHEX[-dirty] or HEX[-dirty]
    # TAG might have hyphens.
    git_describe = describe_out
    # look for -dirty suffix
    dirty = git_describe.endswith("-dirty")
    pieces["dirty"] = dirty
    if dirty:
        git_describe = git_describe[:git_describe.rindex("-dirty")]
    # now we have TAG-NUM-gHEX or HEX
    if "-" in git_describe:
        # TAG-NUM-gHEX
        mo = re.search(r'^(.+)-(\d+)-g([0-9a-f]+)$', git_describe)
        if not mo:
            # unparseable. Maybe git-describe is misbehaving?
            pieces["error"] = ("unable to parse git-describe output: '%s'"
                               % describe_out)
            return pieces
        # tag
        full_tag = mo.group(1)
        if not full_tag.startswith(tag_prefix):
            if verbose:
                fmt = "tag '%s' doesn't start with prefix '%s'"
                print(fmt % (full_tag, tag_prefix))
            pieces["error"] = ("tag '%s' doesn't start with prefix '%s'"
                               % (full_tag, tag_prefix))
            return pieces
        pieces["closest-tag"] = full_tag[len(tag_prefix):]
        # distance: number of commits since tag
        pieces["distance"] = int(mo.group(2))
        # commit: short hex revision ID
        pieces["short"] = mo.group(3)
    else:
        # HEX: no tags
        pieces["closest-tag"] = None
        count_out = run_command(GITS, ["rev-list", "HEAD", "--count"],
                                cwd=root)
        pieces["distance"] = int(count_out)  # total number of commits
    return pieces
def do_vcs_install(manifest_in, versionfile_source, ipy):
    """Git-specific installation logic for Versioneer.

    For Git, this means creating/changing .gitattributes to mark _version.py
    for export-time keyword substitution, then `git add`-ing the touched
    files (manifest_in, the version file, __init__.py if given, this
    versioneer.py, and .gitattributes when newly written).
    """
    GITS = ["git"]
    if sys.platform == "win32":
        GITS = ["git.cmd", "git.exe"]
    files = [manifest_in, versionfile_source]
    if ipy:
        files.append(ipy)
    try:
        me = __file__
        if me.endswith(".pyc") or me.endswith(".pyo"):
            # point at the .py source, not the compiled artifact
            me = os.path.splitext(me)[0] + ".py"
        versioneer_file = os.path.relpath(me)
    except NameError:
        # frozen/embedded interpreters may not define __file__
        versioneer_file = "versioneer.py"
    files.append(versioneer_file)
    present = False
    try:
        # "with" closes the handle even if a read raises part-way through
        # (the original could leak the fd on that path).
        with open(".gitattributes", "r") as f:
            for line in f.readlines():
                if line.strip().startswith(versionfile_source):
                    if "export-subst" in line.strip().split()[1:]:
                        present = True
    except EnvironmentError:
        # no .gitattributes yet; it will be created below
        pass
    if not present:
        with open(".gitattributes", "a+") as f:
            f.write("%s export-subst\n" % versionfile_source)
        files.append(".gitattributes")
    run_command(GITS, ["add", "--"] + files)
def versions_from_parentdir(parentdir_prefix, root, verbose):
    """Try to determine the version from the parent directory name.

    Source tarballs conventionally unpack into a directory that includes
    both the project name and a version string.
    """
    dirname = os.path.basename(root)
    if dirname.startswith(parentdir_prefix):
        # Everything after the prefix is taken to be the version string.
        return {"version": dirname[len(parentdir_prefix):],
                "full-revisionid": None,
                "dirty": False, "error": None}
    if verbose:
        print("guessing rootdir is '%s', but '%s' doesn't start with "
              "prefix '%s'" % (root, dirname, parentdir_prefix))
    raise NotThisMethod("rootdir doesn't start with parentdir_prefix")
SHORT_VERSION_PY = """
# This file was generated by 'versioneer.py' (0.15+dev) from
# revision-control system data, or from the parent directory name of an
# unpacked source archive. Distribution tarballs contain a pre-generated copy
# of this file.
import json
import sys
version_json = '''
%s
''' # END VERSION_JSON
def get_versions():
return json.loads(version_json)
"""
def versions_from_file(filename):
    """Try to determine the version from _version.py if present."""
    try:
        with open(filename) as fobj:
            text = fobj.read()
    except EnvironmentError:
        raise NotThisMethod("unable to read _version.py")
    # Look for the JSON payload written by SHORT_VERSION_PY.
    match = re.search(r"version_json = '''\n(.*)''' # END VERSION_JSON",
                      text, re.M | re.S)
    if match is None:
        raise NotThisMethod("no version_json in _version.py")
    return json.loads(match.group(1))
def write_to_version_file(filename, versions):
    """Write the given version number to the given _version.py file."""
    # Remove the old file first (it may be a hardlink into the sdist tree).
    os.unlink(filename)
    payload = json.dumps(versions, sort_keys=True,
                         indent=1, separators=(",", ": "))
    with open(filename, "w") as fobj:
        fobj.write(SHORT_VERSION_PY % payload)
    print("set %s to '%s'" % (filename, versions["version"]))
# Default matches v1.2.x, maint/1.2.x, 1.2.x, 1.x etc.
# Raw string: the original non-raw literal relied on the unrecognized escape
# "\." surviving, which raises a DeprecationWarning on modern Pythons.
default_maint_branch_regexp = r".*([0-9]+\.)+x$"
def plus_or_dot(pieces):
    """Return a + if we don't already have one, else return a .

    Fix: ``pieces.get("closest-tag", "")`` returned None (not "") when the
    key was present with value None, so ``"+" in None`` raised TypeError.
    Coalesce None to "" before the membership test.
    """
    if "+" in (pieces.get("closest-tag") or ""):
        return "."
    return "+"
def render_pep440(pieces):
    """Build up version string, with post-release "local version identifier".

    Our goal: TAG[+DISTANCE.gHEX[.dirty]] . Note that if you
    get a tagged build and then dirty it, you'll get TAG+0.gHEX.dirty

    Exceptions:
    1: no tags. git_describe was just HEX. 0+untagged.DISTANCE.gHEX[.dirty]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1: no tag anywhere in history
        rendered = "0+untagged.%d.g%s" % (pieces["distance"],
                                          pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
        return rendered
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        # Append the local-version segment: distance + short hash.
        rendered += plus_or_dot(pieces)
        rendered += "%d.g%s" % (pieces["distance"], pieces["short"])
        if pieces["dirty"]:
            rendered += ".dirty"
    return rendered
def render_pep440_pre(pieces):
    """TAG[.post.devDISTANCE] -- No -dirty.

    Exceptions:
    1: no tags. 0.post.devDISTANCE
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        return "0.post.dev%d" % pieces["distance"]
    rendered = tag
    if pieces["distance"]:
        rendered += ".post.dev%d" % pieces["distance"]
    return rendered
def render_pep440_post(pieces):
    """TAG[.postDISTANCE[.dev0]+gHEX] .

    The ".dev0" means dirty. Note that .dev0 sorts backwards
    (a dirty tree will appear "older" than the corresponding clean one),
    but you shouldn't be releasing software with -dirty anyways.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    tag = pieces["closest-tag"]
    if not tag:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        return rendered + "+g%s" % pieces["short"]
    rendered = tag
    if pieces["distance"] or pieces["dirty"]:
        rendered += ".post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
        # Local-version segment with the short git hash.
        rendered += plus_or_dot(pieces)
        rendered += "g%s" % pieces["short"]
    return rendered
def render_pep440_old(pieces):
    """TAG[.postDISTANCE[.dev0]] .

    The ".dev0" means dirty.

    Exceptions:
    1: no tags. 0.postDISTANCE[.dev0]
    """
    # (Doc fix: "Eexceptions" typo corrected; logic unchanged.)
    if pieces["closest-tag"]:
        rendered = pieces["closest-tag"]
        if pieces["distance"] or pieces["dirty"]:
            rendered += ".post%d" % pieces["distance"]
            if pieces["dirty"]:
                rendered += ".dev0"
    else:
        # exception #1
        rendered = "0.post%d" % pieces["distance"]
        if pieces["dirty"]:
            rendered += ".dev0"
    return rendered
def render_git_describe(pieces):
    """TAG[-DISTANCE-gHEX][-dirty].

    Like 'git describe --tags --dirty --always'.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        rendered = tag
        if pieces["distance"]:
            rendered += "-%d-g%s" % (pieces["distance"], pieces["short"])
    else:
        # exception #1: bare short hash
        rendered = pieces["short"]
    return rendered + "-dirty" if pieces["dirty"] else rendered
def render_git_describe_long(pieces):
    """TAG-DISTANCE-gHEX[-dirty].

    Like 'git describe --tags --dirty --always -long'.
    The distance/hash is unconditional.

    Exceptions:
    1: no tags. HEX[-dirty] (note: no 'g' prefix)
    """
    tag = pieces["closest-tag"]
    if tag:
        # Distance and hash are always shown, even at distance 0.
        rendered = "%s-%d-g%s" % (tag, pieces["distance"], pieces["short"])
    else:
        # exception #1
        rendered = pieces["short"]
    if pieces["dirty"]:
        rendered += "-dirty"
    return rendered
def add_one_to_version(version_string, number_index_to_increment=-1):
    """
    Add one to a version string at the given numeric index.

    Digits to the right of the incremented group are reset to zero.

    :param version_string: e.g. 'v1.2.3'
    :param number_index_to_increment: index into the numeric groups
        (negative indices count from the right, default: last group)

    >>> add_one_to_version('v1.2.3')
    'v1.2.4'
    """
    # Break up the tag by number groups (preserving multi-digit
    # numbers as multidigit)
    parts = re.split("([0-9]+)", version_string)
    digit_parts = [(i, part) for i, part in enumerate(parts)
                   if part.isdigit()]
    if not digit_parts:
        # Fix: a string with no digits (e.g. 'unknown') used to raise
        # ZeroDivisionError from the modulo below; return it unchanged.
        return version_string
    # Deal with negative indexing.
    increment_at_index = ((number_index_to_increment + len(digit_parts))
                          % len(digit_parts))
    for n_seen, (i, part) in enumerate(digit_parts):
        if n_seen == increment_at_index:
            parts[i] = str(int(part) + 1)
        elif n_seen > increment_at_index:
            # Reset everything to the right of the bumped group.
            parts[i] = '0'
    return ''.join(parts)
def render_pep440_branch_based(pieces):
    """Render a PEP 440 version that encodes the branch name.

    NOTE(review): crashes with AttributeError if pieces['branch'] is None
    (the .replace() loop below runs unconditionally) — confirm callers
    always supply a branch name.
    """
    # [TAG+1 of minor number][.devDISTANCE][+gHEX]. The git short is
    # included for dirty.
    # exceptions:
    # 1: no tags. 0.0.0.devDISTANCE[+gHEX]
    # Sanitize characters PEP 440 local segments can't contain.
    replacements = {' ': '.', '(': '', ')': ''}
    branch_name = pieces.get('branch')
    for old, new in replacements.items():
        branch_name = branch_name.replace(old, new)
    master = branch_name == 'master'
    maint = re.match(default_maint_branch_regexp,
                     branch_name or '')
    # If we are on a tag, just pep440-pre it.
    if pieces["closest-tag"] and not (pieces["distance"] or
                                      pieces["dirty"]):
        rendered = pieces["closest-tag"]
    else:
        # Put a default closest-tag in.
        if not pieces["closest-tag"]:
            pieces["closest-tag"] = '0.0.0'
        if pieces["distance"] or pieces["dirty"]:
            if maint:
                # Maintenance branches count forward from the tag (.postN).
                rendered = pieces["closest-tag"]
                if pieces["distance"]:
                    rendered += ".post%d" % pieces["distance"]
            else:
                # Other branches count toward the *next* version (.devN).
                rendered = add_one_to_version(pieces["closest-tag"])
                if pieces["distance"]:
                    rendered += ".dev%d" % pieces["distance"]
            suffix = []
            # Put the branch name in if it isn't master nor a
            # maintenance branch.
            if not (master or maint):
                suffix.append('%s' % (branch_name or 'unknown_branch'))
            if pieces["dirty"]:
                suffix.append('g%s' % pieces["short"])
            # NOTE(review): when suffix is empty (clean build on master with
            # distance > 0) this still appends a bare '+' — verify intended.
            rendered += '+%s' % ''.join(suffix)
        else:
            rendered = pieces["closest-tag"]
    return rendered
# Registry mapping style name -> renderer. 'default' aliases 'pep440'.
# Fix: the original listed 'pep440-old' twice; the duplicate (dead) entry
# has been removed.
STYLES = {'default': render_pep440,
          'pep440': render_pep440,
          'pep440-pre': render_pep440_pre,
          'pep440-post': render_pep440_post,
          'pep440-old': render_pep440_old,
          'git-describe': render_git_describe,
          'git-describe-long': render_git_describe_long,
          'pep440-branch-based': render_pep440_branch_based,
          }
def render(pieces, style):
    """Render the given version pieces into the requested style."""
    if pieces["error"]:
        # Upstream extraction already failed; propagate the error record.
        return {"version": "unknown",
                "full-revisionid": pieces.get("long"),
                "dirty": None,
                "error": pieces["error"]}
    renderer = STYLES.get(style or 'default')
    if renderer is None:
        raise ValueError("unknown style '%s'" % style)
    return {"version": renderer(pieces),
            "full-revisionid": pieces["long"],
            "dirty": pieces["dirty"],
            "error": None}
# Raised by get_root()/setup-time checks when versioneer.py is run outside
# a properly configured project tree.
class VersioneerBadRootError(Exception):
    """The project root directory is unknown or missing key files."""
def get_versions(verbose=False):
    """Get the project version from whatever source is available.

    Returns dict with two keys: 'version' and 'full'.
    """
    if "versioneer" in sys.modules:
        # see the discussion in cmdclass.py:get_cmdclass()
        del sys.modules["versioneer"]
    root = get_root()
    cfg = get_config_from_root(root)
    # Validate the minimum configuration before trying any method.
    assert cfg.VCS is not None, "please set [versioneer]VCS= in setup.cfg"
    handlers = HANDLERS.get(cfg.VCS)
    assert handlers, "unrecognized VCS '%s'" % cfg.VCS
    verbose = verbose or cfg.verbose
    assert cfg.versionfile_source is not None, \
        "please set versioneer.versionfile_source"
    assert cfg.tag_prefix is not None, "please set versioneer.tag_prefix"
    versionfile_abs = os.path.join(root, cfg.versionfile_source)
    # extract version from first of: _version.py, VCS command (e.g. 'git
    # describe'), parentdir. This is meant to work for developers using a
    # source checkout, for users of a tarball created by 'setup.py sdist',
    # and for users of a tarball/zipball created by 'git archive' or github's
    # download-from-tag feature or the equivalent in other VCSes.
    # Each method raises NotThisMethod to mean "try the next strategy".
    get_keywords_f = handlers.get("get_keywords")
    from_keywords_f = handlers.get("keywords")
    if get_keywords_f and from_keywords_f:
        # Method 1: expanded $Format$ keywords (git-archive tarballs).
        try:
            keywords = get_keywords_f(versionfile_abs)
            ver = from_keywords_f(keywords, cfg.tag_prefix, verbose)
            if verbose:
                print("got version from expanded keyword %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # Method 2: a previously generated _version.py (sdist tarballs).
    try:
        ver = versions_from_file(versionfile_abs)
        if verbose:
            print("got version from file %s %s" % (versionfile_abs, ver))
        return ver
    except NotThisMethod:
        pass
    # Method 3: ask the VCS directly (developer checkouts).
    from_vcs_f = handlers.get("pieces_from_vcs")
    if from_vcs_f:
        try:
            pieces = from_vcs_f(cfg.tag_prefix, root, verbose)
            ver = render(pieces, cfg.style)
            if verbose:
                print("got version from VCS %s" % ver)
            return ver
        except NotThisMethod:
            pass
    # Method 4: parse the unpacked directory name (plain tarballs).
    try:
        if cfg.parentdir_prefix:
            ver = versions_from_parentdir(cfg.parentdir_prefix, root, verbose)
            if verbose:
                print("got version from parentdir %s" % ver)
            return ver
    except NotThisMethod:
        pass
    if verbose:
        print("unable to compute version")
    return {"version": "0+unknown", "full-revisionid": None,
            "dirty": None, "error": "unable to compute version"}
def get_version():
    """Get the short version string for this project."""
    versions = get_versions()
    return versions["version"]
def get_cmdclass():
    """Get the custom setuptools/distutils subclasses used by Versioneer."""
    if "versioneer" in sys.modules:
        del sys.modules["versioneer"]
        # this fixes the "python setup.py develop" case (also 'install' and
        # 'easy_install .'), in which subdependencies of the main project are
        # built (using setup.py bdist_egg) in the same python process. Assume
        # a main project A and a dependency B, which use different versions
        # of Versioneer. A's setup.py imports A's Versioneer, leaving it in
        # sys.modules by the time B's setup.py is executed, causing B to run
        # with the wrong versioneer. Setuptools wraps the sub-dep builds in a
        # sandbox that restores sys.modules to it's pre-build state, so the
        # parent is protected against the child's "import versioneer". By
        # removing ourselves from sys.modules here, before the child build
        # happens, we protect the child from the parent's versioneer too.
        # Also see https://github.com/warner/python-versioneer/issues/52
    cmds = {}
    # we add "version" to both distutils and setuptools
    from distutils.core import Command
    class cmd_version(Command):
        # "setup.py version": print the computed version and its components.
        description = "report generated version string"
        user_options = []
        boolean_options = []
        def initialize_options(self):
            pass
        def finalize_options(self):
            pass
        def run(self):
            vers = get_versions(verbose=True)
            print("Version: %s" % vers["version"])
            print(" full-revisionid: %s" % vers.get("full-revisionid"))
            print(" dirty: %s" % vers.get("dirty"))
            if vers["error"]:
                print(" error: %s" % vers["error"])
    cmds["version"] = cmd_version
    # we override "build_py" in both distutils and setuptools
    #
    # most invocation pathways end up running build_py:
    # distutils/build -> build_py
    # distutils/install -> distutils/build ->..
    # setuptools/bdist_wheel -> distutils/install ->..
    # setuptools/bdist_egg -> distutils/install_lib -> build_py
    # setuptools/install -> bdist_egg ->..
    # setuptools/develop -> ?
    # we override different "build_py" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.build_py import build_py as _build_py
    else:
        from distutils.command.build_py import build_py as _build_py
    class cmd_build_py(_build_py):
        def run(self):
            root = get_root()
            cfg = get_config_from_root(root)
            # Compute the version *before* the build copies files.
            versions = get_versions()
            _build_py.run(self)
            # now locate _version.py in the new build/ directory and replace
            # it with an updated value
            if cfg.versionfile_build:
                target_versionfile = os.path.join(self.build_lib,
                                                  cfg.versionfile_build)
                print("UPDATING %s" % target_versionfile)
                write_to_version_file(target_versionfile, versions)
    cmds["build_py"] = cmd_build_py
    if "cx_Freeze" in sys.modules:  # cx_freeze enabled?
        from cx_Freeze.dist import build_exe as _build_exe
        class cmd_build_exe(_build_exe):
            def run(self):
                root = get_root()
                cfg = get_config_from_root(root)
                versions = get_versions()
                target_versionfile = cfg.versionfile_source
                print("UPDATING %s" % target_versionfile)
                # Temporarily freeze the version into the source tree so
                # cx_Freeze bundles a static copy, then restore the
                # VCS-aware template afterwards.
                write_to_version_file(target_versionfile, versions)
                _build_exe.run(self)
                os.unlink(target_versionfile)
                with open(cfg.versionfile_source, "w") as f:
                    LONG = LONG_VERSION_PY[cfg.VCS]
                    f.write(LONG %
                            {"DOLLAR": "$",
                             "STYLE": cfg.style,
                             "TAG_PREFIX": cfg.tag_prefix,
                             "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                             "VERSIONFILE_SOURCE": cfg.versionfile_source,
                             })
        cmds["build_exe"] = cmd_build_exe
        # build_exe replaces build_py in the cx_Freeze workflow.
        del cmds["build_py"]
    # we override different "sdist" commands for both environments
    if "setuptools" in sys.modules:
        from setuptools.command.sdist import sdist as _sdist
    else:
        from distutils.command.sdist import sdist as _sdist
    class cmd_sdist(_sdist):
        def run(self):
            versions = get_versions()
            self._versioneer_generated_versions = versions
            # unless we update this, the command will keep using the old
            # version
            self.distribution.metadata.version = versions["version"]
            return _sdist.run(self)
        def make_release_tree(self, base_dir, files):
            root = get_root()
            cfg = get_config_from_root(root)
            _sdist.make_release_tree(self, base_dir, files)
            # now locate _version.py in the new base_dir directory
            # (remembering that it may be a hardlink) and replace it with an
            # updated value
            target_versionfile = os.path.join(base_dir, cfg.versionfile_source)
            print("UPDATING %s" % target_versionfile)
            write_to_version_file(target_versionfile,
                                  self._versioneer_generated_versions)
    cmds["sdist"] = cmd_sdist
    return cmds
CONFIG_ERROR = """
setup.cfg is missing the necessary Versioneer configuration. You need
a section like:
[versioneer]
VCS = git
style = pep440
versionfile_source = src/myproject/_version.py
versionfile_build = myproject/_version.py
tag_prefix =
parentdir_prefix = myproject-
You will also need to edit your setup.py to use the results:
import versioneer
setup(version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(), ...)
Please read the docstring in ./versioneer.py for configuration instructions,
edit setup.cfg, and re-run the installer or 'python versioneer.py setup'.
"""
SAMPLE_CONFIG = """
# See the docstring in versioneer.py for instructions. Note that you must
# re-run 'versioneer.py setup' after changing this section, and commit the
# resulting files.
[versioneer]
#VCS = git
#style = pep440
#versionfile_source =
#versionfile_build =
#tag_prefix =
#parentdir_prefix =
"""
INIT_PY_SNIPPET = """
from ._version import get_versions
__version__ = get_versions()['version']
del get_versions
"""
def do_setup():
    """Main VCS-independent setup function for installing Versioneer.

    Returns 0 on success, 1 when setup.cfg needs to be configured first.
    """
    root = get_root()
    try:
        cfg = get_config_from_root(root)
    except (EnvironmentError, configparser.NoSectionError,
            configparser.NoOptionError) as e:
        if isinstance(e, (EnvironmentError, configparser.NoSectionError)):
            # No config at all: seed setup.cfg with a commented template.
            print("Adding sample versioneer config to setup.cfg",
                  file=sys.stderr)
            with open(os.path.join(root, "setup.cfg"), "a") as f:
                f.write(SAMPLE_CONFIG)
        print(CONFIG_ERROR, file=sys.stderr)
        return 1
    # Generate the VCS-aware _version.py from the long template.
    print(" creating %s" % cfg.versionfile_source)
    with open(cfg.versionfile_source, "w") as f:
        LONG = LONG_VERSION_PY[cfg.VCS]
        f.write(LONG % {"DOLLAR": "$",
                        "STYLE": cfg.style,
                        "TAG_PREFIX": cfg.tag_prefix,
                        "PARENTDIR_PREFIX": cfg.parentdir_prefix,
                        "VERSIONFILE_SOURCE": cfg.versionfile_source,
                        })
    # Wire __version__ into the package __init__.py (idempotently).
    ipy = os.path.join(os.path.dirname(cfg.versionfile_source),
                       "__init__.py")
    if os.path.exists(ipy):
        try:
            with open(ipy, "r") as f:
                old = f.read()
        except EnvironmentError:
            old = ""
        if INIT_PY_SNIPPET not in old:
            print(" appending to %s" % ipy)
            with open(ipy, "a") as f:
                f.write(INIT_PY_SNIPPET)
        else:
            print(" %s unmodified" % ipy)
    else:
        print(" %s doesn't exist, ok" % ipy)
        ipy = None
    # Make sure both the top-level "versioneer.py" and versionfile_source
    # (PKG/_version.py, used by runtime code) are in MANIFEST.in, so
    # they'll be copied into source distributions. Pip won't be able to
    # install the package without this.
    manifest_in = os.path.join(root, "MANIFEST.in")
    simple_includes = set()
    try:
        with open(manifest_in, "r") as f:
            for line in f:
                if line.startswith("include "):
                    for include in line.split()[1:]:
                        simple_includes.add(include)
    except EnvironmentError:
        pass
    # That doesn't cover everything MANIFEST.in can do
    # (http://docs.python.org/2/distutils/sourcedist.html#commands), so
    # it might give some false negatives. Appending redundant 'include'
    # lines is safe, though.
    if "versioneer.py" not in simple_includes:
        print(" appending 'versioneer.py' to MANIFEST.in")
        with open(manifest_in, "a") as f:
            f.write("include versioneer.py\n")
    else:
        print(" 'versioneer.py' already in MANIFEST.in")
    if cfg.versionfile_source not in simple_includes:
        print(" appending versionfile_source ('%s') to MANIFEST.in" %
              cfg.versionfile_source)
        with open(manifest_in, "a") as f:
            f.write("include %s\n" % cfg.versionfile_source)
    else:
        print(" versionfile_source already in MANIFEST.in")
    # Make VCS-specific changes. For git, this means creating/changing
    # .gitattributes to mark _version.py for export-time keyword
    # substitution.
    do_vcs_install(manifest_in, cfg.versionfile_source, ipy)
    return 0
def scan_setup_py():
    """Validate the contents of setup.py against Versioneer's expectations.

    Returns the number of problems found (0 means setup.py looks correct).
    """
    found = set()
    setters = False
    errors = 0
    # Substrings that must appear somewhere in setup.py, keyed by tag.
    required = (("import versioneer", "import"),
                ("versioneer.get_cmdclass()", "cmdclass"),
                ("versioneer.get_version()", "get_version"))
    with open("setup.py", "r") as f:
        for line in f.readlines():
            for needle, tag in required:
                if needle in line:
                    found.add(tag)
            # Old-style in-setup.py configuration is no longer supported.
            if "versioneer.VCS" in line:
                setters = True
            if "versioneer.versionfile_source" in line:
                setters = True
    if len(found) != 3:
        print("")
        print("Your setup.py appears to be missing some important items")
        print("(but I might be wrong). Please make sure it has something")
        print("roughly like the following:")
        print("")
        print(" import versioneer")
        print(" setup( version=versioneer.get_version(),")
        print(" cmdclass=versioneer.get_cmdclass(), ...)")
        print("")
        errors += 1
    if setters:
        print("You should remove lines like 'versioneer.VCS = ' and")
        print("'versioneer.versionfile_source = ' . This configuration")
        print("now lives in setup.cfg, and should be removed from setup.py")
        print("")
        errors += 1
    return errors
# Command-line entry point: `python versioneer.py setup` installs Versioneer
# into the current project and validates setup.py; exits non-zero on error.
if __name__ == "__main__":
    cmd = sys.argv[1]
    if cmd == "setup":
        errors = do_setup()
        errors += scan_setup_py()
        if errors:
            sys.exit(1)
| gpl-3.0 |
drincruz/luigi | luigi/contrib/esindex.py | 39 | 14138 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Support for Elasticsearch (1.0.0 or newer).
Provides an :class:`ElasticsearchTarget` and a :class:`CopyToIndex` template task.
Modeled after :class:`luigi.contrib.rdbms.CopyToTable`.
A minimal example (assuming elasticsearch is running on localhost:9200):
.. code-block:: python
class ExampleIndex(CopyToIndex):
index = 'example'
def docs(self):
return [{'_id': 1, 'title': 'An example document.'}]
if __name__ == '__main__':
task = ExampleIndex()
luigi.build([task], local_scheduler=True)
All options:
.. code-block:: python
class ExampleIndex(CopyToIndex):
host = 'localhost'
port = 9200
index = 'example'
doc_type = 'default'
purge_existing_index = True
marker_index_hist_size = 1
def docs(self):
return [{'_id': 1, 'title': 'An example document.'}]
if __name__ == '__main__':
task = ExampleIndex()
luigi.build([task], local_scheduler=True)
`Host`, `port`, `index`, `doc_type` parameters are standard elasticsearch.
`purge_existing_index` will delete the index, whenever an update is required.
This is useful, when one deals with "dumps" that represent the whole data, not just updates.
`marker_index_hist_size` sets the maximum number of entries in the 'marker'
index:
* 0 (default) keeps all updates,
* 1 to only remember the most recent update to the index.
This can be useful, if an index needs to recreated, even though
the corresponding indexing task has been run sometime in the past - but
a later indexing task might have altered the index in the meantime.
There are a two luigi `luigi.cfg` configuration options:
.. code-block:: ini
[elasticsearch]
marker-index = update_log
marker-doc-type = entry
"""
# pylint: disable=F0401,E1101,C0103
import abc
import datetime
import hashlib
import json
import logging
import itertools
import luigi
from luigi import six
logger = logging.getLogger('luigi-interface')
# elasticsearch-py is an optional dependency: importing this module without
# it only logs a warning, so unrelated luigi code can still be imported.
try:
    from elasticsearch.helpers import bulk_index
    from elasticsearch.connection import Urllib3HttpConnection
    import elasticsearch
    if elasticsearch.__version__ < (1, 0, 0):
        logger.warning("This module works with elasticsearch 1.0.0 "
                       "or newer only.")
except ImportError:
    logger.warning("Loading esindex module without elasticsearch installed. "
                   "Will crash at runtime if esindex functionality is used.")
class ElasticsearchTarget(luigi.Target):
    """Target for a resource in Elasticsearch.

    Completion is tracked via a marker document written to a dedicated
    'marker' index (configurable through the [elasticsearch] section).
    """
    marker_index = luigi.configuration.get_config().get('elasticsearch',
                                                        'marker-index', 'update_log')
    marker_doc_type = luigi.configuration.get_config().get('elasticsearch',
                                                           'marker-doc-type', 'entry')
    def __init__(self, host, port, index, doc_type, update_id,
                 marker_index_hist_size=0, http_auth=None, timeout=10,
                 extra_elasticsearch_args=None):
        """
        :param host: Elasticsearch server host
        :type host: str
        :param port: Elasticsearch server port
        :type port: int
        :param index: index name
        :type index: str
        :param doc_type: doctype name
        :type doc_type: str
        :param update_id: an identifier for this data set
        :type update_id: str
        :param marker_index_hist_size: list of changes to the index to remember
        :type marker_index_hist_size: int
        :param timeout: Elasticsearch connection timeout
        :type timeout: int
        :param extra_elasticsearch_args: extra args for Elasticsearch
            (defaults to an empty dict)
        :type extra_elasticsearch_args: dict
        """
        self.host = host
        self.port = port
        self.http_auth = http_auth
        self.index = index
        self.doc_type = doc_type
        self.update_id = update_id
        self.marker_index_hist_size = marker_index_hist_size
        self.timeout = timeout
        # Fix: the default used to be a shared mutable dict ({}), which all
        # instances would alias; use None and normalize per instance.
        self.extra_elasticsearch_args = extra_elasticsearch_args or {}
        self.es = elasticsearch.Elasticsearch(
            connection_class=Urllib3HttpConnection,
            host=self.host,
            port=self.port,
            http_auth=self.http_auth,
            timeout=self.timeout,
            **self.extra_elasticsearch_args
        )
    def marker_index_document_id(self):
        """
        Generate an id for the indicator document.
        """
        # Stable sha1 over (index, doc_type, update_id) so reruns of the same
        # update overwrite the same marker.
        params = '%s:%s:%s' % (self.index, self.doc_type, self.update_id)
        return hashlib.sha1(params.encode('utf-8')).hexdigest()
    def touch(self):
        """
        Mark this update as complete.

        The document id would be sufficient but,
        for documentation,
        we index the parameters `update_id`, `target_index`, `target_doc_type` and `date` as well.
        """
        self.create_marker_index()
        self.es.index(index=self.marker_index, doc_type=self.marker_doc_type,
                      id=self.marker_index_document_id(), body={
                          'update_id': self.update_id,
                          'target_index': self.index,
                          'target_doc_type': self.doc_type,
                          'date': datetime.datetime.now()})
        self.es.indices.flush(index=self.marker_index)
        self.ensure_hist_size()
    def exists(self):
        """
        Test, if this task has been run.
        """
        try:
            self.es.get(index=self.marker_index, doc_type=self.marker_doc_type, id=self.marker_index_document_id())
            return True
        except elasticsearch.NotFoundError:
            logger.debug('Marker document not found.')
        except elasticsearch.ElasticsearchException as err:
            # Connection/transport problems are reported but treated as
            # "not done" so the task will be retried.
            # (logger.warn is a deprecated alias; use warning.)
            logger.warning(err)
        return False
    def create_marker_index(self):
        """
        Create the index that will keep track of the tasks if necessary.
        """
        if not self.es.indices.exists(index=self.marker_index):
            self.es.indices.create(index=self.marker_index)
    def ensure_hist_size(self):
        """
        Shrink the history of updates for
        a `index/doc_type` combination down to `self.marker_index_hist_size`.
        """
        if self.marker_index_hist_size == 0:
            # 0 means: keep unlimited history.
            return
        result = self.es.search(index=self.marker_index,
                                doc_type=self.marker_doc_type,
                                body={'query': {
                                    'term': {'target_index': self.index}}},
                                sort=('date:desc',))
        # Newest entries come first; delete everything past the cutoff.
        for i, hit in enumerate(result.get('hits').get('hits'), start=1):
            if i > self.marker_index_hist_size:
                marker_document_id = hit.get('_id')
                self.es.delete(id=marker_document_id, index=self.marker_index,
                               doc_type=self.marker_doc_type)
        self.es.indices.flush(index=self.marker_index)
class CopyToIndex(luigi.Task):
    """
    Template task for inserting a data set into Elasticsearch.

    Usage:

    1. Subclass and override the required `index` attribute.

    2. Implement a custom `docs` method, that returns an iterable over the documents.
       A document can be a JSON string,
       e.g. from a newline-delimited JSON (ldj) file (default implementation)
       or some dictionary.

    Optional attributes:

    * doc_type (default),
    * host (localhost),
    * port (9200),
    * settings ({'settings': {}})
    * mapping (None),
    * chunk_size (2000),
    * raise_on_error (True),
    * purge_existing_index (False),
    * marker_index_hist_size (0)

    If settings are defined, they are only applied at index creation time.
    """
    @property
    def host(self):
        """
        ES hostname.
        """
        return 'localhost'
    @property
    def port(self):
        """
        ES port.
        """
        return 9200
    @property
    def http_auth(self):
        """
        ES optional http auth information as either ‘:’ separated string or a tuple,
        e.g. `('user', 'pass')` or `"user:pass"`.
        """
        return None
    # NOTE(review): abc.abstractproperty is deprecated in favor of stacking
    # @property + @abc.abstractmethod; kept as-is for py2 compatibility.
    @abc.abstractproperty
    def index(self):
        """
        The target index.

        May exist or not.
        """
        return None
    @property
    def doc_type(self):
        """
        The target doc_type.
        """
        return 'default'
    @property
    def mapping(self):
        """
        Dictionary with custom mapping or `None`.
        """
        return None
    @property
    def settings(self):
        """
        Settings to be used at index creation time.
        """
        return {'settings': {}}
    @property
    def chunk_size(self):
        """
        Single API call for this number of docs.
        """
        return 2000
    @property
    def raise_on_error(self):
        """
        Whether to fail fast.
        """
        return True
    @property
    def purge_existing_index(self):
        """
        Whether to delete the `index` completely before any indexing.
        """
        return False
    @property
    def marker_index_hist_size(self):
        """
        Number of event log entries in the marker index. 0: unlimited.
        """
        return 0
    @property
    def timeout(self):
        """
        Timeout.
        """
        return 10
    @property
    def extra_elasticsearch_args(self):
        """
        Extra arguments to pass to the Elasticsearch constructor
        """
        return {}
    def docs(self):
        """
        Return the documents to be indexed.

        Beside the user defined fields, the document may contain an `_index`, `_type` and `_id`.
        """
        # Default: stream newline-delimited JSON from the upstream target.
        with self.input().open('r') as fobj:
            for line in fobj:
                yield line
# everything below will rarely have to be overridden
    def _docs(self):
        """
        Since `self.docs` may yield documents that do not explicitly contain `_index` or `_type`,
        add those attributes here, if necessary.
        """
        # Peek at the first document to decide (once) whether items need
        # json.loads; assumes all documents share the same form.
        iterdocs = iter(self.docs())
        # NOTE(review): on an empty docs() iterable this next() raises
        # StopIteration inside a generator, which is a RuntimeError under
        # PEP 479 (py3.7+) — confirm empty inputs cannot occur.
        first = next(iterdocs)
        needs_parsing = False
        if isinstance(first, six.string_types):
            needs_parsing = True
        elif isinstance(first, dict):
            pass
        else:
            raise RuntimeError('Document must be either JSON strings or dict.')
        for doc in itertools.chain([first], iterdocs):
            if needs_parsing:
                doc = json.loads(doc)
            if '_index' not in doc:
                doc['_index'] = self.index
            if '_type' not in doc:
                doc['_type'] = self.doc_type
            yield doc
    def _init_connection(self):
        # Fresh client per call; mirrors ElasticsearchTarget's connection.
        return elasticsearch.Elasticsearch(
            connection_class=Urllib3HttpConnection,
            host=self.host,
            port=self.port,
            http_auth=self.http_auth,
            timeout=self.timeout,
            **self.extra_elasticsearch_args
        )
    def create_index(self):
        """
        Override to provide code for creating the target index.

        By default it will be created without any special settings or mappings.
        """
        es = self._init_connection()
        if not es.indices.exists(index=self.index):
            es.indices.create(index=self.index, body=self.settings)
    def delete_index(self):
        """
        Delete the index, if it exists.
        """
        es = self._init_connection()
        if es.indices.exists(index=self.index):
            es.indices.delete(index=self.index)
    def update_id(self):
        """
        This id will be a unique identifier for this indexing task.
        """
        return self.task_id
    def output(self):
        """
        Returns a ElasticsearchTarget representing the inserted dataset.

        Normally you don't override this.
        """
        return ElasticsearchTarget(
            host=self.host,
            port=self.port,
            http_auth=self.http_auth,
            index=self.index,
            doc_type=self.doc_type,
            update_id=self.update_id(),
            marker_index_hist_size=self.marker_index_hist_size,
            timeout=self.timeout,
            extra_elasticsearch_args=self.extra_elasticsearch_args
        )
    def run(self):
        """
        Run task, namely:

        * purge existing index, if requested (`purge_existing_index`),
        * create the index, if missing,
        * apply mappings, if given,
        * set refresh interval to -1 (disable) for performance reasons,
        * bulk index in batches of size `chunk_size` (2000),
        * set refresh interval to 1s,
        * refresh Elasticsearch,
        * create entry in marker index.
        """
        if self.purge_existing_index:
            self.delete_index()
        self.create_index()
        es = self._init_connection()
        if self.mapping:
            es.indices.put_mapping(index=self.index, doc_type=self.doc_type,
                                   body=self.mapping)
        # Disable refresh during the bulk load, re-enable afterwards.
        es.indices.put_settings({"index": {"refresh_interval": "-1"}},
                                index=self.index)
        bulk_index(es, self._docs(), chunk_size=self.chunk_size,
                   raise_on_error=self.raise_on_error)
        es.indices.put_settings({"index": {"refresh_interval": "1s"}},
                                index=self.index)
        es.indices.refresh()
        # Writing the marker document is what makes complete() true.
        self.output().touch()
| apache-2.0 |
elopezga/ErrorRate | ivi/agilent/agilentDSO7104B.py | 7 | 1686 | """
Python Interchangeable Virtual Instrument Library
Copyright (c) 2012-2014 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from .agilent7000B import *
class agilentDSO7104B(agilent7000B):
    "Agilent InfiniiVision DSO7104B IVI oscilloscope driver"
    def __init__(self, *args, **kwargs):
        # setdefault: only claim the instrument id if a subclass hasn't
        # already set one before calling up the chain.
        self.__dict__.setdefault('_instrument_id', 'DSO7104B')
        super(agilentDSO7104B, self).__init__(*args, **kwargs)
        # Model-specific capabilities: 4 analog channels, no digital
        # (MSO variants add those), 1 GHz bandwidth.
        self._analog_channel_count = 4
        self._digital_channel_count = 0
        self._channel_count = self._analog_channel_count + self._digital_channel_count
        self._bandwidth = 1e9
        self._init_channels()
| mit |
bt3gl/Numerical-Methods-for-Physics | others/laplace_poisson_equations/mg_test.py | 1 | 1396 | #!/usr/bin/env python
"""
an example of using the multigrid class to solve Laplace's equation. Here, we
solve
u_xx = sin(x)
u = 0 on the boundary [0,1]
The analytic solution is u(x) = -sin(x) + x sin(1)
"""
#from io import *
import numpy
import multigrid
import pylab
# the analytic solution
def true(x):
    """Analytic solution of u_xx = sin(x) with u(0) = u(1) = 0."""
    return x*numpy.sin(1.0) - numpy.sin(x)
# the L2 error norm
def error(myg, r):
# L2 norm of elements in r, multiplied by dx to
# normalize
return numpy.sqrt(myg.dx*numpy.sum((r[myg.ilo:myg.ihi+1]**2)))
# the righthand side
def f(x):
    """Right-hand side of the Poisson problem: f(x) = sin(x)."""
    return numpy.sin(x)
# test the multigrid solver
nx = 64
# create the multigrid object
a = multigrid.ccMG1d(nx,
                     xlBCtype="dirichlet", xrBCtype="dirichlet",
                     verbose=1)
# initialize the solution to 0
init = a.solnGrid.scratchArray()
a.initSolution(init)
# initialize the RHS using the function f
rhs = f(a.x)
a.initRHS(rhs)
# solve to a relative tolerance of 1.e-11
a.solve(rtol=1.e-11)
# alternately, we can just use smoothing by uncommenting the following
#a.smooth(a.nlevels-1,50000)
# get the solution
v = a.getSolution()
# compute the error from the analytic solution
b = true(a.x)
e = v - b
# Report: L2 distance to the exact solution, the solver's own convergence
# measure, and the V-cycle count.  (Python 2 print statement — this script
# predates py3.)
print " L2 error from true solution = %g\n rel. err from previous cycle = %g\n num. cycles = %d" % \
    (error(a.solnGrid, e), a.relativeError, a.numCycles)
synergeticsedx/deployment-wipro | common/djangoapps/util/organizations_helpers.py | 62 | 3046 | """
Utility library for working with the edx-organizations app
"""
from django.conf import settings
from django.db.utils import DatabaseError
def add_organization(organization_data):
    """
    Client API operation adapter/wrapper

    Delegates to the edx-organizations API when the app is enabled;
    otherwise returns None without importing it.
    """
    if organizations_enabled():
        from organizations import api as organizations_api
        return organizations_api.add_organization(organization_data=organization_data)
    return None
def add_organization_course(organization_data, course_id):
    """
    Client API operation adapter/wrapper

    Links a course to an organization via the edx-organizations API;
    no-op (None) when the app is disabled.
    """
    if organizations_enabled():
        from organizations import api as organizations_api
        return organizations_api.add_organization_course(organization_data=organization_data, course_key=course_id)
    return None
def get_organization(organization_id):
    """
    Client API operation adapter/wrapper

    Returns an empty list when the organizations app is disabled.
    """
    if organizations_enabled():
        from organizations import api as organizations_api
        return organizations_api.get_organization(organization_id)
    return []
def get_organization_by_short_name(organization_short_name):
    """
    Client API operation adapter/wrapper

    Returns None when the app is disabled or the short name is unknown.
    """
    if organizations_enabled():
        from organizations import api as organizations_api
        from organizations.exceptions import InvalidOrganizationException
        try:
            return organizations_api.get_organization_by_short_name(organization_short_name)
        except InvalidOrganizationException:
            pass
    return None
def get_organizations():
    """
    Client API operation adapter/wrapper

    Returns [] when the app is disabled or its tables do not exist yet.
    """
    if not organizations_enabled():
        return []
    from organizations import api as organizations_api
    try:
        # Django admin forms can be instantiated before migrations have run
        # in edx-platform's test configuration, which raises DatabaseError
        # here.  Real deployments migrate before the app is exercised, so
        # treating the error as "no organizations yet" is safe.
        return organizations_api.get_organizations()
    except DatabaseError:
        return []
def get_organization_courses(organization_id):
    """
    Client API operation adapter/wrapper

    Lists the courses linked to an organization; [] when disabled.
    """
    if organizations_enabled():
        from organizations import api as organizations_api
        return organizations_api.get_organization_courses(organization_id)
    return []
def get_course_organizations(course_id):
    """
    Client API operation adapter/wrapper

    Lists the organizations linked to a course; [] when disabled.
    """
    if organizations_enabled():
        from organizations import api as organizations_api
        return organizations_api.get_course_organizations(course_id)
    return []
def organizations_enabled():
    """Return True when the ORGANIZATIONS_APP feature flag is switched on."""
    features = settings.FEATURES
    return features.get('ORGANIZATIONS_APP', False)
| agpl-3.0 |
marlengit/BitcoinUnlimited | qa/rpc-tests/prioritise_transaction.py | 1 | 6053 | #!/usr/bin/env python3
# Copyright (c) 2015 The Bitcoin Core developers
# Copyright (c) 2015-2016 The Bitcoin Unlimited developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test PrioritiseTransaction code
#
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
from test_framework.mininode import COIN, MAX_BLOCK_SIZE
class PrioritiseTransactionTest(BitcoinTestFramework):
    """Functional test for the prioritisetransaction RPC.

    Checks that fee/priority deltas change which transactions are mined
    and whether an otherwise-free transaction is accepted to the mempool.
    """
    # NOTE(review): __init__ does not call the BitcoinTestFramework
    # constructor; presumably main() performs the remaining setup --
    # confirm against the framework version in this tree.
    def __init__(self):
        self.txouts = gen_return_txouts()
    def setup_chain(self):
        # Fresh single-node chain with no pre-mined history.
        print("Initializing test directory "+self.options.tmpdir)
        initialize_chain_clean(self.options.tmpdir, 1)
    def setup_network(self):
        # One node; -printpriority makes it log priority decisions.
        self.nodes = []
        self.is_network_split = False
        self.nodes.append(start_node(0, self.options.tmpdir, ["-debug", "-printpriority=1"]))
        self.relayfee = self.nodes[0].getnetworkinfo()['relayfee']
    def run_test(self):
        utxo_count = 90
        utxos = create_confirmed_utxos(self.relayfee, self.nodes[0], utxo_count)
        base_fee = self.relayfee*100 # our transactions are smaller than 100kb
        txids = []
        # Create 3 batches of transactions at 3 different fee rate levels
        range_size = utxo_count // 3
        for i in range(3):
            txids.append([])
            start_range = i * range_size
            end_range = start_range + range_size
            txids[i] = create_lots_of_big_transactions(self.nodes[0], self.txouts, utxos[start_range:end_range], (i+1)*base_fee)
        # Make sure that the size of each group of transactions exceeds
        # MAX_BLOCK_SIZE -- otherwise the test needs to be revised to create
        # more transactions.
        mempool = self.nodes[0].getrawmempool(True)
        sizes = [0, 0, 0]
        for i in range(3):
            for j in txids[i]:
                assert(j in mempool)
                sizes[i] += mempool[j]['size']
            assert(sizes[i] > MAX_BLOCK_SIZE) # Fail => raise utxo_count
        # add a fee delta to something in the cheapest bucket and make sure it gets mined
        # also check that a different entry in the cheapest bucket is NOT mined (lower
        # the priority to ensure its not mined due to priority)
        self.nodes[0].prioritisetransaction(txids[0][0], 0, int(3*base_fee*COIN))
        self.nodes[0].prioritisetransaction(txids[0][1], -1e15, 0)
        self.nodes[0].generate(1)
        mempool = self.nodes[0].getrawmempool()
        print("Assert that prioritised transaction was mined")
        assert(txids[0][0] not in mempool)
        assert(txids[0][1] in mempool)
        # Find a high-fee transaction that was mined in the block above.
        high_fee_tx = None
        for x in txids[2]:
            if x not in mempool:
                high_fee_tx = x
        # Something high-fee should have been mined!
        assert(high_fee_tx != None)
        # Add a prioritisation before a tx is in the mempool (de-prioritising a
        # high-fee transaction so that it's now low fee).
        self.nodes[0].prioritisetransaction(high_fee_tx, -1e15, -int(2*base_fee*COIN))
        # Add everything back to mempool
        self.nodes[0].invalidateblock(self.nodes[0].getbestblockhash())
        # Check to make sure our high fee rate tx is back in the mempool
        mempool = self.nodes[0].getrawmempool()
        assert(high_fee_tx in mempool)
        # Now verify the modified-high feerate transaction isn't mined before
        # the other high fee transactions. Keep mining until our mempool has
        # decreased by all the high fee size that we calculated above.
        while (self.nodes[0].getmempoolinfo()['bytes'] > sizes[0] + sizes[1]):
            self.nodes[0].generate(1)
        # High fee transaction should not have been mined, but other high fee rate
        # transactions should have been.
        mempool = self.nodes[0].getrawmempool()
        print("Assert that de-prioritised transaction is still in mempool")
        assert(high_fee_tx in mempool)
        for x in txids[2]:
            if (x != high_fee_tx):
                assert(x not in mempool)
        # Create a free, low priority transaction.  Should be rejected.
        utxo_list = self.nodes[0].listunspent()
        assert(len(utxo_list) > 0)
        utxo = utxo_list[0]
        inputs = []
        outputs = {}
        inputs.append({"txid" : utxo["txid"], "vout" : utxo["vout"]})
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx = self.nodes[0].createrawtransaction(inputs, outputs)
        tx_hex = self.nodes[0].signrawtransaction(raw_tx)["hex"]
        txid = self.nodes[0].sendrawtransaction(tx_hex)
        # A tx that spends an in-mempool tx has 0 priority, so we can use it to
        # test the effect of using prioritise transaction for mempool acceptance
        inputs = []
        inputs.append({"txid": txid, "vout": 0})
        outputs = {}
        outputs[self.nodes[0].getnewaddress()] = utxo["amount"] - self.relayfee
        raw_tx2 = self.nodes[0].createrawtransaction(inputs, outputs)
        tx2_hex = self.nodes[0].signrawtransaction(raw_tx2)["hex"]
        tx2_id = self.nodes[0].decoderawtransaction(tx2_hex)["txid"]
        try:
            self.nodes[0].sendrawtransaction(tx2_hex)
        except JSONRPCException as exp:
            assert_equal(exp.error['code'], -26) # insufficient fee
            assert(tx2_id not in self.nodes[0].getrawmempool())
        else:
            assert(False)
        # This is a less than 1000-byte transaction, so just set the fee
        # to be the minimum for a 1000 byte transaction and check that it is
        # accepted.
        self.nodes[0].prioritisetransaction(tx2_id, 0, int(self.relayfee*COIN))
        print("Assert that prioritised free transaction is accepted to mempool")
        assert_equal(self.nodes[0].sendrawtransaction(tx2_hex), tx2_id)
        assert(tx2_id in self.nodes[0].getrawmempool())
# Standard entry point: run the test directly with framework defaults.
if __name__ == '__main__':
    PrioritiseTransactionTest().main()
| mit |
TeamBliss-Devices/android_kernel_samsung_ms013g | arch/ia64/scripts/unwcheck.py | 13143 | 1714 | #!/usr/bin/python
#
# Usage: unwcheck.py FILE
#
# This script checks the unwind info of each function in file FILE
# and verifies that the sum of the region-lengths matches the total
# length of the function.
#
# Based on a shell/awk script originally written by Harish Patil,
# which was converted to Perl by Matthew Chapman, which was converted
# to Python by David Mosberger.
#
import os
import re
import sys
# This script is Python 2 (print statements, long literals).
# Exactly one argument is required: the file whose unwind info to check.
if len(sys.argv) != 2:
    print "Usage: %s FILE" % sys.argv[0]
    sys.exit(2)
# The readelf binary can be overridden via the READELF environment variable.
readelf = os.getenv("READELF", "readelf")
# Matches a function header line: "<name>: [0xstart-0xend]".
start_pattern = re.compile("<([^>]*)>: \[0x([0-9a-f]+)-0x([0-9a-f]+)\]")
# Matches an unwind-region line carrying its length in slots: "... rlen=N".
rlen_pattern = re.compile(".*rlen=([0-9]+)")
def check_func (func, slots, rlen_sum):
if slots != rlen_sum:
global num_errors
num_errors += 1
if not func: func = "[%#x-%#x]" % (start, end)
print "ERROR: %s: %lu slots, total region length = %lu" % (func, slots, rlen_sum)
return
# Running totals and per-function state for the readelf scan below.
num_funcs = 0
num_errors = 0
func = False
slots = 0
rlen_sum = 0
for line in os.popen("%s -u %s" % (readelf, sys.argv[1])):
    m = start_pattern.match(line)
    if m:
        # A new function begins: validate the previous one first.
        check_func(func, slots, rlen_sum)
        func = m.group(1)
        start = long(m.group(2), 16)
        end = long(m.group(3), 16)
        # IA-64 packs 3 instruction slots into each 16-byte bundle.
        slots = 3 * (end - start) / 16
        rlen_sum = 0L
        num_funcs += 1
    else:
        m = rlen_pattern.match(line)
        if m:
            rlen_sum += long(m.group(1))
# Flush the final function (the loop only checks on the NEXT header).
check_func(func, slots, rlen_sum)
if num_errors == 0:
    print "No errors detected in %u functions." % num_funcs
else:
    if num_errors > 1:
        err="errors"
    else:
        err="error"
    print "%u %s detected in %u functions." % (num_errors, err, num_funcs)
    sys.exit(1)
| gpl-2.0 |
kaiweifan/vse-lbaas-plugin-poc | quantum/tests/unit/test_agent_ovs_cleanup.py | 2 | 4345 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import itertools
import mock
from oslo.config import cfg
from quantum.agent.linux import ip_lib
from quantum.agent.linux import ovs_lib
from quantum.agent import ovs_cleanup_util as util
from quantum.openstack.common import uuidutils
from quantum.tests import base
class TestOVSCleanup(base.BaseTestCase):
    """Unit tests for the ovs_cleanup utility: config defaults, the main()
    orchestration flow, and the port collect/delete helpers."""
    def setUp(self):
        super(TestOVSCleanup, self).setUp()
        # Reset global oslo.config state after every test.
        self.addCleanup(cfg.CONF.reset)
    def test_setup_conf(self):
        """setup_conf() exposes the expected option defaults."""
        conf = util.setup_conf()
        self.assertEqual(conf.external_network_bridge, 'br-ex')
        self.assertEqual(conf.ovs_integration_bridge, 'br-int')
        self.assertFalse(conf.ovs_all_ports)
        self.assertEqual(conf.AGENT.root_helper, 'sudo')
    def test_main(self):
        """main() deletes bridge ports then collects/deletes quantum ports."""
        bridges = ['br-int', 'br-ex']
        ports = ['p1', 'p2', 'p3']
        conf = mock.Mock()
        conf.AGENT.root_helper = 'dummy_sudo'
        conf.ovs_all_ports = False
        conf.ovs_integration_bridge = 'br-int'
        conf.external_network_bridge = 'br-ex'
        # Stub out logging, config, bridge discovery and the helpers so
        # only main()'s orchestration is exercised.
        with contextlib.nested(
            mock.patch('quantum.common.config.setup_logging'),
            mock.patch('quantum.agent.ovs_cleanup_util.setup_conf',
                       return_value=conf),
            mock.patch('quantum.agent.linux.ovs_lib.get_bridges',
                       return_value=bridges),
            mock.patch('quantum.agent.linux.ovs_lib.OVSBridge'),
            mock.patch.object(util, 'collect_quantum_ports',
                              return_value=ports),
            mock.patch.object(util, 'delete_quantum_ports')
        ) as (_log, _conf, _get, ovs, collect, delete):
            with mock.patch('quantum.common.config.setup_logging'):
                util.main()
                ovs.assert_has_calls([mock.call().delete_ports(
                    all_ports=False)])
                collect.assert_called_once_with(set(bridges), 'dummy_sudo')
                delete.assert_called_once_with(ports, 'dummy_sudo')
    def test_collect_quantum_ports(self):
        """collect_quantum_ports() flattens VIF port names across bridges."""
        port1 = ovs_lib.VifPort('tap1234', 1, uuidutils.generate_uuid(),
                                '11:22:33:44:55:66', 'br')
        port2 = ovs_lib.VifPort('tap5678', 2, uuidutils.generate_uuid(),
                                '77:88:99:aa:bb:cc', 'br')
        port3 = ovs_lib.VifPort('tap90ab', 3, uuidutils.generate_uuid(),
                                '99:00:aa:bb:cc:dd', 'br')
        # One get_vif_ports() result per bridge (side_effect pops in order).
        ports = [[port1, port2], [port3]]
        portnames = [p.port_name for p in itertools.chain(*ports)]
        with mock.patch('quantum.agent.linux.ovs_lib.OVSBridge') as ovs:
            ovs.return_value.get_vif_ports.side_effect = ports
            bridges = ['br-int', 'br-ex']
            ret = util.collect_quantum_ports(bridges, 'dummy_sudo')
            self.assertEqual(ret, portnames)
    def test_delete_quantum_ports(self):
        """delete_quantum_ports() removes only the devices that exist."""
        ports = ['tap1234', 'tap5678', 'tap09ab']
        port_found = [True, False, True]
        # NOTE(review): delete_ports is computed but unused; the expected
        # calls are asserted explicitly below instead.
        delete_ports = [p for p, found
                        in itertools.izip(ports, port_found) if found]
        with contextlib.nested(
            mock.patch.object(ip_lib, 'device_exists',
                              side_effect=port_found),
            mock.patch.object(ip_lib, 'IPDevice')
        ) as (device_exists, ip_dev):
            util.delete_quantum_ports(ports, 'dummy_sudo')
            device_exists.assert_has_calls([mock.call(p) for p in ports])
            ip_dev.assert_has_calls(
                [mock.call('tap1234', 'dummy_sudo'),
                 mock.call().link.delete(),
                 mock.call('tap09ab', 'dummy_sudo'),
                 mock.call().link.delete()])
| apache-2.0 |
chouseknecht/ansible | lib/ansible/plugins/strategy/debug.py | 134 | 1288 | # This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
strategy: debug
short_description: Executes tasks in interactive debug session.
description:
- Task execution is 'linear' but controlled by an interactive debug session.
version_added: "2.1"
author: Kishin Yagami
'''
import cmd
import pprint
import sys
from ansible.plugins.strategy.linear import StrategyModule as LinearStrategyModule
class StrategyModule(LinearStrategyModule):
    """Linear task-execution strategy that starts with the debugger active."""

    def __init__(self, tqm):
        super(StrategyModule, self).__init__(tqm)
        # Unlike the plain linear strategy, drop into the interactive
        # debug session from the very first task onward.
        self.debugger_active = True
| gpl-3.0 |
tzewangdorje/SIPserv | Twisted-13.1.0/twisted/internet/fdesc.py | 62 | 3223 | # -*- test-case-name: twisted.test.test_fdesc -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Utility functions for dealing with POSIX file descriptors.
"""
import os
import errno
try:
import fcntl
except ImportError:
fcntl = None
# twisted imports
from twisted.internet.main import CONNECTION_LOST, CONNECTION_DONE
def setNonBlocking(fd):
    """
    Put the given file descriptor into non-blocking mode.
    """
    currentFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, currentFlags | os.O_NONBLOCK)
def setBlocking(fd):
    """
    Put the given file descriptor back into blocking mode.
    """
    currentFlags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, currentFlags & ~os.O_NONBLOCK)
if fcntl is None:
    # fcntl isn't available on Windows. By default, handles aren't
    # inherited on Windows, so we can do nothing here.
    _setCloseOnExec = _unsetCloseOnExec = lambda fd: None
else:
    def _setCloseOnExec(fd):
        """
        Make a file descriptor close-on-exec.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags | fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
    def _unsetCloseOnExec(fd):
        """
        Make a file descriptor no longer close-on-exec.
        """
        flags = fcntl.fcntl(fd, fcntl.F_GETFD)
        flags = flags & ~fcntl.FD_CLOEXEC
        fcntl.fcntl(fd, fcntl.F_SETFD, flags)
def readFromFD(fd, callback):
    """
    Read up to 8192 bytes from a non-blocking file descriptor.

    On success C{callback} is invoked once with the data read and C{None}
    is returned.  If the read would block or was interrupted, C{None} is
    returned without invoking C{callback}.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be read from.
    @param callback: a one-argument callable invoked with the data read.
        Exceptions raised by the callback propagate to the caller.
    @return: C{CONNECTION_LOST} on error, C{CONNECTION_DONE} when the
        descriptor reports end-of-file, otherwise C{None}.
    """
    try:
        data = os.read(fd, 8192)
    except (OSError, IOError) as e:
        # EAGAIN/EINTR mean "try again later"; anything else is fatal.
        if e.args[0] in (errno.EAGAIN, errno.EINTR):
            return None
        return CONNECTION_LOST
    if not data:
        return CONNECTION_DONE
    callback(data)
def writeToFD(fd, data):
    """
    Write C{data} to a non-blocking file descriptor.

    @type fd: C{int}
    @param fd: non-blocking file descriptor to be written to.
    @type data: C{str} or C{buffer}
    @param data: bytes to write to fd.
    @return: number of bytes written (0 when the write would block or was
        interrupted), or C{CONNECTION_LOST} on any other error.
    """
    try:
        return os.write(fd, data)
    except (OSError, IOError) as exc:
        if exc.errno in (errno.EAGAIN, errno.EINTR):
            return 0
        return CONNECTION_LOST
__all__ = ["setNonBlocking", "setBlocking", "readFromFD", "writeToFD"]
| gpl-3.0 |
drpaneas/linuxed.gr | lib/python2.7/site-packages/unidecode/x06b.py | 252 | 4608 | data = (
'Xiang ', # 0x00
'Nong ', # 0x01
'Bo ', # 0x02
'Chan ', # 0x03
'Lan ', # 0x04
'Ju ', # 0x05
'Shuang ', # 0x06
'She ', # 0x07
'Wei ', # 0x08
'Cong ', # 0x09
'Quan ', # 0x0a
'Qu ', # 0x0b
'Cang ', # 0x0c
'[?] ', # 0x0d
'Yu ', # 0x0e
'Luo ', # 0x0f
'Li ', # 0x10
'Zan ', # 0x11
'Luan ', # 0x12
'Dang ', # 0x13
'Jue ', # 0x14
'Em ', # 0x15
'Lan ', # 0x16
'Lan ', # 0x17
'Zhu ', # 0x18
'Lei ', # 0x19
'Li ', # 0x1a
'Ba ', # 0x1b
'Nang ', # 0x1c
'Yu ', # 0x1d
'Ling ', # 0x1e
'Tsuki ', # 0x1f
'Qian ', # 0x20
'Ci ', # 0x21
'Huan ', # 0x22
'Xin ', # 0x23
'Yu ', # 0x24
'Yu ', # 0x25
'Qian ', # 0x26
'Ou ', # 0x27
'Xu ', # 0x28
'Chao ', # 0x29
'Chu ', # 0x2a
'Chi ', # 0x2b
'Kai ', # 0x2c
'Yi ', # 0x2d
'Jue ', # 0x2e
'Xi ', # 0x2f
'Xu ', # 0x30
'Xia ', # 0x31
'Yu ', # 0x32
'Kuai ', # 0x33
'Lang ', # 0x34
'Kuan ', # 0x35
'Shuo ', # 0x36
'Xi ', # 0x37
'Ai ', # 0x38
'Yi ', # 0x39
'Qi ', # 0x3a
'Hu ', # 0x3b
'Chi ', # 0x3c
'Qin ', # 0x3d
'Kuan ', # 0x3e
'Kan ', # 0x3f
'Kuan ', # 0x40
'Kan ', # 0x41
'Chuan ', # 0x42
'Sha ', # 0x43
'Gua ', # 0x44
'Yin ', # 0x45
'Xin ', # 0x46
'Xie ', # 0x47
'Yu ', # 0x48
'Qian ', # 0x49
'Xiao ', # 0x4a
'Yi ', # 0x4b
'Ge ', # 0x4c
'Wu ', # 0x4d
'Tan ', # 0x4e
'Jin ', # 0x4f
'Ou ', # 0x50
'Hu ', # 0x51
'Ti ', # 0x52
'Huan ', # 0x53
'Xu ', # 0x54
'Pen ', # 0x55
'Xi ', # 0x56
'Xiao ', # 0x57
'Xu ', # 0x58
'Xi ', # 0x59
'Sen ', # 0x5a
'Lian ', # 0x5b
'Chu ', # 0x5c
'Yi ', # 0x5d
'Kan ', # 0x5e
'Yu ', # 0x5f
'Chuo ', # 0x60
'Huan ', # 0x61
'Zhi ', # 0x62
'Zheng ', # 0x63
'Ci ', # 0x64
'Bu ', # 0x65
'Wu ', # 0x66
'Qi ', # 0x67
'Bu ', # 0x68
'Bu ', # 0x69
'Wai ', # 0x6a
'Ju ', # 0x6b
'Qian ', # 0x6c
'Chi ', # 0x6d
'Se ', # 0x6e
'Chi ', # 0x6f
'Se ', # 0x70
'Zhong ', # 0x71
'Sui ', # 0x72
'Sui ', # 0x73
'Li ', # 0x74
'Cuo ', # 0x75
'Yu ', # 0x76
'Li ', # 0x77
'Gui ', # 0x78
'Dai ', # 0x79
'Dai ', # 0x7a
'Si ', # 0x7b
'Jian ', # 0x7c
'Zhe ', # 0x7d
'Mo ', # 0x7e
'Mo ', # 0x7f
'Yao ', # 0x80
'Mo ', # 0x81
'Cu ', # 0x82
'Yang ', # 0x83
'Tian ', # 0x84
'Sheng ', # 0x85
'Dai ', # 0x86
'Shang ', # 0x87
'Xu ', # 0x88
'Xun ', # 0x89
'Shu ', # 0x8a
'Can ', # 0x8b
'Jue ', # 0x8c
'Piao ', # 0x8d
'Qia ', # 0x8e
'Qiu ', # 0x8f
'Su ', # 0x90
'Qing ', # 0x91
'Yun ', # 0x92
'Lian ', # 0x93
'Yi ', # 0x94
'Fou ', # 0x95
'Zhi ', # 0x96
'Ye ', # 0x97
'Can ', # 0x98
'Hun ', # 0x99
'Dan ', # 0x9a
'Ji ', # 0x9b
'Ye ', # 0x9c
'Zhen ', # 0x9d
'Yun ', # 0x9e
'Wen ', # 0x9f
'Chou ', # 0xa0
'Bin ', # 0xa1
'Ti ', # 0xa2
'Jin ', # 0xa3
'Shang ', # 0xa4
'Yin ', # 0xa5
'Diao ', # 0xa6
'Cu ', # 0xa7
'Hui ', # 0xa8
'Cuan ', # 0xa9
'Yi ', # 0xaa
'Dan ', # 0xab
'Du ', # 0xac
'Jiang ', # 0xad
'Lian ', # 0xae
'Bin ', # 0xaf
'Du ', # 0xb0
'Tsukusu ', # 0xb1
'Jian ', # 0xb2
'Shu ', # 0xb3
'Ou ', # 0xb4
'Duan ', # 0xb5
'Zhu ', # 0xb6
'Yin ', # 0xb7
'Qing ', # 0xb8
'Yi ', # 0xb9
'Sha ', # 0xba
'Que ', # 0xbb
'Ke ', # 0xbc
'Yao ', # 0xbd
'Jun ', # 0xbe
'Dian ', # 0xbf
'Hui ', # 0xc0
'Hui ', # 0xc1
'Gu ', # 0xc2
'Que ', # 0xc3
'Ji ', # 0xc4
'Yi ', # 0xc5
'Ou ', # 0xc6
'Hui ', # 0xc7
'Duan ', # 0xc8
'Yi ', # 0xc9
'Xiao ', # 0xca
'Wu ', # 0xcb
'Guan ', # 0xcc
'Mu ', # 0xcd
'Mei ', # 0xce
'Mei ', # 0xcf
'Ai ', # 0xd0
'Zuo ', # 0xd1
'Du ', # 0xd2
'Yu ', # 0xd3
'Bi ', # 0xd4
'Bi ', # 0xd5
'Bi ', # 0xd6
'Pi ', # 0xd7
'Pi ', # 0xd8
'Bi ', # 0xd9
'Chan ', # 0xda
'Mao ', # 0xdb
'[?] ', # 0xdc
'[?] ', # 0xdd
'Pu ', # 0xde
'Mushiru ', # 0xdf
'Jia ', # 0xe0
'Zhan ', # 0xe1
'Sai ', # 0xe2
'Mu ', # 0xe3
'Tuo ', # 0xe4
'Xun ', # 0xe5
'Er ', # 0xe6
'Rong ', # 0xe7
'Xian ', # 0xe8
'Ju ', # 0xe9
'Mu ', # 0xea
'Hao ', # 0xeb
'Qiu ', # 0xec
'Dou ', # 0xed
'Mushiru ', # 0xee
'Tan ', # 0xef
'Pei ', # 0xf0
'Ju ', # 0xf1
'Duo ', # 0xf2
'Cui ', # 0xf3
'Bi ', # 0xf4
'San ', # 0xf5
'[?] ', # 0xf6
'Mao ', # 0xf7
'Sui ', # 0xf8
'Yu ', # 0xf9
'Yu ', # 0xfa
'Tuo ', # 0xfb
'He ', # 0xfc
'Jian ', # 0xfd
'Ta ', # 0xfe
'San ', # 0xff
)
| mit |
fource/fource | your_hacks/aditya/fource_http.py | 1 | 3453 | import requests
class HttpClass(object):
    """
    Generate an HTTP Request. Accepts request parameters as params_dict during init.

    Accepted Parameters:
        HTTP URL as url - String
        HTTP Method as method - String (case-insensitive; defaults to 'get')
        HTTP Headers as headers - Dictionary
        HTTP Basic Auth as auth - Tuple
        Query String/POST Data/PUT Data as data - Dictionary
        Request Type in Accept Header as data_type - String ('json' or 'xml')

    Usage:
        from http import HttpClass
        params = {'url':'https://httpbin.org/get','method':'get'}
        reqobj = HttpClass(params)
        r = reqobj.execute()
    """
    def __init__(self, http_params):
        self.req_url = http_params.get('url')
        self.req_method = http_params.get('method')
        if self.req_method is None:
            self.req_method = 'get'
        self.req_headers = http_params.get('headers')
        if self.req_headers is None:
            self.req_headers = {}
        self.req_auth = http_params.get('auth')
        self.req_data = http_params.get('data')
        self.requestDataTypeGenerator(http_params.get('data_type'))
        # Populated by execute()/the *Request() methods with a response summary.
        self.result = None

    def requestDataTypeGenerator(self, datatype):
        """Set the Accept header from *datatype*; unknown types fall back to JSON."""
        data_to_accept = {'json': 'application/json', 'xml': 'application/xml'}
        accept = data_to_accept.get(datatype)
        if accept is None:
            accept = data_to_accept.get('json')
        self.req_headers['Accept'] = accept

    def execute(self):
        """Dispatch the request according to req_method and return the result dict.

        Unknown methods leave self.result as None.
        """
        # Bug fix: the original lower()ed the method only for the GET
        # comparison, so e.g. method='POST' silently did nothing.
        # Normalise the method once so all verbs are case-insensitive.
        method = self.req_method.lower()
        if method == 'get':
            self.getRequest()
        if method == 'post':
            self.postRequest()
        if method == 'put':
            self.putRequest()
        if method == 'delete':
            self.delRequest()
        return self.result

    def _performRequest(self, requester):
        """Issue the request via *requester* (requests.get/post/put/delete)
        and cache a summary dict on self.result.

        NOTE(review): the payload is always passed as params= (query string),
        even for POST/PUT -- kept as-is for backward compatibility; use data=
        if a request body is intended.
        """
        if self.req_auth is None:
            r = requester(self.req_url, headers=self.req_headers, params=self.req_data)
        else:
            r = requester(self.req_url, headers=self.req_headers, params=self.req_data, auth=self.req_auth)
        self.result = {'status': r.status_code, 'request_headers': r.request.headers, 'url': r.url, 'response_headers': r.headers, 'response': r.text}

    def getRequest(self):
        """Perform an HTTP GET and cache the result summary."""
        self._performRequest(requests.get)

    def postRequest(self):
        """Perform an HTTP POST and cache the result summary."""
        self._performRequest(requests.post)

    def putRequest(self):
        """Perform an HTTP PUT and cache the result summary."""
        self._performRequest(requests.put)

    def delRequest(self):
        """Perform an HTTP DELETE and cache the result summary."""
        self._performRequest(requests.delete)
| mit |
BillBillBillBill/WishTalk-server | WishTalk/api/image.py | 1 | 2980 | # -*- coding: utf-8 -*-
#!/usr/bin/env python
import os
import uuid
from api import api, GlobalError
from flask import request, Response
from model.image import Image as Image_model
from server import db
from util.jsonResponse import jsonSuccess, jsonError
from config import UPLOAD_PATH
from werkzeug.utils import secure_filename
from PIL import Image
import StringIO
# Image extensions the upload/list endpoints accept (case-sensitive match).
ALLOWED_EXTENSIONS = {'png', 'jpg', 'jpeg', 'PNG', 'JPG', 'JPEG'}
def allowed_file(filename):
    """Return whether *filename* carries an allowed image extension."""
    if '.' not in filename:
        return False
    return filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS
def get_all_img_list():
    """Names of files in UPLOAD_PATH whose last extension is an allowed image type."""
    return [name for name in os.listdir(UPLOAD_PATH)
            if name.split('.')[-1] in ALLOWED_EXTENSIONS]
@api.route('/image/<string:filename>', methods=['GET'])
def get_image(filename=None):
    """Serve an uploaded image, optionally resized and/or recompressed.

    Query args: width/height in pixels (-1 = derive from the other dimension,
    preserving aspect ratio); compress (any non-empty value switches the
    output format from PNG to JPEG).
    """
    if not filename:
        return jsonError(), 404
    else:
        # Sanitize the name to prevent path traversal outside UPLOAD_PATH.
        filename = secure_filename(filename)
        if os.path.exists(os.path.join(UPLOAD_PATH, filename)):
            width = int(request.args.get('width', -1))
            height = int(request.args.get('height', -1))
            compress = request.args.get('compress', '')
            format = 'png'
            if compress:
                format = 'jpeg'
            image = Image.open(os.path.join(UPLOAD_PATH, filename))
            w, h = image.size
            # -1 means "not specified": keep the stored size, or derive the
            # missing dimension so the aspect ratio is preserved.
            if width == -1 and height == -1:
                width = w
                height = h
            elif width == -1 and height != -1:
                width = int(height * 1.0 / h * w)
            elif width != -1 and height == -1:
                height = int(width * 1.0 / w * h)
            image = image.resize((width, height), Image.ANTIALIAS)
            # Render into an in-memory buffer; nothing is written to disk.
            o = StringIO.StringIO()
            image.save(o, format)
            return Response(o.getvalue(), mimetype='image/' + format)
        else:
            return jsonError(), 404
@api.route('/image', methods=['POST'])
def upload_image():
    '''
    curl -F "image=@/home/jay/Desktop/1432538993239.jpg" "http://localhost:5000/api/image"
    '''
    # Save the upload under a fresh UUID-based name (only the original
    # extension is kept) and record it in the database.
    try:
        image = request.files['image']
        if image and allowed_file(image.filename):
            filename = uuid.uuid4().hex + '.' + image.filename.rsplit('.',1)[1]
            image.save(os.path.join(UPLOAD_PATH, filename))
            image = Image_model(filename)
            db.session.add(image)
            db.session.commit()
            return jsonSuccess({'filename': filename}), 201
        else:
            return jsonError(GlobalError.INVALID_FILE), 403
    except Exception, e:
        # NOTE(review): every failure (missing form field, disk or DB error)
        # is reported as INVALID_FILE; the exception is only printed.
        print e
        return jsonError(GlobalError.INVALID_FILE), 403
@api.route('/image/<string:filename>', methods=['DELETE'])
def delete_image(filename=None):
    """Delete an uploaded image (file on disk plus its DB row).

    Best-effort: errors (e.g. file already removed) are deliberately
    swallowed and 204 is returned regardless.
    """
    if not filename:
        return jsonError(), 404
    filename = secure_filename(filename)
    try:
        os.remove(os.path.join(UPLOAD_PATH, filename))
        Image_model.query.filter_by(filename=filename).delete()
        db.session.commit()
    except:
        pass
    return jsonSuccess(), 204
| mit |
infoxchange/django-localflavor | tests/test_be.py | 7 | 2985 | from __future__ import unicode_literals
from django.test import SimpleTestCase
from localflavor.be.forms import BEPhoneNumberField, BEPostalCodeField, BEProvinceSelect, BERegionSelect
class BELocalFlavorTests(SimpleTestCase):
    """Tests for the Belgian (BE) localflavor form fields and widgets."""
    def test_BEPostalCodeField(self):
        """Valid postal codes are four digits in 1000-9999."""
        error_format = ['Enter a valid postal code in the range and format 1XXX - 9XXX.']
        valid = {
            '1451': '1451',
            '2540': '2540',
        }
        invalid = {
            '0287': error_format,
            '14309': error_format,
            '873': error_format,
            '35 74': error_format,
            '859A': error_format,
        }
        self.assertFieldOutput(BEPostalCodeField, valid, invalid)
    def test_BEPhoneNumberField(self):
        """Accepts the documented Belgian phone formats; anything else errors."""
        error_format = [
            ('Enter a valid phone number in one of the formats 0x xxx xx xx, '
             '0xx xx xx xx, 04xx xx xx xx, 0x/xxx.xx.xx, 0xx/xx.xx.xx, '
             '04xx/xx.xx.xx, 0x.xxx.xx.xx, 0xx.xx.xx.xx, 04xx.xx.xx.xx, '
             '0xxxxxxxx or 04xxxxxxxx.')
        ]
        valid = {
            '01 234 56 78': '01 234 56 78',
            '01/234.56.78': '01/234.56.78',
            '01.234.56.78': '01.234.56.78',
            '012 34 56 78': '012 34 56 78',
            '012/34.56.78': '012/34.56.78',
            '012.34.56.78': '012.34.56.78',
            '0412 34 56 78': '0412 34 56 78',
            '0412/34.56.78': '0412/34.56.78',
            '0412.34.56.78': '0412.34.56.78',
            '012345678': '012345678',
            '0412345678': '0412345678',
        }
        invalid = {
            '01234567': error_format,
            '12/345.67.89': error_format,
            '012/345.678.90': error_format,
            '012/34.56.789': error_format,
            '0123/45.67.89': error_format,
            '012/345 678 90': error_format,
            '012/34 56 789': error_format,
            '012.34 56 789': error_format,
        }
        self.assertFieldOutput(BEPhoneNumberField, valid, invalid)
    def test_BERegionSelect(self):
        """The region widget renders all three regions, selecting the value."""
        f = BERegionSelect()
        out = '''<select name="regions">
<option value="BRU">Brussels Capital Region</option>
<option value="VLG" selected="selected">Flemish Region</option>
<option value="WAL">Wallonia</option>
</select>'''
        self.assertHTMLEqual(f.render('regions', 'VLG'), out)
    def test_BEProvinceSelect(self):
        """The province widget renders all provinces, selecting the value."""
        f = BEProvinceSelect()
        out = '''<select name="provinces">
<option value="VAN">Antwerp</option>
<option value="BRU">Brussels</option>
<option value="VOV">East Flanders</option>
<option value="VBR">Flemish Brabant</option>
<option value="WHT">Hainaut</option>
<option value="WLG" selected="selected">Liege</option>
<option value="VLI">Limburg</option>
<option value="WLX">Luxembourg</option>
<option value="WNA">Namur</option>
<option value="WBR">Walloon Brabant</option>
<option value="VWV">West Flanders</option>
</select>'''
        self.assertHTMLEqual(f.render('provinces', 'WLG'), out)
| bsd-3-clause |
dgsantana/arsenalsuite | cpp/apps/bach/data_export/FixCachedKeywords.py | 10 | 7224 | #!/usr/bin/env python2.5
#
# Copyright (c) 2009 Dr. D Studios. (Please refer to license for details)
# SVN_META_HEADURL = "$HeadURL: $"
# SVN_META_ID = "$Id: FixCachedKeywords.py 9408 2010-03-03 22:35:49Z brobison $"
#
import sys
import os
from PyQt4.QtSql import *
#-----------------------------------------------------------------------------
class FixCachedKeywords:
#-----------------------------------------------------------------------------
def __init__( self, parent ):
self.parent = parent
self.pgAssetName2Id = {}
self.pgAssetId2Name = {}
self.pgKeywordName2Id = {}
self.pgKeywordId2Name = {}
self.pgKeywordMapping = {}
self._pgdb = QSqlDatabase.addDatabase( "QPSQL", "pgDB" )
self._pgdb.setDatabaseName( "bach" )
self._pgdb.setHostName( "sql01" )
self._pgdb.setUserName( "bach" )
self._pgdb.setPassword( "escher" )
if not self._pgdb.open():
self.p( "Couldn't open Bach DB" )
return False
self.p( "Opened Bach DB" )
self.dryRun = True
self.dryRun = False
self.fout = file( 'fixKeyword.bach.sql', 'wt' )
self.collectPGData_Keyword()
idx = 0
for k in self.pgKeywordMapping:
keywords = ','.join( self.pgKeywordMapping[ k ] )
s = "UPDATE bachasset SET cachedkeywords='%s' WHERE keybachasset=%d;" % ( esc( keywords ), k )
self._doPGSqlMod( s )
print idx, len( self.pgKeywordMapping), s
#-----------------------------------------------------------------------------
def p( self, p ):
self.parent.printIt( p )
#-----------------------------------------------------------------------------
def pS( self, p ):
self.parent.stat( p )
#-----------------------------------------------------------------------------
def _doPGSql( self, query ):
# self.p( '>>> Executing: [Bach] [%s]' % query )
q = QSqlQuery( query, self._pgdb )
#self.p( '<<< Done' )
return q
#-----------------------------------------------------------------------------
def _doPGSqlMod( self, query ):
self.fout.write( query )
self.fout.write( '\n' )
if self.dryRun:
return
#self.p( '>>> Executing: [Bach] [%s]' % query )
q = QSqlQuery( query, self._pgdb )
#self.p( '<<< Done' )
return q
#-----------------------------------------------------------------------------
def collectPGData_Asset(self):
q = self._doPGSql("""SELECT path, keybachasset FROM bachasset""")
while(q.next()):
name, id = extractPGAsset( q )
self.pgAssetName2Id[ name ] = id
self.pgAssetId2Name[ id ] = name
#-----------------------------------------------------------------------------
def collectPGData_Keyword(self):
q = self._doPGSql("""SELECT keybachasset, name FROM
bachkeywordmap, bachasset, bachkeyword
WHERE
fkeybachasset=keybachasset AND
fkeybachkeyword=keybachkeyword""")
while(q.next()):
d = extractPGKeywordMapping( q )
id = d[ 0 ]
name = d[ 1 ]
if not id in self.pgKeywordMapping:
self.pgKeywordMapping[ id ] = [ name ]
self.pgKeywordMapping[ id ].append( name )
#-----------------------------------------------------------------------------
def collectPGData(self):
self.p( "Preloading Bach data..." )
#----------------
self.collectPGData_Asset()
self.collectPGData_Keyword()
#----------------
self.p( "... finished" )
#-----------------------------------------------------------------------------
def assetExists(self, path):
if not path in self.pgAssetName2Id:
return 0
return self.pgAssetName2Id[ path ]
#-----------------------------------------------------------------------------
def getAssetId( self, path ):
return self.assetExists( path )
#-----------------------------------------------------------------------------
def keywordExists(self, name):
if not name in self.pgKeywordName2Id:
return 0
return self.pgKeywordName2Id[ name ]
#-----------------------------------------------------------------------------
def getKeywordId( self, name ):
return self.keywordExists( name )
#-----------------------------------------------------------------------------
def keywordMapExists(self, imgPath, keywordName):
if not imgPath in self.pgKeywordMapping:
return False
if not keywordName in self.pgKeywordMapping[ imgPath ]:
return False
return True
#-----------------------------------------------------------------------------
def collectionExists(self, name):
if not name in self.pgCollectionName2Id:
return 0
return self.pgCollectionName2Id[ name ]
#-----------------------------------------------------------------------------
def getCollectionId( self, name ):
return self.collectionExists( name )
#-----------------------------------------------------------------------------
def collectionMapExists(self, imgPath, collectionName):
if not imgPath in self.pgCollectionMapping:
return False
if not collectionName in self.pgCollectionMapping[ imgPath ]:
return False
return True
#-----------------------------------------------------------------------------
def esc( s ):
    """Escape a string for use in a single-quoted SQL literal by doubling
    every embedded single quote."""
    return s.replace( "'", "''" )
#-----------------------------------------------------------------------------
def toS( variant ):
    """Convert a QVariant to a plain Python byte string."""
    return str( variant.toString().toAscii() )
#-----------------------------------------------------------------------------
def toI( variant ):
    """Convert a QVariant to a plain int (the Qt 'ok' flag is ignored)."""
    number, _ok = variant.toInt()
    return int( number )
#-----------------------------------------------------------------------------
def extractPGAsset( query ):
    """Return (path, id) from the first two columns of the current row."""
    return toS( query.value( 0 ) ), toI( query.value( 1 ) )
#-----------------------------------------------------------------------------
def extractPGKeyword( query ):
    """Return (name, id) from the first two columns of the current row."""
    return toS( query.value( 0 ) ), toI( query.value( 1 ) )
#-----------------------------------------------------------------------------
def extractPGCollection( query ):
    """Return (name, id) from the first two columns of the current row."""
    return toS( query.value( 0 ) ), toI( query.value( 1 ) )
#-----------------------------------------------------------------------------
def extractPGCollectionMapping( query ):
    """Return [asset_id, collection_name] for the current query row."""
    return [ toI( query.value( 0 ) ), toS( query.value( 1 ) ) ]
#-----------------------------------------------------------------------------
def extractPGKeywordMapping( query ):
    """Return [asset_id, keyword_name] for the current query row."""
    return [ toI( query.value( 0 ) ), toS( query.value( 1 ) ) ]
#-----------------------------------------------------------------------------
# Minimal stand-in for the GUI parent object: just echoes messages to stdout.
class Printer():
    def printIt(self,p):
        print p
# Allow running the fix as a standalone script (normally driven by the GUI).
if __name__=='__main__':
    printer = Printer()
    fixit = FixCachedKeywords( printer )
| gpl-2.0 |
maestrano/openerp | doc/_themes/flask_theme_support.py | 2228 | 4875 | # flasky extensions. flasky pygments style based on tango style
from pygments.style import Style
from pygments.token import Keyword, Name, Comment, String, Error, \
Number, Operator, Generic, Whitespace, Punctuation, Other, Literal
class FlaskyStyle(Style):
    """Pygments style for the Flask docs: Tango-based colours on a light
    background. Each entry maps a token type to a 'bold/italic #rrggbb'
    style string; the trailing comments give the emitted CSS class."""
    background_color = "#f8f8f8"
    default_style = ""
    styles = {
        # No corresponding class for the following:
        #Text:                     "", # class:  ''
        Whitespace:                "underline #f8f8f8",      # class: 'w'
        Error:                     "#a40000 border:#ef2929", # class: 'err'
        Other:                     "#000000",                # class 'x'
        Comment:                   "italic #8f5902",         # class: 'c'
        Comment.Preproc:           "noitalic",               # class: 'cp'
        Keyword:                   "bold #004461",           # class: 'k'
        Keyword.Constant:          "bold #004461",           # class: 'kc'
        Keyword.Declaration:       "bold #004461",           # class: 'kd'
        Keyword.Namespace:         "bold #004461",           # class: 'kn'
        Keyword.Pseudo:            "bold #004461",           # class: 'kp'
        Keyword.Reserved:          "bold #004461",           # class: 'kr'
        Keyword.Type:              "bold #004461",           # class: 'kt'
        Operator:                  "#582800",                # class: 'o'
        Operator.Word:             "bold #004461",           # class: 'ow' - like keywords
        Punctuation:               "bold #000000",           # class: 'p'
        # because special names such as Name.Class, Name.Function, etc.
        # are not recognized as such later in the parsing, we choose them
        # to look the same as ordinary variables.
        Name:                      "#000000",                # class: 'n'
        Name.Attribute:            "#c4a000",                # class: 'na' - to be revised
        Name.Builtin:              "#004461",                # class: 'nb'
        Name.Builtin.Pseudo:       "#3465a4",                # class: 'bp'
        Name.Class:                "#000000",                # class: 'nc' - to be revised
        Name.Constant:             "#000000",                # class: 'no' - to be revised
        Name.Decorator:            "#888",                   # class: 'nd' - to be revised
        Name.Entity:               "#ce5c00",                # class: 'ni'
        Name.Exception:            "bold #cc0000",           # class: 'ne'
        Name.Function:             "#000000",                # class: 'nf'
        Name.Property:             "#000000",                # class: 'py'
        Name.Label:                "#f57900",                # class: 'nl'
        Name.Namespace:            "#000000",                # class: 'nn' - to be revised
        Name.Other:                "#000000",                # class: 'nx'
        Name.Tag:                  "bold #004461",           # class: 'nt' - like a keyword
        Name.Variable:             "#000000",                # class: 'nv' - to be revised
        Name.Variable.Class:       "#000000",                # class: 'vc' - to be revised
        Name.Variable.Global:      "#000000",                # class: 'vg' - to be revised
        Name.Variable.Instance:    "#000000",                # class: 'vi' - to be revised
        Number:                    "#990000",                # class: 'm'
        Literal:                   "#000000",                # class: 'l'
        Literal.Date:              "#000000",                # class: 'ld'
        String:                    "#4e9a06",                # class: 's'
        String.Backtick:           "#4e9a06",                # class: 'sb'
        String.Char:               "#4e9a06",                # class: 'sc'
        String.Doc:                "italic #8f5902",         # class: 'sd' - like a comment
        String.Double:             "#4e9a06",                # class: 's2'
        String.Escape:             "#4e9a06",                # class: 'se'
        String.Heredoc:            "#4e9a06",                # class: 'sh'
        String.Interpol:           "#4e9a06",                # class: 'si'
        String.Other:              "#4e9a06",                # class: 'sx'
        String.Regex:              "#4e9a06",                # class: 'sr'
        String.Single:             "#4e9a06",                # class: 's1'
        String.Symbol:             "#4e9a06",                # class: 'ss'
        Generic:                   "#000000",                # class: 'g'
        Generic.Deleted:           "#a40000",                # class: 'gd'
        Generic.Emph:              "italic #000000",         # class: 'ge'
        Generic.Error:             "#ef2929",                # class: 'gr'
        Generic.Heading:           "bold #000080",           # class: 'gh'
        Generic.Inserted:          "#00A000",                # class: 'gi'
        Generic.Output:            "#888",                   # class: 'go'
        Generic.Prompt:            "#745334",                # class: 'gp'
        Generic.Strong:            "bold #000000",           # class: 'gs'
        Generic.Subheading:        "bold #800080",           # class: 'gu'
        Generic.Traceback:         "bold #a40000",           # class: 'gt'
    }
| agpl-3.0 |
michaelkressaty/ssepaperless | ssepaperless/users/views.py | 57 | 1696 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
    """Display a single user's profile page, looked up by username."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
    """Redirect the logged-in user to their own detail page."""
    # Temporary (302) redirect: the target depends on who is logged in.
    permanent = False
    def get_redirect_url(self):
        """Build the users:detail URL for the requesting user."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
    """Let the logged-in user edit their own profile via UserForm."""
    form_class = UserForm
    # we already imported User in the view code above, remember?
    model = User
    # send the user back to their own page after a successful update
    def get_success_url(self):
        """Return the user's own detail page."""
        return reverse("users:detail",
                       kwargs={"username": self.request.user.username})
    def get_object(self):
        # Only get the User record for the user making the request
        return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
    """List all users (login required)."""
    model = User
    # These next two lines tell the view to index lookups by username
    slug_field = "username"
    slug_url_kwarg = "username"
| bsd-3-clause |
miniconfig/home-assistant | homeassistant/components/climate/zwave.py | 1 | 7581 | """
Support for Z-Wave climate devices.
For more details about this platform, please refer to the documentation at
https://home-assistant.io/components/climate.zwave/
"""
# Because we do not compile openzwave on CI
# pylint: disable=import-error
import logging
from homeassistant.components.climate import DOMAIN
from homeassistant.components.climate import ClimateDevice
from homeassistant.components.zwave import ZWaveDeviceEntity
from homeassistant.components.zwave import async_setup_platform # noqa # pylint: disable=unused-import
from homeassistant.const import (
TEMP_CELSIUS, TEMP_FAHRENHEIT, ATTR_TEMPERATURE)
_LOGGER = logging.getLogger(__name__)
CONF_NAME = 'name'
DEFAULT_NAME = 'Z-Wave Climate'
# Manufacturer / product ids identifying the Remotec ZXT-120 IR extender,
# which exposes an extra swing-mode value and needs special handling.
REMOTEC = 0x5254
REMOTEC_ZXT_120 = 0x8377
REMOTEC_ZXT_120_THERMOSTAT = (REMOTEC, REMOTEC_ZXT_120)
# Extra state attribute names exposed via device_state_attributes.
ATTR_OPERATING_STATE = 'operating_state'
ATTR_FAN_STATE = 'fan_state'
WORKAROUND_ZXT_120 = 'zxt_120'
# (manufacturer_id, product_id) -> workaround identifier.
DEVICE_MAPPINGS = {
    REMOTEC_ZXT_120_THERMOSTAT: WORKAROUND_ZXT_120
}
def get_device(hass, values, **kwargs):
    """Create a Z-Wave climate entity for the given value set, using the
    temperature unit configured on the Home Assistant instance."""
    return ZWaveClimate(values, hass.config.units.temperature_unit)
class ZWaveClimate(ZWaveDeviceEntity, ClimateDevice):
    """Representation of a Z-Wave Climate device."""

    def __init__(self, values, temp_unit):
        """Initialize the Z-Wave climate device.

        values: the Z-Wave value set for this node.
        temp_unit: the hass-configured temperature unit (fallback until the
        device reports its own unit).
        """
        ZWaveDeviceEntity.__init__(self, values, DOMAIN)
        self._target_temperature = None
        self._current_temperature = None
        self._current_operation = None
        self._operation_list = None
        self._operating_state = None
        self._current_fan_mode = None
        self._fan_list = None
        self._fan_state = None
        self._current_swing_mode = None
        self._swing_list = None
        self._unit = temp_unit
        _LOGGER.debug("temp_unit is %s", self._unit)
        self._zxt_120 = None
        # Make sure that we have values for the key before converting to int
        if (self.node.manufacturer_id.strip() and
                self.node.product_id.strip()):
            specific_sensor_key = (
                int(self.node.manufacturer_id, 16),
                int(self.node.product_id, 16))
            if specific_sensor_key in DEVICE_MAPPINGS:
                if DEVICE_MAPPINGS[specific_sensor_key] == WORKAROUND_ZXT_120:
                    _LOGGER.debug("Remotec ZXT-120 Zwave Thermostat"
                                  " workaround")
                    self._zxt_120 = 1
        self.update_properties()

    def update_properties(self):
        """Callback on data changes for node values."""
        # Operation Mode
        if self.values.mode:
            self._current_operation = self.values.mode.data
            operation_list = self.values.mode.data_items
            if operation_list:
                self._operation_list = list(operation_list)
        _LOGGER.debug("self._operation_list=%s", self._operation_list)
        _LOGGER.debug("self._current_operation=%s", self._current_operation)
        # Current Temp
        if self.values.temperature:
            self._current_temperature = self.values.temperature.data
            device_unit = self.values.temperature.units
            # Prefer the unit reported by the device over the hass default.
            if device_unit is not None:
                self._unit = device_unit
        # Fan Mode
        if self.values.fan_mode:
            self._current_fan_mode = self.values.fan_mode.data
            fan_list = self.values.fan_mode.data_items
            if fan_list:
                self._fan_list = list(fan_list)
        _LOGGER.debug("self._fan_list=%s", self._fan_list)
        _LOGGER.debug("self._current_fan_mode=%s",
                      self._current_fan_mode)
        # Swing mode (only present on the ZXT-120)
        if self._zxt_120 == 1:
            if self.values.zxt_120_swing_mode:
                self._current_swing_mode = self.values.zxt_120_swing_mode.data
                swing_list = self.values.zxt_120_swing_mode.data_items
                if swing_list:
                    self._swing_list = list(swing_list)
            _LOGGER.debug("self._swing_list=%s", self._swing_list)
            _LOGGER.debug("self._current_swing_mode=%s",
                          self._current_swing_mode)
        # Set point: fall back to the current temperature when the device
        # reports a 0 setpoint (uninitialised thermostat).
        if self.values.primary.data == 0:
            _LOGGER.debug("Setpoint is 0, setting default to "
                          "current_temperature=%s",
                          self._current_temperature)
            self._target_temperature = (
                round((float(self._current_temperature)), 1))
        else:
            self._target_temperature = round(
                (float(self.values.primary.data)), 1)
        # Operating state
        if self.values.operating_state:
            self._operating_state = self.values.operating_state.data
        # Fan operating state
        if self.values.fan_state:
            self._fan_state = self.values.fan_state.data

    @property
    def should_poll(self):
        """No polling on Z-Wave."""
        return False

    @property
    def current_fan_mode(self):
        """Return the fan speed set."""
        return self._current_fan_mode

    @property
    def fan_list(self):
        """List of available fan modes."""
        return self._fan_list

    @property
    def current_swing_mode(self):
        """Return the swing mode set."""
        return self._current_swing_mode

    @property
    def swing_list(self):
        """List of available swing modes."""
        return self._swing_list

    @property
    def temperature_unit(self):
        """Return the unit of measurement."""
        if self._unit == 'C':
            return TEMP_CELSIUS
        elif self._unit == 'F':
            return TEMP_FAHRENHEIT
        else:
            return self._unit

    @property
    def current_temperature(self):
        """Return the current temperature."""
        return self._current_temperature

    @property
    def current_operation(self):
        """Return the current operation mode."""
        return self._current_operation

    @property
    def operation_list(self):
        """List of available operation modes."""
        return self._operation_list

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        return self._target_temperature

    def set_temperature(self, **kwargs):
        """Set new target temperature (no-op when none is supplied)."""
        temperature = kwargs.get(ATTR_TEMPERATURE)
        if temperature is None:
            return
        self.values.primary.data = temperature
        self.schedule_update_ha_state()

    def set_fan_mode(self, fan):
        """Set new target fan mode."""
        if self.values.fan_mode:
            self.values.fan_mode.data = bytes(fan, 'utf-8')

    def set_operation_mode(self, operation_mode):
        """Set new target operation mode."""
        if self.values.mode:
            self.values.mode.data = bytes(operation_mode, 'utf-8')

    def set_swing_mode(self, swing_mode):
        """Set new target swing mode (ZXT-120 only)."""
        if self._zxt_120 == 1:
            if self.values.zxt_120_swing_mode:
                # Fix: write to .data -- assigning to the attribute itself
                # replaced the Z-Wave value object instead of updating it
                # (compare set_fan_mode / set_operation_mode above).
                self.values.zxt_120_swing_mode.data = bytes(
                    swing_mode, 'utf-8')

    @property
    def device_state_attributes(self):
        """Return the device specific state attributes."""
        data = super().device_state_attributes
        if self._operating_state:
            # Fix: a stray trailing comma here used to wrap the value in a
            # one-element tuple.
            data[ATTR_OPERATING_STATE] = self._operating_state
        if self._fan_state:
            data[ATTR_FAN_STATE] = self._fan_state
        return data
| mit |
DOAJ/doaj | doajtest/unit/test_article_prepare_update_publisher.py | 1 | 3830 | from parameterized import parameterized
from combinatrix.testintegration import load_parameter_sets
from doajtest.fixtures import ArticleFixtureFactory, AccountFixtureFactory, JournalFixtureFactory
from doajtest.helpers import DoajTestCase
from portality.bll import DOAJ
from portality.bll import exceptions
from portality.models import Article, Account, Journal
from portality.lib.paths import rel2abs
from doajtest.mocks.bll_article import BLLArticleMockFactory
# Map the exception names used in the test parameter matrices to the actual
# exception classes the service is expected to raise.
EXCEPTIONS = {
    "ArgumentException": exceptions.ArgumentException,
    "DuplicateArticleException": exceptions.DuplicateArticleException,
    "ArticleNotAcceptable": exceptions.ArticleNotAcceptable
}
def prepare_update_publisher_load_cases():
    """Load the combinatorial test cases for the 'prepare_update_publisher'
    sheet of the article_create_article matrix."""
    return load_parameter_sets(rel2abs(__file__, "..", "matrices", "article_create_article"), "prepare_update_publisher",
                               "test_id",
                               {"test_id": []})
class TestBLLPrepareUpdatePublisher(DoajTestCase):
    """Matrix-driven tests for ArticleService._prepare_update_publisher."""
    def setUp(self):
        super(TestBLLPrepareUpdatePublisher, self).setUp()
        self.svc = DOAJ.articleService()
        # Keep references to the real implementations so tearDown can undo
        # the monkey-patching done per test case.
        self.is_id_updated = self.svc._doi_or_fulltext_updated
        self.has_permission = self.svc.has_permissions
        self.merge = Article.merge
        acc_source = AccountFixtureFactory.make_publisher_source()
        self.publisher = Account(**acc_source)
    def tearDown(self):
        super(TestBLLPrepareUpdatePublisher, self).tearDown()
        # Restore everything patched in setUp / the test body.
        self.svc._doi_or_fulltext_updated = self.is_id_updated
        self.svc.has_permissions = self.has_permission
        Article.merge = self.merge
    @parameterized.expand(prepare_update_publisher_load_cases)
    def test_prepare_update_publisher(self, value, kwargs):
        """Drive _prepare_update_publisher through one matrix row."""
        Article.merge = BLLArticleMockFactory.merge_mock
        # Parameters for this combination, as read from the matrix.
        duplicate_arg = kwargs.get("duplicate")
        merge_duplicate_arg = kwargs.get("merge_duplicate")
        doi_or_ft_update_arg = kwargs.get("doi_or_ft_updated")
        is_update_arg = kwargs.get("is_update")
        raises_arg = kwargs.get("raises")
        # Fixture identifiers shared between article and duplicate.
        pissn1 = "1234-5678"
        eissn1 = "9876-5432"
        pissn2 = "1111-1111"
        eissn2 = "2222-2222"
        doi = "10.1234/article-10"
        ft = "https://example.com"
        # Mock whether the incoming article changed its DOI/fulltext.
        if doi_or_ft_update_arg == "yes":
            self.svc._doi_or_fulltext_updated = BLLArticleMockFactory.doi_or_fulltext_updated(True,True)
        else:
            self.svc._doi_or_fulltext_updated = BLLArticleMockFactory.doi_or_fulltext_updated(False,False)
        article_src = ArticleFixtureFactory.make_article_source(pissn=pissn1, eissn=eissn1, doi=doi, fulltext=ft)
        article = Article(**article_src)
        article.set_id("article_id")
        # Optionally build a duplicate whose id matches (or not) the article.
        duplicate = None
        if duplicate_arg != "none":
            duplicate_src = ArticleFixtureFactory.make_article_source(pissn=pissn2, eissn=eissn2, doi=doi, fulltext=ft)
            duplicate = Article(**duplicate_src)
            if duplicate_arg == "same_as_article_id":
                duplicate.set_id("article_id")
            elif duplicate_arg == "different_than_article_id":
                duplicate.set_id("duplicate_id")
        merge_duplicate = True if merge_duplicate_arg == "yes" else False
        # A duplicate owned by someone else means the publisher lacks rights.
        if duplicate_arg == "different_than_article_id":
            self.svc.has_permissions = BLLArticleMockFactory.has_permissions(False)
        else:
            self.svc.has_permissions = BLLArticleMockFactory.has_permissions(True)
        if raises_arg == "DuplicateArticle":
            with self.assertRaises(exceptions.DuplicateArticleException):
                self.svc._prepare_update_publisher(article,duplicate,merge_duplicate,self.publisher,True)
        else:
            assert self.svc._prepare_update_publisher(article,duplicate,merge_duplicate,self.publisher,True) == int(is_update_arg)
Argon-Zhou/django | tests/template_tests/filter_tests/test_wordwrap.py | 324 | 1666 | from django.template.defaultfilters import wordwrap
from django.test import SimpleTestCase
from django.utils.safestring import mark_safe
from ..utils import setup
class WordwrapTests(SimpleTestCase):
    """Template-level tests for the |wordwrap filter with safe/unsafe input."""
    @setup({'wordwrap01':
        '{% autoescape off %}{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}{% endautoescape %}'})
    def test_wordwrap01(self):
        # autoescape off: both the plain and the mark_safe string render raw.
        output = self.engine.render_to_string('wordwrap01', {'a': 'a & b', 'b': mark_safe('a & b')})
        self.assertEqual(output, 'a &\nb a &\nb')
    @setup({'wordwrap02': '{{ a|wordwrap:"3" }} {{ b|wordwrap:"3" }}'})
    def test_wordwrap02(self):
        output = self.engine.render_to_string('wordwrap02', {'a': 'a & b', 'b': mark_safe('a & b')})
        self.assertEqual(output, 'a &\nb a &\nb')
class FunctionTests(SimpleTestCase):
    """Direct unit tests for the wordwrap() filter function."""
    def test_wrap(self):
        self.assertEqual(
            wordwrap('this is a long paragraph of text that really needs to be wrapped I\'m afraid', 14),
            'this is a long\nparagraph of\ntext that\nreally needs\nto be wrapped\nI\'m afraid',
        )
    def test_indent(self):
        # Existing leading whitespace on continuation lines is preserved.
        self.assertEqual(
            wordwrap('this is a short paragraph of text.\n  But this line should be indented', 14),
            'this is a\nshort\nparagraph of\ntext.\n  But this\nline should be\nindented',
        )
    def test_indent2(self):
        self.assertEqual(
            wordwrap('this is a short paragraph of text.\n  But this line should be indented', 15),
            'this is a short\nparagraph of\ntext.\n  But this line\nshould be\nindented',
        )
    def test_non_string_input(self):
        # Non-string values are coerced to str before wrapping.
        self.assertEqual(wordwrap(123, 2), '123')
JeroenZegers/Nabu-MSSS | nabu/postprocessing/reconstructors/deepclusteringnoise_reconstructor.py | 1 | 5366 | """@file deepclusteringnoise_reconstructor.py
contains the reconstor class using deep clustering for modified noise architecture"""
from sklearn.cluster import KMeans
import mask_reconstructor
from nabu.postprocessing import data_reader
import numpy as np
import os
class DeepclusteringnoiseReconstructor(mask_reconstructor.MaskReconstructor):
	"""the deepclusteringnoise reconstructor class for modified architecture for noise
	a reconstructor using deep clustering"""
	# Network outputs this reconstructor consumes: per-bin embedding vectors
	# and the noise filter (alpha) output.
	requested_output_names = ['bin_emb', 'noise_filter']
	def __init__(self, conf, evalconf, dataconf, rec_dir, task, optimal_frame_permutation=False):
		"""DeepclusteringnoiseReconstructor constructor
		Args:
			conf: the reconstructor configuration as a dictionary
			evalconf: the evaluator configuration as a ConfigParser
			dataconf: the database configuration
			rec_dir: the directory where the reconstructions will be stored
			task: name of task
		"""
		super(DeepclusteringnoiseReconstructor, self).__init__(
			conf, evalconf, dataconf, rec_dir, task, optimal_frame_permutation)
		# Bins whose noise-filter value exceeds this threshold are considered
		# clean enough to take part in the K-means clustering.
		if 'noise_threshold_for_kmeans' in conf:
			self.noise_threshold = float(conf['noise_threshold_for_kmeans'])
		else:
			self.noise_threshold = 0.75
		# Minimum fraction of energetic bins that must survive the noise
		# threshold; otherwise the threshold is relaxed (see _get_masks).
		if 'min_kmeans_perc' in conf:
			self.min_kmeans_perc = float(conf['min_kmeans_perc'])
		else:
			self.min_kmeans_perc = 0.05
		# get the usedbins reader
		usedbins_names = conf['usedbins'].split(' ')
		usedbins_dataconfs = []
		for usedbins_name in usedbins_names:
			usedbins_dataconfs.append(dict(dataconf.items(usedbins_name)))
		self.usedbins_reader = data_reader.DataReader(usedbins_dataconfs, self.segment_lengths)
		# directory where cluster centroids will be stored
		self.center_store_dir = os.path.join(rec_dir, 'cluster_centers')
		if not os.path.isdir(self.center_store_dir):
			os.makedirs(self.center_store_dir)
	def _get_masks(self, output, utt_info):
		"""estimate the masks
		Args:
			output: the output of a single utterance of the neural network
			utt_info: some info on the utterance
		Returns:
			the estimated masks"""
		embeddings = output['bin_emb']  # Embeddingvectors
		noise_filter = output['noise_filter']  # noise filter output network (alpha)
		# only the non-silence bins will be used for the clustering
		usedbins, _ = self.usedbins_reader(self.pos)
		[T, F] = np.shape(usedbins)
		# NOTE(review): integer division under Python 2 -- assumes the
		# embedding width is an exact multiple of F.
		emb_dim = np.shape(embeddings)[1]/F
		if np.shape(embeddings)[0] != T:
			raise Exception('Number of frames in usedbins does not match the sequence length')
		if np.shape(noise_filter)[0] != T:
			raise Exception('Number of frames in noise detect does not match the sequence length')
		if np.shape(noise_filter)[1] != F:
			raise Exception('Number of noise detect outputs does not match number of frequency bins')
		# reshape the embeddings vectors and L2-normalise each of them
		embeddings = embeddings[:T, :]
		embeddings_resh = np.reshape(embeddings, [T*F, emb_dim])
		embeddings_resh_norm = np.linalg.norm(embeddings_resh, axis=1, keepdims=True)
		embeddings_resh = embeddings_resh/embeddings_resh_norm
		if np.isnan(embeddings_resh).any():
			print 'Embedding reshape contains NaN'
		# reshape noise filter
		noise_filter = noise_filter[:T, :]
		noise_filter_resh = np.reshape(noise_filter, T*F)
		# which cells have not too much noise
		no_noise = noise_filter_resh > self.noise_threshold
		# only keep the active bins (above threshold) for clustering and not too noisy
		usedbins_resh = np.reshape(usedbins, T*F)
		filt = np.logical_and(usedbins_resh, no_noise)
		perc_for_kmeans = float(np.sum(filt))/float(np.sum(usedbins_resh))
		# Too few clean bins: relax the selection and instead keep the
		# min_kmeans_perc least-noisy energetic bins.
		if perc_for_kmeans < self.min_kmeans_perc:
			print \
				'Found that less then %.1f%% (%.1f%%)of the tf bins with energy where considered non-noise for the Kmeans. ' \
				'Lowering the noise threshold so that %.1f%% of the bins will be considered' % \
				(self.min_kmeans_perc*100, perc_for_kmeans*100, self.min_kmeans_perc*100)
			num_bins_wanted = int(np.ceil(np.sum(usedbins_resh)*self.min_kmeans_perc))
			noise_filt_used_bin = noise_filter_resh * usedbins_resh
			sorted_noise_filt_used_bin_inds = np.argsort(noise_filt_used_bin)
			sorted_noise_filt_used_bin_inds = sorted_noise_filt_used_bin_inds[::-1]
			filt = sorted_noise_filt_used_bin_inds[:num_bins_wanted]
		embeddings_speech_resh = embeddings_resh[filt]
		if np.isnan(embeddings_speech_resh).any():
			print 'embeddings_speech_resh contains NaN'
		# Degenerate case: not enough bins to cluster, return all-zero masks.
		if np.shape(embeddings_speech_resh)[0] < 2:
			return np.zeros([self.nrS, T, F])
		# apply kmeans clustering and assign each bin to a cluster
		kmeans_model = KMeans(n_clusters=self.nrS, init='k-means++', n_init=10, max_iter=100, n_jobs=-1)
		# Sometime it fails due to some indexerror and I'm not sure why. Just retry then. max 5 times
		# NOTE(review): if all 5 attempts raise IndexError the model is left
		# unfitted and predict() below will fail -- confirm this is intended.
		for _ in range(5):
			try:
				kmeans_model.fit(embeddings_speech_resh)
			except IndexError:
				continue
			break
		# assign each cell to cluster
		predicted_labels = kmeans_model.predict(embeddings_resh)
		predicted_labels_resh = np.reshape(predicted_labels, [T, F])
		# reconstruct the masks from the cluster labels, scaled by the noise
		# filter so noisy bins contribute less.
		masks = np.zeros([self.nrS, T, F])
		for spk in range(self.nrS):
			masks[spk, :, :] = (predicted_labels_resh == spk)*noise_filter
		if np.isnan(masks).any():
			print 'masks contains NaN'
		# store the clusters
		np.save(os.path.join(self.center_store_dir, utt_info['utt_name']), kmeans_model.cluster_centers_)
		return masks
| mit |
evernym/zeno | plenum/test/txn_author_agreement/acceptance/helper.py | 2 | 2183 | import json
from indy.ledger import (
append_txn_author_agreement_acceptance_to_request, sign_request
)
from plenum.common.util import randomString
from plenum.test.pool_transactions.helper import (
prepare_nym_request, prepare_new_node_data, prepare_node_request
)
# TODO makes sense to make more generic and move to upper level helper
def build_nym_request(looper, sdk_wallet):
    """Build a NYM request (random seed and alias, no role) on behalf of
    sdk_wallet, returning just the request JSON."""
    return looper.loop.run_until_complete(
        prepare_nym_request(
            sdk_wallet,
            named_seed=randomString(32),
            alias=randomString(5),
            role=None
        )
    )[0]
# TODO makes sense to make more generic and move to upper level helper
def build_node_request(looper, tconf, tdir, sdk_wallet):
    """Build a NODE request for a freshly generated node (random name, new
    keys/addresses, no services) signed-ready by the steward in sdk_wallet."""
    new_node_name = 'Node' + randomString(3)
    # Generate keys, BLS data and network addresses for the new node.
    sigseed, verkey, bls_key, nodeIp, nodePort, clientIp, clientPort, key_proof = \
        prepare_new_node_data(tconf, tdir, new_node_name)
    _, steward_did = sdk_wallet
    node_request = looper.loop.run_until_complete(
        prepare_node_request(steward_did,
                             new_node_name=new_node_name,
                             clientIp=clientIp,
                             clientPort=clientPort,
                             nodeIp=nodeIp,
                             nodePort=nodePort,
                             bls_key=bls_key,
                             sigseed=sigseed,
                             services=[],
                             key_proof=key_proof))
    return node_request
def add_taa_acceptance(
        looper,
        request_json,
        taa_text,
        taa_version,
        taa_acceptance_mech,
        taa_acceptance_time
):
    """Append a transaction-author-agreement acceptance (text/version form,
    no digest) to the given request JSON and return the result."""
    return looper.loop.run_until_complete(
        append_txn_author_agreement_acceptance_to_request(
            request_json,
            text=taa_text,
            version=taa_version,
            taa_digest=None,
            mechanism=taa_acceptance_mech,
            time=taa_acceptance_time
        )
    )
def sign_request_dict(looper, sdk_wallet, req_dict):
    """Sign a request dict with the wallet's DID and return it as a dict."""
    wallet_h, did = sdk_wallet
    req_json = looper.loop.run_until_complete(
        sign_request(wallet_h, did, json.dumps(req_dict)))
    return json.loads(req_json)
| apache-2.0 |
deepcell/xhtml2pdf | demo/wsgi/pisawsgidemo.py | 56 | 2238 | #!/bin/python2.5
# -*- coding: utf-8 -*-
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2010 Dirk Holtwick, holtwick.it
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
__version__ = "$Revision: 103 $"
__author__ = "$Author: holtwick $"
__date__ = "$Date: 2007-10-31 17:08:54 +0100 (Mi, 31 Okt 2007) $"
__svnid__ = "$Id: pisa.py 103 2007-10-31 16:08:54Z holtwick $"
from wsgiref.simple_server import make_server
import logging
import sx.pisa3.pisa_wsgi as pisa_wsgi
def SimpleApp(environ, start_response):
    """Minimal WSGI application demonstrating the PISA middleware.

    Setting the "pisa.topdf" environ key asks the wrapping
    PisaMiddleware to convert the HTML response into a PDF with the
    given filename; the app itself just returns a tiny HTML body.
    """
    environ["pisa.topdf"] = "index.pdf"
    headers = [('content-type', "text/html")]
    start_response('200 OK', headers)
    return ["Hello <strong>World</strong>"]
# Demo entry point: wrap the app in the PDF middleware and serve forever.
if __name__ == '__main__':
    HOST = ''
    PORT = 8080
    logging.basicConfig(level=logging.DEBUG)
    app = SimpleApp
    # Add PISA WSGI Middleware
    app = pisa_wsgi.PisaMiddleware(app)
    httpd = make_server(HOST, PORT, app)
    print "Serving HTTP on port %d..." % PORT
    httpd.serve_forever()
| apache-2.0 |
K-Constantine/Amaraki | core/deps/gyp/pylib/gyp/flock_tool.py | 1835 | 1748 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
  """Entry point: dispatch the command-line arguments to a FlockTool."""
  FlockTool().Dispatch(args)
class FlockTool(object):
  """This class emulates the 'flock' command."""
  def Dispatch(self, args):
    """Dispatches a string command to a method."""
    if len(args) < 1:
      raise Exception("Not enough arguments")
    # e.g. args[0] == 'flock' dispatches to ExecFlock(*args[1:]).
    method = "Exec%s" % self._CommandifyName(args[0])
    getattr(self, method)(*args[1:])
  def _CommandifyName(self, name_string):
    """Transforms a tool name like copy-info-plist to CopyInfoPlist"""
    return name_string.title().replace('-', '')
  def ExecFlock(self, lockfile, *cmd_list):
    """Emulates the most basic behavior of Linux's flock(1):
    take an exclusive lock on lockfile, then run cmd_list and
    return its exit status."""
    # Rely on exception handling to report errors.
    # Note that the stock python on SunOS has a bug
    # where fcntl.flock(fd, LOCK_EX) always fails
    # with EBADF, that's why we use this F_SETLK
    # hack instead.
    # (0666 is a Python 2 octal literal; mode rw-rw-rw- before umask.)
    fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
    if sys.platform.startswith('aix'):
      # Python on AIX is compiled with LARGEFILE support, which changes the
      # struct size.
      op = struct.pack('hhIllqq', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    else:
      op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
    fcntl.fcntl(fd, fcntl.F_SETLK, op)
    return subprocess.call(cmd_list)
# Strip the program name and exit with main's return code.
if __name__ == '__main__':
  sys.exit(main(sys.argv[1:]))
| mit |
svn2github/kgyp | pylib/gyp/generator/xcode.py | 8 | 56539 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import filecmp
import gyp.common
import gyp.xcodeproj_file
import gyp.xcode_ninja
import errno
import os
import sys
import posixpath
import re
import shutil
import subprocess
import tempfile
# Project files generated by this module will use _intermediate_var as a
# custom Xcode setting whose value is a DerivedSources-like directory that's
# project-specific and configuration-specific. The normal choice,
# DERIVED_FILE_DIR, is target-specific, which is thought to be too restrictive
# as it is likely that multiple targets within a single project file will want
# to access the same set of generated files. The other option,
# PROJECT_DERIVED_FILE_DIR, is unsuitable because while it is project-specific,
# it is not configuration-specific. INTERMEDIATE_DIR is defined as
# $(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION).
_intermediate_var = 'INTERMEDIATE_DIR'
# SHARED_INTERMEDIATE_DIR is the same, except that it is shared among all
# targets that share the same BUILT_PRODUCTS_DIR.
_shared_intermediate_var = 'SHARED_INTERMEDIATE_DIR'
# Name of the Xcode build setting holding linker library search directories;
# kept in a constant so the setting name is spelled in exactly one place.
_library_search_paths_var = 'LIBRARY_SEARCH_PATHS'
# Maps gyp's generic variable names to the literal value or Xcode build
# setting each one expands to in projects emitted by this generator.
generator_default_variables = {
  'EXECUTABLE_PREFIX': '',
  'EXECUTABLE_SUFFIX': '',
  'STATIC_LIB_PREFIX': 'lib',
  'SHARED_LIB_PREFIX': 'lib',
  'STATIC_LIB_SUFFIX': '.a',
  'SHARED_LIB_SUFFIX': '.dylib',
  # INTERMEDIATE_DIR is a place for targets to build up intermediate products.
  # It is specific to each build environment. It is only guaranteed to exist
  # and be constant within the context of a project, corresponding to a single
  # input file. Some build environments may allow their intermediate directory
  # to be shared on a wider scale, but this is not guaranteed.
  'INTERMEDIATE_DIR': '$(%s)' % _intermediate_var,
  'OS': 'mac',
  'PRODUCT_DIR': '$(BUILT_PRODUCTS_DIR)',
  'LIB_DIR': '$(BUILT_PRODUCTS_DIR)',
  'RULE_INPUT_ROOT': '$(INPUT_FILE_BASE)',
  'RULE_INPUT_EXT': '$(INPUT_FILE_SUFFIX)',
  'RULE_INPUT_NAME': '$(INPUT_FILE_NAME)',
  'RULE_INPUT_PATH': '$(INPUT_FILE_PATH)',
  'RULE_INPUT_DIRNAME': '$(INPUT_FILE_DIRNAME)',
  'SHARED_INTERMEDIATE_DIR': '$(%s)' % _shared_intermediate_var,
  'CONFIGURATION_NAME': '$(CONFIGURATION)',
}
# The Xcode-specific sections that hold paths.
# NOTE(review): these names appear to be consumed by gyp's generic input
# processing so the listed sections get path adjustment -- confirm in
# gyp.input before relying on this.
generator_additional_path_sections = [
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
  # 'mac_framework_dirs', input already handles _dirs endings.
]
# The Xcode-specific keys that exist on targets and aren't moved down to
# configurations.
generator_additional_non_configuration_keys = [
  'ios_app_extension',
  'ios_watch_app',
  'ios_watchkit_extension',
  'mac_bundle',
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
  'mac_xctest_bundle',
  'xcode_create_dependents_test_runner',
]
# We want to let any rules apply to files that are resources also.
generator_extra_sources_for_rules = [
  'mac_bundle_resources',
  'mac_framework_headers',
  'mac_framework_private_headers',
]
# Xcode's standard set of library directories, which don't need to be duplicated
# in LIBRARY_SEARCH_PATHS. This list is not exhaustive, but that's okay.
xcode_standard_library_dirs = frozenset([
  '$(SDKROOT)/usr/lib',
  '$(SDKROOT)/usr/local/lib',
])
def CreateXCConfigurationList(configuration_names):
  """Builds an XCConfigurationList from a list of configuration names.

  Creates one XCBuildConfiguration per name.  If configuration_names is
  empty (or None), a single 'Default' configuration is created so the list
  is never empty.  The first name becomes the default configuration.
  """
  xccl = gyp.xcodeproj_file.XCConfigurationList({'buildConfigurations': []})
  if not configuration_names:
    # There must always be at least one configuration to hang settings on.
    configuration_names = ['Default']
  for configuration_name in configuration_names:
    xcbc = gyp.xcodeproj_file.XCBuildConfiguration({
        'name': configuration_name})
    xccl.AppendProperty('buildConfigurations', xcbc)
  xccl.SetProperty('defaultConfigurationName', configuration_names[0])
  return xccl
class XcodeProject(object):
  """Pairs one input .gyp file with the Xcode project generated from it.

  Holds the in-memory PBXProject object graph (self.project wrapped in
  self.project_file) and the path of the .xcodeproj wrapper directory that
  the serialized project file is ultimately written into.
  """
  def __init__(self, gyp_path, path, build_file_dict):
    """Creates an empty PBXProject for the .gyp file at gyp_path and makes
    sure the .xcodeproj wrapper directory at path exists on disk.
    """
    self.gyp_path = gyp_path
    self.path = path
    self.project = gyp.xcodeproj_file.PBXProject(path=path)
    projectDirPath = gyp.common.RelativePath(
                         os.path.dirname(os.path.abspath(self.gyp_path)),
                         os.path.dirname(path) or '.')
    self.project.SetProperty('projectDirPath', projectDirPath)
    self.project_file = \
        gyp.xcodeproj_file.XCProjectFile({'rootObject': self.project})
    self.build_file_dict = build_file_dict
    # TODO(mark): add destructor that cleans up self.path if created_dir is
    # True and things didn't complete successfully. Or do something even
    # better with "try"?
    self.created_dir = False
    try:
      os.makedirs(self.path)
      self.created_dir = True
    except OSError, e:
      if e.errno != errno.EEXIST:
        raise
  def Finalize1(self, xcode_targets, serialize_all_tests):
    """First finalization pass over the generated project.

    Rebuilds the project-level configuration list from the union of all
    target configuration names, applies project-wide build settings from the
    build file dict, sorts targets and groups, and synthesizes aggregate
    targets: a "Run ..." target for every target carrying 'run_as', plus
    "All" and "Run All Tests" targets when applicable.
    """
    # Collect a list of all of the build configuration names used by the
    # various targets in the file. It is very heavily advised to keep each
    # target in an entire project (even across multiple project files) using
    # the same set of configuration names.
    configurations = []
    for xct in self.project.GetProperty('targets'):
      xccl = xct.GetProperty('buildConfigurationList')
      xcbcs = xccl.GetProperty('buildConfigurations')
      for xcbc in xcbcs:
        name = xcbc.GetProperty('name')
        if name not in configurations:
          configurations.append(name)
    # Replace the XCConfigurationList attached to the PBXProject object with
    # a new one specifying all of the configuration names used by the various
    # targets.
    try:
      xccl = CreateXCConfigurationList(configurations)
      self.project.SetProperty('buildConfigurationList', xccl)
    except:
      # Deliberately broad: annotate any failure with the offending gyp file
      # on stderr, then re-raise unchanged.
      sys.stderr.write("Problem with gyp file %s\n" % self.gyp_path)
      raise
    # The need for this setting is explained above where _intermediate_var is
    # defined. The comments below about wanting to avoid project-wide build
    # settings apply here too, but this needs to be set on a project-wide basis
    # so that files relative to the _intermediate_var setting can be displayed
    # properly in the Xcode UI.
    #
    # Note that for configuration-relative files such as anything relative to
    # _intermediate_var, for the purposes of UI tree view display, Xcode will
    # only resolve the configuration name once, when the project file is
    # opened. If the active build configuration is changed, the project file
    # must be closed and reopened if it is desired for the tree view to update.
    # This is filed as Apple radar 6588391.
    xccl.SetBuildSetting(_intermediate_var,
                         '$(PROJECT_DERIVED_FILE_DIR)/$(CONFIGURATION)')
    xccl.SetBuildSetting(_shared_intermediate_var,
                         '$(SYMROOT)/DerivedSources/$(CONFIGURATION)')
    # Set user-specified project-wide build settings and config files. This
    # is intended to be used very sparingly. Really, almost everything should
    # go into target-specific build settings sections. The project-wide
    # settings are only intended to be used in cases where Xcode attempts to
    # resolve variable references in a project context as opposed to a target
    # context, such as when resolving sourceTree references while building up
    # the tree view for UI display.
    # Any values set globally are applied to all configurations, then any
    # per-configuration values are applied.
    for xck, xcv in self.build_file_dict.get('xcode_settings', {}).iteritems():
      xccl.SetBuildSetting(xck, xcv)
    if 'xcode_config_file' in self.build_file_dict:
      config_ref = self.project.AddOrGetFileInRootGroup(
          self.build_file_dict['xcode_config_file'])
      xccl.SetBaseConfiguration(config_ref)
    build_file_configurations = self.build_file_dict.get('configurations', {})
    if build_file_configurations:
      for config_name in configurations:
        build_file_configuration_named = \
            build_file_configurations.get(config_name, {})
        if build_file_configuration_named:
          xcc = xccl.ConfigurationNamed(config_name)
          for xck, xcv in build_file_configuration_named.get('xcode_settings',
                                                             {}).iteritems():
            xcc.SetBuildSetting(xck, xcv)
          if 'xcode_config_file' in build_file_configuration_named:
            config_ref = self.project.AddOrGetFileInRootGroup(
                build_file_configurations[config_name]['xcode_config_file'])
            xcc.SetBaseConfiguration(config_ref)
    # Sort the targets based on how they appeared in the input.
    # TODO(mark): Like a lot of other things here, this assumes internal
    # knowledge of PBXProject - in this case, of its "targets" property.
    # ordinary_targets are ordinary targets that are already in the project
    # file. run_test_targets are the targets that run unittests and should be
    # used for the Run All Tests target. support_targets are the action/rule
    # targets used by GYP file targets, just kept for the assert check.
    ordinary_targets = []
    run_test_targets = []
    support_targets = []
    # targets is full list of targets in the project.
    targets = []
    # Does the project define its own "all" target?
    has_custom_all = False
    # targets_for_all is the list of ordinary_targets that should be listed
    # in this project's "All" target. It includes each non_runtest_target
    # that does not have suppress_wildcard set.
    targets_for_all = []
    for target in self.build_file_dict['targets']:
      target_name = target['target_name']
      toolset = target['toolset']
      qualified_target = gyp.common.QualifiedTarget(self.gyp_path, target_name,
                                                    toolset)
      xcode_target = xcode_targets[qualified_target]
      # Make sure that the target being added to the sorted list is already in
      # the unsorted list.
      assert xcode_target in self.project._properties['targets']
      targets.append(xcode_target)
      ordinary_targets.append(xcode_target)
      if xcode_target.support_target:
        support_targets.append(xcode_target.support_target)
        targets.append(xcode_target.support_target)
      if not int(target.get('suppress_wildcard', False)):
        targets_for_all.append(xcode_target)
      if target_name.lower() == 'all':
        has_custom_all = True;
      # If this target has a 'run_as' attribute, add its target to the
      # targets, and add it to the test targets.
      if target.get('run_as'):
        # Make a target to run something. It should have one
        # dependency, the parent xcode target.
        xccl = CreateXCConfigurationList(configurations)
        run_target = gyp.xcodeproj_file.PBXAggregateTarget({
                        'name': 'Run ' + target_name,
                        'productName': xcode_target.GetProperty('productName'),
                        'buildConfigurationList': xccl,
                      },
                      parent=self.project)
        run_target.AddDependency(xcode_target)
        command = target['run_as']
        script = ''
        if command.get('working_directory'):
          script = script + 'cd "%s"\n' % \
                   gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                       command.get('working_directory'))
        if command.get('environment'):
          script = script + "\n".join(
            ['export %s="%s"' %
             (key, gyp.xcodeproj_file.ConvertVariablesToShellSyntax(val))
             for (key, val) in command.get('environment').iteritems()]) + "\n"
        # Some test end up using sockets, files on disk, etc. and can get
        # confused if more then one test runs at a time. The generator
        # flag 'xcode_serialize_all_test_runs' controls the forcing of all
        # tests serially. It defaults to True. To get serial runs this
        # little bit of python does the same as the linux flock utility to
        # make sure only one runs at a time.
        command_prefix = ''
        if serialize_all_tests:
          command_prefix = \
"""python -c "import fcntl, subprocess, sys
file = open('$TMPDIR/GYP_serialize_test_runs', 'a')
fcntl.flock(file.fileno(), fcntl.LOCK_EX)
sys.exit(subprocess.call(sys.argv[1:]))" """
        # If we were unable to exec for some reason, we want to exit
        # with an error, and fixup variable references to be shell
        # syntax instead of xcode syntax.
        script = script + 'exec ' + command_prefix + '%s\nexit 1\n' % \
                 gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
                     gyp.common.EncodePOSIXShellList(command.get('action')))
        ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
              'shellScript': script,
              'showEnvVarsInLog': 0,
            })
        run_target.AppendProperty('buildPhases', ssbp)
        # Add the run target to the project file.
        targets.append(run_target)
        run_test_targets.append(run_target)
        xcode_target.test_runner = run_target
    # Make sure that the list of targets being replaced is the same length as
    # the one replacing it, but allow for the added test runner targets.
    assert len(self.project._properties['targets']) == \
        len(ordinary_targets) + len(support_targets)
    self.project._properties['targets'] = targets
    # Get rid of unnecessary levels of depth in groups like the Source group.
    self.project.RootGroupsTakeOverOnlyChildren(True)
    # Sort the groups nicely. Do this after sorting the targets, because the
    # Products group is sorted based on the order of the targets.
    self.project.SortGroups()
    # Create an "All" target if there's more than one target in this project
    # file and the project didn't define its own "All" target. Put a generated
    # "All" target first so that people opening up the project for the first
    # time will build everything by default.
    if len(targets_for_all) > 1 and not has_custom_all:
      xccl = CreateXCConfigurationList(configurations)
      all_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name': 'All',
          },
          parent=self.project)
      for target in targets_for_all:
        all_target.AddDependency(target)
      # TODO(mark): This is evil because it relies on internal knowledge of
      # PBXProject._properties. It's important to get the "All" target first,
      # though.
      self.project._properties['targets'].insert(0, all_target)
    # The same, but for run_test_targets.
    if len(run_test_targets) > 1:
      xccl = CreateXCConfigurationList(configurations)
      run_all_tests_target = gyp.xcodeproj_file.PBXAggregateTarget(
          {
            'buildConfigurationList': xccl,
            'name': 'Run All Tests',
          },
          parent=self.project)
      for run_test_target in run_test_targets:
        run_all_tests_target.AddDependency(run_test_target)
      # Insert after the "All" target, which must exist if there is more than
      # one run_test_target.
      self.project._properties['targets'].insert(1, run_all_tests_target)
  def Finalize2(self, xcode_targets, xcode_target_to_target_dict):
    """Second finalization pass over the generated project.

    Adds "Run <target> Tests" aggregate targets where requested, refreshes
    references to other projects, and assigns a stable unique ID to every
    object in the project file.  Must run after Finalize1 has sorted every
    project, because remote references depend on remote target ordering.
    """
    # Finalize2 needs to happen in a separate step because the process of
    # updating references to other projects depends on the ordering of targets
    # within remote project files. Finalize1 is responsible for sorting duty,
    # and once all project files are sorted, Finalize2 can come in and update
    # these references.
    # To support making a "test runner" target that will run all the tests
    # that are direct dependents of any given target, we look for
    # xcode_create_dependents_test_runner being set on an Aggregate target,
    # and generate a second target that will run the tests runners found under
    # the marked target.
    for bf_tgt in self.build_file_dict['targets']:
      if int(bf_tgt.get('xcode_create_dependents_test_runner', 0)):
        tgt_name = bf_tgt['target_name']
        toolset = bf_tgt['toolset']
        qualified_target = gyp.common.QualifiedTarget(self.gyp_path,
                                                      tgt_name, toolset)
        xcode_target = xcode_targets[qualified_target]
        if isinstance(xcode_target, gyp.xcodeproj_file.PBXAggregateTarget):
          # Collect all the run test targets.
          all_run_tests = []
          pbxtds = xcode_target.GetProperty('dependencies')
          for pbxtd in pbxtds:
            pbxcip = pbxtd.GetProperty('targetProxy')
            dependency_xct = pbxcip.GetProperty('remoteGlobalIDString')
            # test_runner is attached to targets by Finalize1 for targets
            # carrying 'run_as'.
            if hasattr(dependency_xct, 'test_runner'):
              all_run_tests.append(dependency_xct.test_runner)
          # Directly depend on all the runners as they depend on the target
          # that builds them.
          if len(all_run_tests) > 0:
            run_all_target = gyp.xcodeproj_file.PBXAggregateTarget({
                  'name': 'Run %s Tests' % tgt_name,
                  'productName': tgt_name,
                },
                parent=self.project)
            for run_test_target in all_run_tests:
              run_all_target.AddDependency(run_test_target)
            # Insert the test runner after the related target.
            idx = self.project._properties['targets'].index(xcode_target)
            self.project._properties['targets'].insert(idx + 1, run_all_target)
    # Update all references to other projects, to make sure that the lists of
    # remote products are complete. Otherwise, Xcode will fill them in when
    # it opens the project file, which will result in unnecessary diffs.
    # TODO(mark): This is evil because it relies on internal knowledge of
    # PBXProject._other_pbxprojects.
    for other_pbxproject in self.project._other_pbxprojects.keys():
      self.project.AddOrGetProjectReference(other_pbxproject)
    self.project.SortRemoteProductReferences()
    # Give everything an ID.
    self.project_file.ComputeIDs()
    # Make sure that no two objects in the project file have the same ID. If
    # multiple objects wind up with the same ID, upon loading the file, Xcode
    # will only recognize one object (the last one in the file?) and the
    # results are unpredictable.
    self.project_file.EnsureNoIDCollisions()
  def Write(self):
    """Serializes the project to disk, atomically and only when it changed.

    Writes to a temporary file inside the .xcodeproj directory, compares it
    to any existing project.pbxproj, and either renames it into place (when
    contents differ) or deletes it (when identical).
    """
    # Write the project file to a temporary location first. Xcode watches for
    # changes to the project file and presents a UI sheet offering to reload
    # the project when it does change. However, in some cases, especially when
    # multiple projects are open or when Xcode is busy, things don't work so
    # seamlessly. Sometimes, Xcode is able to detect that a project file has
    # changed but can't unload it because something else is referencing it.
    # To mitigate this problem, and to avoid even having Xcode present the UI
    # sheet when an open project is rewritten for inconsequential changes, the
    # project file is written to a temporary file in the xcodeproj directory
    # first. The new temporary file is then compared to the existing project
    # file, if any. If they differ, the new file replaces the old; otherwise,
    # the new project file is simply deleted. Xcode properly detects a file
    # being renamed over an open project file as a change and so it remains
    # able to present the "project file changed" sheet under this system.
    # Writing to a temporary file first also avoids the possible problem of
    # Xcode rereading an incomplete project file.
    (output_fd, new_pbxproj_path) = \
        tempfile.mkstemp(suffix='.tmp', prefix='project.pbxproj.gyp.',
                         dir=self.path)
    try:
      output_file = os.fdopen(output_fd, 'wb')
      self.project_file.Print(output_file)
      output_file.close()
      pbxproj_path = os.path.join(self.path, 'project.pbxproj')
      same = False
      try:
        same = filecmp.cmp(pbxproj_path, new_pbxproj_path, False)
      except OSError, e:
        # ENOENT just means there is no old project file yet.
        if e.errno != errno.ENOENT:
          raise
      if same:
        # The new file is identical to the old one, just get rid of the new
        # one.
        os.unlink(new_pbxproj_path)
      else:
        # The new file is different from the old one, or there is no old one.
        # Rename the new file to the permanent name.
        #
        # tempfile.mkstemp uses an overly restrictive mode, resulting in a
        # file that can only be read by the owner, regardless of the umask.
        # There's no reason to not respect the umask here, which means that
        # an extra hoop is required to fetch it and reset the new file's mode.
        #
        # No way to get the umask without setting a new one? Set a safe one
        # and then set it back to the old value.
        umask = os.umask(077)
        os.umask(umask)
        os.chmod(new_pbxproj_path, 0666 & ~umask)
        os.rename(new_pbxproj_path, pbxproj_path)
    except Exception:
      # Don't leave turds behind. In fact, if this code was responsible for
      # creating the xcodeproj directory, get rid of that too.
      os.unlink(new_pbxproj_path)
      if self.created_dir:
        shutil.rmtree(self.path, True)
      raise
def AddSourceToTarget(source, type, pbxp, xct):
  """Routes source into the right build phase of the Xcode target xct.

  Compilable files go into the target's sources phase and linkable files
  into its frameworks phase; anything else (or anything belonging to a
  target of type 'none') is only added to the project pbxp's root group,
  not to any build phase.
  """
  # TODO(mark): Perhaps source_extensions and library_extensions can be made a
  # little bit fancier.
  source_extensions = frozenset(
      ['c', 'cc', 'cpp', 'cxx', 'm', 'mm', 's', 'swift'])

  # .o is conceptually more of a "source" than a "library," but Xcode thinks
  # of "sources" as things to compile and "libraries" (or "frameworks") as
  # things to link with. Adding an object file to an Xcode target's frameworks
  # phase works properly.
  library_extensions = frozenset(['a', 'dylib', 'framework', 'o'])

  basename = posixpath.basename(source)
  # Only the extension matters; drop the leading dot and lowercase it so the
  # membership tests are case-insensitive.
  ext = posixpath.splitext(basename)[1]
  if ext:
    ext = ext[1:].lower()

  if ext in source_extensions and type != 'none':
    xct.SourcesPhase().AddFile(source)
  elif ext in library_extensions and type != 'none':
    xct.FrameworksPhase().AddFile(source)
  else:
    # Files that aren't added to a sources or frameworks build phase can still
    # go into the project file, just not as part of a build phase.
    pbxp.AddOrGetFileInRootGroup(source)
def AddResourceToTarget(resource, pbxp, xct):
  """Adds resource to the resources build phase of the Xcode target xct."""
  # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
  # where it's used.
  resources_phase = xct.ResourcesPhase()
  resources_phase.AddFile(resource)
def AddHeaderToTarget(header, pbxp, xct, is_public):
  """Adds header to the headers build phase of the Xcode target xct, with
  its visibility attribute set to Public when is_public is true, Private
  otherwise."""
  # TODO(mark): Combine with AddSourceToTarget above? Or just inline this call
  # where it's used.
  visibility = ('Private', 'Public')[is_public]
  settings = '{ATTRIBUTES = (%s, ); }' % visibility
  xct.HeadersPhase().AddFile(header, settings)
_xcode_variable_re = re.compile(r'(\$\((.*?)\))')
def ExpandXcodeVariables(string, expansions):
  """Expands Xcode-style $(VARIABLES) in string per the expansions dict.

  In some rare cases, it is appropriate to expand Xcode variables when a
  project file is generated. For any substring $(VAR) in string, if VAR is a
  key in the expansions dict, $(VAR) will be replaced with expansions[VAR].
  Any $(VAR) substring in string for which VAR is not a key in the expansions
  dict will remain in the returned string.
  """
  # findall always returns a (possibly empty) list, never None, so no
  # None-check is needed before iterating.
  matches = _xcode_variable_re.findall(string)
  matches.reverse()
  for match in matches:
    (to_replace, variable) = match
    if variable not in expansions:
      continue
    replacement = expansions[variable]
    # Plain string replacement, not re.sub: the replacement text must be
    # taken literally, whereas re.sub would interpret backslash sequences in
    # it as regex template escapes (and reject unknown ones).
    string = string.replace(to_replace, replacement)
  return string
_xcode_define_re = re.compile(r'([\\\"\' ])')
def EscapeXcodeDefine(s):
  """Backslash-escapes spaces, quotes, apostrophes and backslashes in s so
  Xcode treats them as literals rather than token separators. The define is
  deliberately not quoted as a whole, since quoting would stop Xcode from
  interpreting variables such as $(inherited)."""
  return _xcode_define_re.sub(r'\\\1', s)
def PerformBuild(data, configurations, params):
  """Runs xcodebuild for every generated project in every configuration.

  Args:
    data: dict keyed by build file path; only '.gyp' entries are built.
    configurations: list of configuration names passed to xcodebuild.
    params: generator parameters; params['options'] supplies the project
        suffix and the optional generator_output directory.

  Raises:
    subprocess.CalledProcessError: if an xcodebuild invocation fails.
  """
  options = params['options']

  # Only the keys (build file paths) are needed here; the per-file dicts
  # are not consulted.
  for build_file in data:
    (build_file_root, build_file_ext) = os.path.splitext(build_file)
    if build_file_ext != '.gyp':
      continue
    xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
    if options.generator_output:
      xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)

    for config in configurations:
      arguments = ['xcodebuild', '-project', xcodeproj_path]
      arguments += ['-configuration', config]
      # Parenthesized call form is valid under both Python 2 and Python 3.
      print("Building [%s]: %s" % (config, arguments))
      subprocess.check_call(arguments)
def GenerateOutput(target_list, target_dicts, data, params):
# Optionally configure each spec to use ninja as the external builder.
ninja_wrapper = params.get('flavor') == 'ninja'
if ninja_wrapper:
(target_list, target_dicts, data) = \
gyp.xcode_ninja.CreateWrapper(target_list, target_dicts, data, params)
options = params['options']
generator_flags = params.get('generator_flags', {})
parallel_builds = generator_flags.get('xcode_parallel_builds', True)
serialize_all_tests = \
generator_flags.get('xcode_serialize_all_test_runs', True)
project_version = generator_flags.get('xcode_project_version', None)
skip_excluded_files = \
not generator_flags.get('xcode_list_excluded_files', True)
xcode_projects = {}
for build_file, build_file_dict in data.iteritems():
(build_file_root, build_file_ext) = os.path.splitext(build_file)
if build_file_ext != '.gyp':
continue
xcodeproj_path = build_file_root + options.suffix + '.xcodeproj'
if options.generator_output:
xcodeproj_path = os.path.join(options.generator_output, xcodeproj_path)
xcp = XcodeProject(build_file, xcodeproj_path, build_file_dict)
xcode_projects[build_file] = xcp
pbxp = xcp.project
if parallel_builds:
pbxp.SetProperty('attributes',
{'BuildIndependentTargetsInParallel': 'YES'})
if project_version:
xcp.project_file.SetXcodeVersion(project_version)
# Add gyp/gypi files to project
if not generator_flags.get('standalone'):
main_group = pbxp.GetProperty('mainGroup')
build_group = gyp.xcodeproj_file.PBXGroup({'name': 'Build'})
main_group.AppendChild(build_group)
for included_file in build_file_dict['included_files']:
build_group.AddOrGetFileByPath(included_file, False)
xcode_targets = {}
xcode_target_to_target_dict = {}
for qualified_target in target_list:
[build_file, target_name, toolset] = \
gyp.common.ParseQualifiedTarget(qualified_target)
spec = target_dicts[qualified_target]
if spec['toolset'] != 'target':
raise Exception(
'Multiple toolsets not supported in xcode build (target %s)' %
qualified_target)
configuration_names = [spec['default_configuration']]
for configuration_name in sorted(spec['configurations'].keys()):
if configuration_name not in configuration_names:
configuration_names.append(configuration_name)
xcp = xcode_projects[build_file]
pbxp = xcp.project
# Set up the configurations for the target according to the list of names
# supplied.
xccl = CreateXCConfigurationList(configuration_names)
# Create an XCTarget subclass object for the target. The type with
# "+bundle" appended will be used if the target has "mac_bundle" set.
# loadable_modules not in a mac_bundle are mapped to
# com.googlecode.gyp.xcode.bundle, a pseudo-type that xcode.py interprets
# to create a single-file mh_bundle.
_types = {
'executable': 'com.apple.product-type.tool',
'loadable_module': 'com.googlecode.gyp.xcode.bundle',
'shared_library': 'com.apple.product-type.library.dynamic',
'static_library': 'com.apple.product-type.library.static',
'executable+bundle': 'com.apple.product-type.application',
'loadable_module+bundle': 'com.apple.product-type.bundle',
'loadable_module+xctest': 'com.apple.product-type.bundle.unit-test',
'shared_library+bundle': 'com.apple.product-type.framework',
'executable+extension+bundle': 'com.apple.product-type.app-extension',
'executable+watch+extension+bundle':
'com.apple.product-type.watchkit-extension',
'executable+watch+bundle': 'com.apple.product-type.application.watchapp',
}
target_properties = {
'buildConfigurationList': xccl,
'name': target_name,
}
type = spec['type']
is_xctest = int(spec.get('mac_xctest_bundle', 0))
is_bundle = int(spec.get('mac_bundle', 0)) or is_xctest
is_app_extension = int(spec.get('ios_app_extension', 0))
is_watchkit_extension = int(spec.get('ios_watchkit_extension', 0))
is_watch_app = int(spec.get('ios_watch_app', 0))
if type != 'none':
type_bundle_key = type
if is_xctest:
type_bundle_key += '+xctest'
assert type == 'loadable_module', (
'mac_xctest_bundle targets must have type loadable_module '
'(target %s)' % target_name)
elif is_app_extension:
assert is_bundle, ('ios_app_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+extension+bundle'
elif is_watchkit_extension:
assert is_bundle, ('ios_watchkit_extension flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+extension+bundle'
elif is_watch_app:
assert is_bundle, ('ios_watch_app flag requires mac_bundle '
'(target %s)' % target_name)
type_bundle_key += '+watch+bundle'
elif is_bundle:
type_bundle_key += '+bundle'
xctarget_type = gyp.xcodeproj_file.PBXNativeTarget
try:
target_properties['productType'] = _types[type_bundle_key]
except KeyError, e:
gyp.common.ExceptionAppend(e, "-- unknown product type while "
"writing target %s" % target_name)
raise
else:
xctarget_type = gyp.xcodeproj_file.PBXAggregateTarget
assert not is_bundle, (
'mac_bundle targets cannot have type none (target "%s")' %
target_name)
assert not is_xctest, (
'mac_xctest_bundle targets cannot have type none (target "%s")' %
target_name)
target_product_name = spec.get('product_name')
if target_product_name is not None:
target_properties['productName'] = target_product_name
xct = xctarget_type(target_properties, parent=pbxp,
force_outdir=spec.get('product_dir'),
force_prefix=spec.get('product_prefix'),
force_extension=spec.get('product_extension'))
pbxp.AppendProperty('targets', xct)
xcode_targets[qualified_target] = xct
xcode_target_to_target_dict[xct] = spec
spec_actions = spec.get('actions', [])
spec_rules = spec.get('rules', [])
# Xcode has some "issues" with checking dependencies for the "Compile
# sources" step with any source files/headers generated by actions/rules.
# To work around this, if a target is building anything directly (not
# type "none"), then a second target is used to run the GYP actions/rules
# and is made a dependency of this target. This way the work is done
# before the dependency checks for what should be recompiled.
support_xct = None
# The Xcode "issues" don't affect xcode-ninja builds, since the dependency
# logic all happens in ninja. Don't bother creating the extra targets in
# that case.
if type != 'none' and (spec_actions or spec_rules) and not ninja_wrapper:
support_xccl = CreateXCConfigurationList(configuration_names);
support_target_suffix = generator_flags.get(
'support_target_suffix', ' Support')
support_target_properties = {
'buildConfigurationList': support_xccl,
'name': target_name + support_target_suffix,
}
if target_product_name:
support_target_properties['productName'] = \
target_product_name + ' Support'
support_xct = \
gyp.xcodeproj_file.PBXAggregateTarget(support_target_properties,
parent=pbxp)
pbxp.AppendProperty('targets', support_xct)
xct.AddDependency(support_xct)
# Hang the support target off the main target so it can be tested/found
# by the generator during Finalize.
xct.support_target = support_xct
prebuild_index = 0
# Add custom shell script phases for "actions" sections.
for action in spec_actions:
# There's no need to write anything into the script to ensure that the
# output directories already exist, because Xcode will look at the
# declared outputs and automatically ensure that they exist for us.
# Do we have a message to print when this action runs?
message = action.get('message')
if message:
message = 'echo note: ' + gyp.common.EncodePOSIXShellArgument(message)
else:
message = ''
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(action['action'])
# Convert Xcode-type variable references to sh-compatible environment
# variable references.
message_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(message)
action_string_sh = gyp.xcodeproj_file.ConvertVariablesToShellSyntax(
action_string)
script = ''
# Include the optional message
if message_sh:
script += message_sh + '\n'
# Be sure the script runs in exec, and that if exec fails, the script
# exits signalling an error.
script += 'exec ' + action_string_sh + '\nexit 1\n'
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': action['inputs'],
'name': 'Action "' + action['action_name'] + '"',
'outputPaths': action['outputs'],
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# TODO(mark): Should verify that at most one of these is specified.
if int(action.get('process_outputs_as_sources', False)):
for output in action['outputs']:
AddSourceToTarget(output, type, pbxp, xct)
if int(action.get('process_outputs_as_mac_bundle_resources', False)):
for output in action['outputs']:
AddResourceToTarget(output, pbxp, xct)
# tgt_mac_bundle_resources holds the list of bundle resources so
# the rule processing can check against it.
if is_bundle:
tgt_mac_bundle_resources = spec.get('mac_bundle_resources', [])
else:
tgt_mac_bundle_resources = []
# Add custom shell script phases driving "make" for "rules" sections.
#
# Xcode's built-in rule support is almost powerful enough to use directly,
# but there are a few significant deficiencies that render them unusable.
# There are workarounds for some of its inadequacies, but in aggregate,
# the workarounds added complexity to the generator, and some workarounds
# actually require input files to be crafted more carefully than I'd like.
# Consequently, until Xcode rules are made more capable, "rules" input
# sections will be handled in Xcode output by shell script build phases
# performed prior to the compilation phase.
#
# The following problems with Xcode rules were found. The numbers are
# Apple radar IDs. I hope that these shortcomings are addressed, I really
# liked having the rules handled directly in Xcode during the period that
# I was prototyping this.
#
# 6588600 Xcode compiles custom script rule outputs too soon, compilation
# fails. This occurs when rule outputs from distinct inputs are
# interdependent. The only workaround is to put rules and their
# inputs in a separate target from the one that compiles the rule
# outputs. This requires input file cooperation and it means that
# process_outputs_as_sources is unusable.
# 6584932 Need to declare that custom rule outputs should be excluded from
# compilation. A possible workaround is to lie to Xcode about a
# rule's output, giving it a dummy file it doesn't know how to
# compile. The rule action script would need to touch the dummy.
# 6584839 I need a way to declare additional inputs to a custom rule.
# A possible workaround is a shell script phase prior to
# compilation that touches a rule's primary input files if any
# would-be additional inputs are newer than the output. Modifying
# the source tree - even just modification times - feels dirty.
# 6564240 Xcode "custom script" build rules always dump all environment
# variables. This is a low-prioroty problem and is not a
# show-stopper.
rules_by_ext = {}
for rule in spec_rules:
rules_by_ext[rule['extension']] = rule
# First, some definitions:
#
# A "rule source" is a file that was listed in a target's "sources"
# list and will have a rule applied to it on the basis of matching the
# rule's "extensions" attribute. Rule sources are direct inputs to
# rules.
#
# Rule definitions may specify additional inputs in their "inputs"
# attribute. These additional inputs are used for dependency tracking
# purposes.
#
# A "concrete output" is a rule output with input-dependent variables
# resolved. For example, given a rule with:
# 'extension': 'ext', 'outputs': ['$(INPUT_FILE_BASE).cc'],
# if the target's "sources" list contained "one.ext" and "two.ext",
# the "concrete output" for rule input "two.ext" would be "two.cc". If
# a rule specifies multiple outputs, each input file that the rule is
# applied to will have the same number of concrete outputs.
#
# If any concrete outputs are outdated or missing relative to their
# corresponding rule_source or to any specified additional input, the
# rule action must be performed to generate the concrete outputs.
# concrete_outputs_by_rule_source will have an item at the same index
# as the rule['rule_sources'] that it corresponds to. Each item is a
# list of all of the concrete outputs for the rule_source.
concrete_outputs_by_rule_source = []
# concrete_outputs_all is a flat list of all concrete outputs that this
# rule is able to produce, given the known set of input files
# (rule_sources) that apply to it.
concrete_outputs_all = []
# messages & actions are keyed by the same indices as rule['rule_sources']
# and concrete_outputs_by_rule_source. They contain the message and
# action to perform after resolving input-dependent variables. The
# message is optional, in which case None is stored for each rule source.
messages = []
actions = []
for rule_source in rule.get('rule_sources', []):
rule_source_dirname, rule_source_basename = \
posixpath.split(rule_source)
(rule_source_root, rule_source_ext) = \
posixpath.splitext(rule_source_basename)
# These are the same variable names that Xcode uses for its own native
# rule support. Because Xcode's rule engine is not being used, they
# need to be expanded as they are written to the makefile.
rule_input_dict = {
'INPUT_FILE_BASE': rule_source_root,
'INPUT_FILE_SUFFIX': rule_source_ext,
'INPUT_FILE_NAME': rule_source_basename,
'INPUT_FILE_PATH': rule_source,
'INPUT_FILE_DIRNAME': rule_source_dirname,
}
concrete_outputs_for_this_rule_source = []
for output in rule.get('outputs', []):
# Fortunately, Xcode and make both use $(VAR) format for their
# variables, so the expansion is the only transformation necessary.
# Any remaning $(VAR)-type variables in the string can be given
# directly to make, which will pick up the correct settings from
# what Xcode puts into the environment.
concrete_output = ExpandXcodeVariables(output, rule_input_dict)
concrete_outputs_for_this_rule_source.append(concrete_output)
# Add all concrete outputs to the project.
pbxp.AddOrGetFileInRootGroup(concrete_output)
concrete_outputs_by_rule_source.append( \
concrete_outputs_for_this_rule_source)
concrete_outputs_all.extend(concrete_outputs_for_this_rule_source)
# TODO(mark): Should verify that at most one of these is specified.
if int(rule.get('process_outputs_as_sources', False)):
for output in concrete_outputs_for_this_rule_source:
AddSourceToTarget(output, type, pbxp, xct)
# If the file came from the mac_bundle_resources list or if the rule
# is marked to process outputs as bundle resource, do so.
was_mac_bundle_resource = rule_source in tgt_mac_bundle_resources
if was_mac_bundle_resource or \
int(rule.get('process_outputs_as_mac_bundle_resources', False)):
for output in concrete_outputs_for_this_rule_source:
AddResourceToTarget(output, pbxp, xct)
# Do we have a message to print when this rule runs?
message = rule.get('message')
if message:
message = gyp.common.EncodePOSIXShellArgument(message)
message = ExpandXcodeVariables(message, rule_input_dict)
messages.append(message)
# Turn the list into a string that can be passed to a shell.
action_string = gyp.common.EncodePOSIXShellList(rule['action'])
action = ExpandXcodeVariables(action_string, rule_input_dict)
actions.append(action)
if len(concrete_outputs_all) > 0:
# TODO(mark): There's a possibilty for collision here. Consider
# target "t" rule "A_r" and target "t_A" rule "r".
makefile_name = '%s.make' % re.sub(
'[^a-zA-Z0-9_]', '_' , '%s_%s' % (target_name, rule['rule_name']))
makefile_path = os.path.join(xcode_projects[build_file].path,
makefile_name)
# TODO(mark): try/close? Write to a temporary file and swap it only
# if it's got changes?
makefile = open(makefile_path, 'wb')
# make will build the first target in the makefile by default. By
# convention, it's called "all". List all (or at least one)
# concrete output for each rule source as a prerequisite of the "all"
# target.
makefile.write('all: \\\n')
for concrete_output_index in \
xrange(0, len(concrete_outputs_by_rule_source)):
# Only list the first (index [0]) concrete output of each input
# in the "all" target. Otherwise, a parallel make (-j > 1) would
# attempt to process each input multiple times simultaneously.
# Otherwise, "all" could just contain the entire list of
# concrete_outputs_all.
concrete_output = \
concrete_outputs_by_rule_source[concrete_output_index][0]
if concrete_output_index == len(concrete_outputs_by_rule_source) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (concrete_output, eol))
for (rule_source, concrete_outputs, message, action) in \
zip(rule['rule_sources'], concrete_outputs_by_rule_source,
messages, actions):
makefile.write('\n')
# Add a rule that declares it can build each concrete output of a
# rule source. Collect the names of the directories that are
# required.
concrete_output_dirs = []
for concrete_output_index in xrange(0, len(concrete_outputs)):
concrete_output = concrete_outputs[concrete_output_index]
if concrete_output_index == 0:
bol = ''
else:
bol = ' '
makefile.write('%s%s \\\n' % (bol, concrete_output))
concrete_output_dir = posixpath.dirname(concrete_output)
if (concrete_output_dir and
concrete_output_dir not in concrete_output_dirs):
concrete_output_dirs.append(concrete_output_dir)
makefile.write(' : \\\n')
# The prerequisites for this rule are the rule source itself and
# the set of additional rule inputs, if any.
prerequisites = [rule_source]
prerequisites.extend(rule.get('inputs', []))
for prerequisite_index in xrange(0, len(prerequisites)):
prerequisite = prerequisites[prerequisite_index]
if prerequisite_index == len(prerequisites) - 1:
eol = ''
else:
eol = ' \\'
makefile.write(' %s%s\n' % (prerequisite, eol))
# Make sure that output directories exist before executing the rule
# action.
if len(concrete_output_dirs) > 0:
makefile.write('\t@mkdir -p "%s"\n' %
'" "'.join(concrete_output_dirs))
# The rule message and action have already had the necessary variable
# substitutions performed.
if message:
# Mark it with note: so Xcode picks it up in build output.
makefile.write('\t@echo note: %s\n' % message)
makefile.write('\t%s\n' % action)
makefile.close()
# It might be nice to ensure that needed output directories exist
# here rather than in each target in the Makefile, but that wouldn't
# work if there ever was a concrete output that had an input-dependent
# variable anywhere other than in the leaf position.
# Don't declare any inputPaths or outputPaths. If they're present,
# Xcode will provide a slight optimization by only running the script
# phase if any output is missing or outdated relative to any input.
# Unfortunately, it will also assume that all outputs are touched by
# the script, and if the outputs serve as files in a compilation
# phase, they will be unconditionally rebuilt. Since make might not
# rebuild everything that could be declared here as an output, this
# extra compilation activity is unnecessary. With inputPaths and
# outputPaths not supplied, make will always be called, but it knows
# enough to not do anything when everything is up-to-date.
# To help speed things up, pass -j COUNT to make so it does some work
# in parallel. Don't use ncpus because Xcode will build ncpus targets
# in parallel and if each target happens to have a rules step, there
# would be ncpus^2 things going. With a machine that has 2 quad-core
# Xeons, a build can quickly run out of processes based on
# scheduling/other tasks, and randomly failing builds are no good.
script = \
"""JOB_COUNT="$(/usr/sbin/sysctl -n hw.ncpu)"
if [ "${JOB_COUNT}" -gt 4 ]; then
JOB_COUNT=4
fi
exec xcrun make -f "${PROJECT_FILE_PATH}/%s" -j "${JOB_COUNT}"
exit 1
""" % makefile_name
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'name': 'Rule "' + rule['rule_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
if support_xct:
support_xct.AppendProperty('buildPhases', ssbp)
else:
# TODO(mark): this assumes too much knowledge of the internals of
# xcodeproj_file; some of these smarts should move into xcodeproj_file
# itself.
xct._properties['buildPhases'].insert(prebuild_index, ssbp)
prebuild_index = prebuild_index + 1
# Extra rule inputs also go into the project file. Concrete outputs were
# already added when they were computed.
groups = ['inputs', 'inputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for group in groups:
for item in rule.get(group, []):
pbxp.AddOrGetFileInRootGroup(item)
# Add "sources".
for source in spec.get('sources', []):
(source_root, source_extension) = posixpath.splitext(source)
if source_extension[1:] not in rules_by_ext:
# AddSourceToTarget will add the file to a root group if it's not
# already there.
AddSourceToTarget(source, type, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(source)
# Add "mac_bundle_resources" and "mac_framework_private_headers" if
# it's a bundle of any type.
if is_bundle:
for resource in tgt_mac_bundle_resources:
(resource_root, resource_extension) = posixpath.splitext(resource)
if resource_extension[1:] not in rules_by_ext:
AddResourceToTarget(resource, pbxp, xct)
else:
pbxp.AddOrGetFileInRootGroup(resource)
for header in spec.get('mac_framework_private_headers', []):
AddHeaderToTarget(header, pbxp, xct, False)
# Add "mac_framework_headers". These can be valid for both frameworks
# and static libraries.
if is_bundle or type == 'static_library':
for header in spec.get('mac_framework_headers', []):
AddHeaderToTarget(header, pbxp, xct, True)
# Add "copies".
pbxcp_dict = {}
for copy_group in spec.get('copies', []):
dest = copy_group['destination']
if dest[0] not in ('/', '$'):
# Relative paths are relative to $(SRCROOT).
dest = '$(SRCROOT)/' + dest
# Coalesce multiple "copies" sections in the same target with the same
# "destination" property into the same PBXCopyFilesBuildPhase, otherwise
# they'll wind up with ID collisions.
pbxcp = pbxcp_dict.get(dest, None)
if pbxcp is None:
pbxcp = gyp.xcodeproj_file.PBXCopyFilesBuildPhase({
'name': 'Copy to ' + copy_group['destination']
},
parent=xct)
pbxcp.SetDestination(dest)
# TODO(mark): The usual comment about this knowing too much about
# gyp.xcodeproj_file internals applies.
xct._properties['buildPhases'].insert(prebuild_index, pbxcp)
pbxcp_dict[dest] = pbxcp
for file in copy_group['files']:
pbxcp.AddFile(file)
# Excluded files can also go into the project file.
if not skip_excluded_files:
for key in ['sources', 'mac_bundle_resources', 'mac_framework_headers',
'mac_framework_private_headers']:
excluded_key = key + '_excluded'
for item in spec.get(excluded_key, []):
pbxp.AddOrGetFileInRootGroup(item)
# So can "inputs" and "outputs" sections of "actions" groups.
groups = ['inputs', 'inputs_excluded', 'outputs', 'outputs_excluded']
if skip_excluded_files:
groups = [x for x in groups if not x.endswith('_excluded')]
for action in spec.get('actions', []):
for group in groups:
for item in action.get(group, []):
# Exclude anything in BUILT_PRODUCTS_DIR. They're products, not
# sources.
if not item.startswith('$(BUILT_PRODUCTS_DIR)/'):
pbxp.AddOrGetFileInRootGroup(item)
for postbuild in spec.get('postbuilds', []):
action_string_sh = gyp.common.EncodePOSIXShellList(postbuild['action'])
script = 'exec ' + action_string_sh + '\nexit 1\n'
# Make the postbuild step depend on the output of ld or ar from this
# target. Apparently putting the script step after the link step isn't
# sufficient to ensure proper ordering in all cases. With an input
# declared but no outputs, the script step should run every time, as
# desired.
ssbp = gyp.xcodeproj_file.PBXShellScriptBuildPhase({
'inputPaths': ['$(BUILT_PRODUCTS_DIR)/$(EXECUTABLE_PATH)'],
'name': 'Postbuild "' + postbuild['postbuild_name'] + '"',
'shellScript': script,
'showEnvVarsInLog': 0,
})
xct.AppendProperty('buildPhases', ssbp)
# Add dependencies before libraries, because adding a dependency may imply
# adding a library. It's preferable to keep dependencies listed first
# during a link phase so that they can override symbols that would
# otherwise be provided by libraries, which will usually include system
# libraries. On some systems, ld is finicky and even requires the
# libraries to be ordered in such a way that unresolved symbols in
# earlier-listed libraries may only be resolved by later-listed libraries.
# The Mac linker doesn't work that way, but other platforms do, and so
# their linker invocations need to be constructed in this way. There's
# no compelling reason for Xcode's linker invocations to differ.
if 'dependencies' in spec:
for dependency in spec['dependencies']:
xct.AddDependency(xcode_targets[dependency])
# The support project also gets the dependencies (in case they are
# needed for the actions/rules to work).
if support_xct:
support_xct.AddDependency(xcode_targets[dependency])
if 'libraries' in spec:
for library in spec['libraries']:
xct.FrameworksPhase().AddFile(library)
# Add the library's directory to LIBRARY_SEARCH_PATHS if necessary.
# I wish Xcode handled this automatically.
library_dir = posixpath.dirname(library)
if library_dir not in xcode_standard_library_dirs and (
not xct.HasBuildSetting(_library_search_paths_var) or
library_dir not in xct.GetBuildSetting(_library_search_paths_var)):
xct.AppendBuildSetting(_library_search_paths_var, library_dir)
for configuration_name in configuration_names:
configuration = spec['configurations'][configuration_name]
xcbc = xct.ConfigurationNamed(configuration_name)
for include_dir in configuration.get('mac_framework_dirs', []):
xcbc.AppendBuildSetting('FRAMEWORK_SEARCH_PATHS', include_dir)
for include_dir in configuration.get('include_dirs', []):
xcbc.AppendBuildSetting('HEADER_SEARCH_PATHS', include_dir)
for library_dir in configuration.get('library_dirs', []):
if library_dir not in xcode_standard_library_dirs and (
not xcbc.HasBuildSetting(_library_search_paths_var) or
library_dir not in xcbc.GetBuildSetting(_library_search_paths_var)):
xcbc.AppendBuildSetting(_library_search_paths_var, library_dir)
if 'defines' in configuration:
for define in configuration['defines']:
set_define = EscapeXcodeDefine(define)
xcbc.AppendBuildSetting('GCC_PREPROCESSOR_DEFINITIONS', set_define)
if 'xcode_settings' in configuration:
for xck, xcv in configuration['xcode_settings'].iteritems():
xcbc.SetBuildSetting(xck, xcv)
if 'xcode_config_file' in configuration:
config_ref = pbxp.AddOrGetFileInRootGroup(
configuration['xcode_config_file'])
xcbc.SetBaseConfiguration(config_ref)
build_files = []
for build_file, build_file_dict in data.iteritems():
if build_file.endswith('.gyp'):
build_files.append(build_file)
for build_file in build_files:
xcode_projects[build_file].Finalize1(xcode_targets, serialize_all_tests)
for build_file in build_files:
xcode_projects[build_file].Finalize2(xcode_targets,
xcode_target_to_target_dict)
for build_file in build_files:
xcode_projects[build_file].Write()
| bsd-3-clause |
lgscofield/odoo | addons/sale_order_dates/__openerp__.py | 260 | 1771 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Dates on Sales Order',
'version': '1.1',
'category': 'Sales Management',
'description': """
Add additional date information to the sales order.
===================================================
You can add the following additional dates to a sales order:
------------------------------------------------------------
* Requested Date (will be used as the expected date on pickings)
* Commitment Date
* Effective Date
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/crm',
'depends': ['sale_stock'],
'data': ['sale_order_dates_view.xml'],
'demo': [],
'test': ['test/requested_date.yml'],
'installable': True,
'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
nzlosh/st2 | tools/migrate_triggers_to_include_ref_count.py | 3 | 2553 | #!/usr/bin/env python
# Copyright 2020 The StackStorm Authors.
# Copyright 2019 Extreme Networks, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from mongoengine.queryset import Q
from st2common import config
from st2common.script_setup import setup as common_setup
from st2common.script_setup import teardown as common_teardown
from st2common.persistence.rule import Rule
from st2common.persistence.trigger import Trigger
from st2common.models.db.trigger import TriggerDB
class TriggerMigrator(object):
    """Recompute ``ref_count`` for every parameterized Trigger.

    ``ref_count`` is the number of rules that reference a trigger; this
    migrator derives the value from the current Rule collection and writes
    it back without publishing any events.
    """

    def _get_trigger_with_parameters(self):
        """Return all TriggerDB objects carrying a non-empty ``parameters`` dict."""
        has_params = Q(parameters__exists=True) & Q(parameters__nin=[{}])
        return TriggerDB.objects(has_params)

    def _get_rules_for_trigger(self, trigger_ref):
        """Return every rule that references the supplied ``trigger_ref``."""
        return Rule.get_all(trigger=trigger_ref)

    def _update_trigger_ref_count(self, trigger_db, ref_count):
        """Persist ``ref_count`` on ``trigger_db`` (no publish, no trigger dispatch)."""
        trigger_db.ref_count = ref_count
        Trigger.add_or_update(trigger_db, publish=False, dispatch_trigger=False)

    def migrate(self):
        """Recompute and store the ref_count of each parameterized trigger."""
        for trigger_db in self._get_trigger_with_parameters():
            ref = trigger_db.get_reference().ref
            matching_rules = self._get_rules_for_trigger(trigger_ref=ref)
            count = len(matching_rules)
            print("Updating Trigger %s to ref_count %s" % (ref, count))
            self._update_trigger_ref_count(trigger_db=trigger_db, ref_count=count)
def setup():
    """Initialize common services: config parsing, DB connection, MQ exchanges."""
    options = {"config": config, "setup_db": True, "register_mq_exchanges": True}
    common_setup(**options)
def teartown():
common_teardown()
def main():
    """Script entry point: set up services, run the migration, always tear down."""
    setup()
    try:
        migrator = TriggerMigrator()
        migrator.migrate()
    finally:
        # Guarantee cleanup even when migration raises.
        teartown()


if __name__ == "__main__":
    main()
| apache-2.0 |
ewbankkit/cloud-custodian | tests/test_rdsparamgroup.py | 6 | 9956 | # Copyright 2017 Capital One Services, LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import, division, print_function, unicode_literals
from .common import BaseTest, functional
from botocore.exceptions import ClientError
class RDSParamGroupTest(BaseTest):
    """Functional tests for the ``rds-param-group`` resource.

    NOTE: these tests replay recorded AWS API "flight data"; the sequence
    of boto3 calls must match the recording exactly, so statement order is
    significant and must not be changed.
    """

    @functional
    def test_rdsparamgroup_delete(self):
        """The ``delete`` action removes a matching DB parameter group."""
        session_factory = self.replay_flight_data("test_rdsparamgroup_delete")
        client = session_factory().client("rds")
        name = "pg-test"
        # Create the PG
        client.create_db_parameter_group(
            DBParameterGroupName=name,
            DBParameterGroupFamily="mysql5.5",
            Description="test",
        )
        # Ensure it exists
        ret = client.describe_db_parameter_groups(DBParameterGroupName=name)
        self.assertEqual(len(ret["DBParameterGroups"]), 1)
        # Delete it via custodian
        p = self.load_policy(
            {
                "name": "rdspg-delete",
                "resource": "rds-param-group",
                "filters": [{"DBParameterGroupName": name}],
                "actions": [{"type": "delete"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Verify it is gone: describing a deleted group raises ClientError.
        try:
            client.describe_db_parameter_groups(DBParameterGroupName=name)
        except ClientError:
            pass
        else:
            self.fail("parameter group {} still exists".format(name))
        # NOTE(review): cleanup is registered after deletion was verified, so
        # it targets an already-deleted group; presumably harmless during
        # replay — confirm behavior for live ``-functional`` runs.
        self.addCleanup(client.delete_db_parameter_group, DBParameterGroupName=name)

    @functional
    def test_rdsparamgroup_copy(self):
        """The ``copy`` action clones a DB parameter group under a new name."""
        session_factory = self.replay_flight_data("test_rdsparamgroup_copy")
        client = session_factory().client("rds")
        name = "pg-orig"
        copy_name = "pg-copy"
        # Create the PG
        client.create_db_parameter_group(
            DBParameterGroupName=name,
            DBParameterGroupFamily="mysql5.5",
            Description="test",
        )
        self.addCleanup(client.delete_db_parameter_group, DBParameterGroupName=name)
        # Copy it via custodian
        p = self.load_policy(
            {
                "name": "rdspg-copy",
                "resource": "rds-param-group",
                "filters": [{"DBParameterGroupName": name}],
                "actions": [{"type": "copy", "name": copy_name}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Ensure the copy exists
        ret = client.describe_db_parameter_groups(DBParameterGroupName=copy_name)
        self.assertEqual(len(ret["DBParameterGroups"]), 1)
        self.addCleanup(
            client.delete_db_parameter_group, DBParameterGroupName=copy_name
        )

    @functional
    def test_rdsparamgroup_modify(self):
        """The ``modify`` action sets individual parameters on a group."""
        session_factory = self.replay_flight_data("test_rdsparamgroup_modify")
        client = session_factory().client("rds")
        name = "pg-test"
        # Create the PG
        client.create_db_parameter_group(
            DBParameterGroupName=name,
            DBParameterGroupFamily="mysql5.5",
            Description="test",
        )
        self.addCleanup(client.delete_db_parameter_group, DBParameterGroupName=name)
        # Modify it via custodian
        p = self.load_policy(
            {
                "name": "rdspg-modify",
                "resource": "rds-param-group",
                "filters": [{"DBParameterGroupName": name}],
                "actions": [
                    {
                        "type": "modify",
                        "params": [
                            {"name": "autocommit", "value": "0"},
                            {"name": "automatic_sp_privileges", "value": "1"},
                        ],
                    }
                ],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Ensure that both params were set; scan the (large) parameter list
        # and stop early once the two we modified have been checked.
        ret = client.describe_db_parameters(DBParameterGroupName=name)
        count = 0
        for param in ret["Parameters"]:
            if param["ParameterName"] == "autocommit":
                self.assertEqual(param["ParameterValue"], "0")
                count += 1
            elif param["ParameterName"] == "automatic_sp_privileges":
                self.assertEqual(param["ParameterValue"], "1")
                count += 1
            if count == 2:
                break
        self.assertEqual(count, 2)
class RDSClusterParamGroupTest(BaseTest):
    @functional
    def test_rdsclusterparamgroup_delete(self):
        """The ``delete`` action removes a DB *cluster* parameter group.

        NOTE: this test replays recorded AWS API flight data; the sequence
        of boto3 calls must match the recording, so do not reorder calls.
        """
        session_factory = self.replay_flight_data("test_rdsclusterparamgroup_delete")
        client = session_factory().client("rds")
        name = "pg-cluster-test"
        # Create the PG
        client.create_db_cluster_parameter_group(
            DBClusterParameterGroupName=name,
            DBParameterGroupFamily="aurora5.6",
            Description="test",
        )
        # Ensure it exists
        ret = client.describe_db_cluster_parameter_groups(
            DBClusterParameterGroupName=name
        )
        self.assertEqual(len(ret["DBClusterParameterGroups"]), 1)
        # Delete it via custodian
        p = self.load_policy(
            {
                "name": "rdspgc-delete",
                "resource": "rds-cluster-param-group",
                "filters": [{"DBClusterParameterGroupName": name}],
                "actions": [{"type": "delete"}],
            },
            session_factory=session_factory,
        )
        resources = p.run()
        self.assertEqual(len(resources), 1)
        # Verify it is gone: describing a deleted group raises ClientError.
        try:
            client.describe_db_cluster_parameter_groups(
                DBClusterParameterGroupName=name
            )
        except ClientError:
            pass
        else:
            self.fail("parameter group cluster {} still exists".format(name))
        # NOTE(review): cleanup registered for an already-deleted group;
        # presumably harmless in replay mode — confirm for live runs.
        self.addCleanup(
            client.delete_db_cluster_parameter_group,
            DBClusterParameterGroupName=name,
        )
@functional
def test_rdsclusterparamgroup_copy(self):
session_factory = self.replay_flight_data("test_rdsclusterparamgroup_copy")
client = session_factory().client("rds")
name = "pgc-orig"
copy_name = "pgc-copy"
# Create the PG
client.create_db_cluster_parameter_group(
DBClusterParameterGroupName=name,
DBParameterGroupFamily="aurora5.6",
Description="test",
)
self.addCleanup(
client.delete_db_cluster_parameter_group, DBClusterParameterGroupName=name
)
# Copy it via custodian
p = self.load_policy(
{
"name": "rdspgc-copy",
"resource": "rds-cluster-param-group",
"filters": [{"DBClusterParameterGroupName": name}],
"actions": [{"type": "copy", "name": copy_name}],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
# Ensure it exists
ret = client.describe_db_cluster_parameter_groups(
DBClusterParameterGroupName=copy_name
)
self.assertEqual(len(ret["DBClusterParameterGroups"]), 1)
self.addCleanup(
client.delete_db_cluster_parameter_group,
DBClusterParameterGroupName=copy_name,
)
@functional
def test_rdsclusterparamgroup_modify(self):
session_factory = self.replay_flight_data("test_rdsclusterparamgroup_modify")
client = session_factory().client("rds")
name = "pgc-test"
# Create the PG
client.create_db_cluster_parameter_group(
DBClusterParameterGroupName=name,
DBParameterGroupFamily="aurora5.6",
Description="test",
)
self.addCleanup(
client.delete_db_cluster_parameter_group, DBClusterParameterGroupName=name
)
# Modify it via custodian
p = self.load_policy(
{
"name": "rdspgc-modify",
"resource": "rds-cluster-param-group",
"filters": [{"DBClusterParameterGroupName": name}],
"actions": [
{
"type": "modify",
"params": [
{"name": "auto_increment_increment", "value": "1"},
{"name": "auto_increment_offset", "value": "2"},
],
}
],
},
session_factory=session_factory,
)
resources = p.run()
self.assertEqual(len(resources), 1)
# Ensure that params were set
ret = client.describe_db_cluster_parameters(DBClusterParameterGroupName=name)
count = 0
for param in ret["Parameters"]:
if param["ParameterName"] == "auto_increment_increment":
self.assertEqual(param["ParameterValue"], "1")
count += 1
elif param["ParameterName"] == "auto_increment_offset":
self.assertEqual(param["ParameterValue"], "2")
count += 1
if count == 2:
break
self.assertEqual(count, 2)
| apache-2.0 |
arg-hya/taxiCab | Tools/Misc/TaskPointGenerator.py | 1 | 1502 | import json
import shapefile as shp
import matplotlib.pyplot as plt
import random
def mean(numbers):
    """Return the arithmetic mean of *numbers* as a float.

    An empty sequence yields 0.0 instead of raising ZeroDivisionError.
    """
    total = float(sum(numbers))
    divisor = len(numbers) if numbers else 1
    return total / divisor
# Accumulators: centroid coordinates of every taxi zone, and the generated
# task points keyed by zone index.
numbersX = []
numbersY = []
TaskPoints = {}
# NOTE(review): hard-coded local Windows path -- the script only runs where
# this shapefile exists.
shpFilePath = r"D:\TaxiCab\mycode\Plots\ShapefileAndTrajectory\taxi_zones\taxi_zones"
sf = shp.Reader(shpFilePath)
records = sf.records()
plt.figure()
# Plot every zone outline and record the mean of its boundary points
# (a rough centroid) for labelling and task generation.
for shape in sf.shapeRecords():
    #print(records[0][3])
    x = [i[0] for i in shape.shape.points[:]]
    meanX = mean(x)
    numbersX.append(meanX)
    y = [i[1] for i in shape.shape.points[:]]
    meanY = mean(y)
    numbersY.append(meanY)
    plt.plot(x,y)
# Label each centroid with its zone index.
num = 0 ##range(263)
for x, y in zip(numbersX, numbersY):
    plt.text(x, y, str(num), color="red", fontsize=12)
    num = num + 1
plt.plot(numbersX, numbersY, 'o', color='blue', markersize=7, markeredgewidth=0.0)
#print (len(numbersX))
#print (numbersY)
plt.show()
# Random ranges for the generated task attributes: day-of-month and "beta".
Date_min = 1
Date_max = 30
Beta_min = 2
Beta_max = 30
#print (range(len(numbersX)))
# Build one task point per zone: [record name, x, y, random beta, random
# January 2017 date].
for i in range(len(numbersX)):
    date = "2017/1/"
    TaskPoints_trace = []
    TaskPoints_trace.append(records[i][3])
    TaskPoints_trace.append(numbersX[i])
    TaskPoints_trace.append(numbersY[i])
    TaskPoints_trace.append(random.randint(Beta_min, Beta_max))
    date += str(random.randint(Date_min, Date_max))
    TaskPoints_trace.append(date)
    TaskPoints[i] = TaskPoints_trace
# NOTE(review): the output file handle is never closed explicitly.
json.dump(TaskPoints, open('Data1/TaxiZone_TaskPoints.json', 'w'), indent=4, sort_keys=True, separators=(',', ':'))
| gpl-3.0 |
kleins11/intdatasci-byte2 | jmankoff-mobile/lib/jinja2/bccache.py | 346 | 12793 | # -*- coding: utf-8 -*-
"""
jinja2.bccache
~~~~~~~~~~~~~~
This module implements the bytecode cache system Jinja is optionally
using. This is useful if you have very complex template situations and
the compiliation of all those templates slow down your application too
much.
Situations where this is useful are often forking web applications that
are initialized on the first request.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD.
"""
from os import path, listdir
import os
import sys
import stat
import errno
import marshal
import tempfile
import fnmatch
from hashlib import sha1
from jinja2.utils import open_if_exists
from jinja2._compat import BytesIO, pickle, PY2, text_type
# marshal works better on 3.x, one hack less required
if not PY2:
    marshal_dump = marshal.dump
    marshal_load = marshal.load
else:
    def marshal_dump(code, f):
        # Python 2's marshal.dump only accepts real file objects; for
        # file-like objects serialize to a string and write that instead.
        if isinstance(f, file):
            marshal.dump(code, f)
        else:
            f.write(marshal.dumps(code))

    def marshal_load(f):
        if isinstance(f, file):
            return marshal.load(f)
        return marshal.loads(f.read())


bc_version = 2

# magic version used to only change with new jinja versions. With 2.6
# we change this to also take Python version changes into account. The
# reason for this is that Python tends to segfault if fed earlier bytecode
# versions because someone thought it would be a good idea to reuse opcodes
# or make Python incompatible with earlier versions.
bc_magic = 'j2'.encode('ascii') + \
    pickle.dumps(bc_version, 2) + \
    pickle.dumps((sys.version_info[0] << 24) | sys.version_info[1])
class Bucket(object):
    """Buckets are used to store the bytecode for one template.  It's created
    and initialized by the bytecode cache and passed to the loading functions.

    The buckets get an internal checksum from the cache assigned and use this
    to automatically reject outdated cache material.  Individual bytecode
    cache subclasses don't have to care about cache invalidation.
    """

    def __init__(self, environment, key, checksum):
        self.environment = environment
        self.key = key
        self.checksum = checksum
        self.reset()

    def reset(self):
        """Resets the bucket (unloads the bytecode)."""
        self.code = None

    def load_bytecode(self, f):
        """Loads bytecode from a file or file like object.

        Any mismatch (wrong magic, stale checksum, corrupt marshal data)
        resets the bucket so the caller sees a cache miss rather than an
        error.
        """
        # make sure the magic header is correct
        magic = f.read(len(bc_magic))
        if magic != bc_magic:
            self.reset()
            return
        # the source code of the file changed, we need to reload
        checksum = pickle.load(f)
        if self.checksum != checksum:
            self.reset()
            return
        # if marshal_load fails then we need to reload
        try:
            self.code = marshal_load(f)
        except (EOFError, ValueError, TypeError):
            self.reset()
            return

    def write_bytecode(self, f):
        """Dump the bytecode into the file or file like object passed."""
        if self.code is None:
            raise TypeError('can\'t write empty bucket')
        f.write(bc_magic)
        pickle.dump(self.checksum, f, 2)
        marshal_dump(self.code, f)

    def bytecode_from_string(self, string):
        """Load bytecode from a string."""
        self.load_bytecode(BytesIO(string))

    def bytecode_to_string(self):
        """Return the bytecode as string."""
        out = BytesIO()
        self.write_bytecode(out)
        return out.getvalue()
class BytecodeCache(object):
    """Abstract base class for Jinja2 bytecode caches.

    Subclasses implement :meth:`load_bytecode` and :meth:`dump_bytecode`;
    both receive a :class:`~jinja2.bccache.Bucket`.  A very basic
    file-system based cache could look like this::

        from os import path

        class MyCache(BytecodeCache):

            def __init__(self, directory):
                self.directory = directory

            def load_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                if path.exists(filename):
                    with open(filename, 'rb') as f:
                        bucket.load_bytecode(f)

            def dump_bytecode(self, bucket):
                filename = path.join(self.directory, bucket.key)
                with open(filename, 'wb') as f:
                    bucket.write_bytecode(f)

    A more advanced version of a filesystem based bytecode cache is part of
    Jinja2.
    """

    def load_bytecode(self, bucket):
        """Load bytecode into *bucket*.  A cache miss must leave the bucket
        untouched rather than raise."""
        raise NotImplementedError()

    def dump_bytecode(self, bucket):
        """Persist the bytecode held by *bucket*.  Failures should raise,
        not pass silently."""
        raise NotImplementedError()

    def clear(self):
        """Clear the cache.  Jinja2 itself never calls this; applications
        may.  The default implementation is a no-op."""

    def get_cache_key(self, name, filename=None):
        """Return the unique hash key for this template name."""
        digest = sha1(name.encode('utf-8'))
        if filename is not None:
            filename = '|' + filename
            if isinstance(filename, text_type):
                filename = filename.encode('utf-8')
            digest.update(filename)
        return digest.hexdigest()

    def get_source_checksum(self, source):
        """Return a checksum for the template source."""
        return sha1(source.encode('utf-8')).hexdigest()

    def get_bucket(self, environment, name, filename, source):
        """Return a cache bucket for the given template.  All arguments are
        mandatory but filename may be `None`.
        """
        bucket = Bucket(environment,
                        self.get_cache_key(name, filename),
                        self.get_source_checksum(source))
        self.load_bytecode(bucket)
        return bucket

    def set_bucket(self, bucket):
        """Put the bucket into the cache."""
        self.dump_bytecode(bucket)
class FileSystemBytecodeCache(BytecodeCache):
    """A bytecode cache that stores bytecode on the filesystem.  It accepts
    two arguments: The directory where the cache items are stored and a
    pattern string that is used to build the filename.

    If no directory is specified a default cache directory is selected.  On
    Windows the user's temp directory is used, on UNIX systems a directory
    is created for the user in the system temp directory.

    The pattern can be used to have multiple separate caches operate on the
    same directory.  The default pattern is ``'__jinja2_%s.cache'``.  ``%s``
    is replaced with the cache key.

    >>> bcc = FileSystemBytecodeCache('/tmp/jinja_cache', '%s.cache')

    This bytecode cache supports clearing of the cache using the clear method.
    """

    def __init__(self, directory=None, pattern='__jinja2_%s.cache'):
        if directory is None:
            directory = self._get_default_cache_dir()
        self.directory = directory
        self.pattern = pattern

    def _get_default_cache_dir(self):
        """Pick (and create if needed) a per-user cache directory under the
        system temp dir, refusing any directory another user could tamper
        with (wrong owner, wrong mode, or not a directory)."""
        def _unsafe_dir():
            raise RuntimeError('Cannot determine safe temp directory. You '
                               'need to explicitly provide one.')

        tmpdir = tempfile.gettempdir()

        # On windows the temporary directory is used specific unless
        # explicitly forced otherwise. We can just use that.
        if os.name == 'nt':
            return tmpdir
        if not hasattr(os, 'getuid'):
            _unsafe_dir()

        dirname = '_jinja2-cache-%d' % os.getuid()
        actual_dir = os.path.join(tmpdir, dirname)

        try:
            # 0700: only the owning user may read/write/traverse the cache.
            os.mkdir(actual_dir, stat.S_IRWXU)
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
        try:
            os.chmod(actual_dir, stat.S_IRWXU)
            actual_dir_stat = os.lstat(actual_dir)
            if actual_dir_stat.st_uid != os.getuid() \
                    or not stat.S_ISDIR(actual_dir_stat.st_mode) \
                    or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
                _unsafe_dir()
        except OSError as e:
            if e.errno != errno.EEXIST:
                raise
            # chmod failed with EEXIST; re-check ownership and permissions
            # before trusting the directory.
            actual_dir_stat = os.lstat(actual_dir)
            if actual_dir_stat.st_uid != os.getuid() \
                    or not stat.S_ISDIR(actual_dir_stat.st_mode) \
                    or stat.S_IMODE(actual_dir_stat.st_mode) != stat.S_IRWXU:
                _unsafe_dir()

        return actual_dir

    def _get_cache_filename(self, bucket):
        return path.join(self.directory, self.pattern % bucket.key)

    def load_bytecode(self, bucket):
        # Missing file is simply a cache miss; bucket is left untouched.
        f = open_if_exists(self._get_cache_filename(bucket), 'rb')
        if f is not None:
            try:
                bucket.load_bytecode(f)
            finally:
                f.close()

    def dump_bytecode(self, bucket):
        f = open(self._get_cache_filename(bucket), 'wb')
        try:
            bucket.write_bytecode(f)
        finally:
            f.close()

    def clear(self):
        # imported lazily here because google app-engine doesn't support
        # write access on the file system and the function does not exist
        # normally.
        from os import remove
        files = fnmatch.filter(listdir(self.directory), self.pattern % '*')
        for filename in files:
            try:
                remove(path.join(self.directory, filename))
            except OSError:
                pass
class MemcachedBytecodeCache(BytecodeCache):
    """This class implements a bytecode cache that uses a memcache cache for
    storing the information.  It does not enforce a specific memcache library
    (tummy's memcache or cmemcache) but will accept any class that provides
    the minimal interface required.

    Libraries compatible with this class:

    -   `werkzeug <http://werkzeug.pocoo.org/>`_.contrib.cache
    -   `python-memcached <http://www.tummy.com/Community/software/python-memcached/>`_
    -   `cmemcache <http://gijsbert.org/cmemcache/>`_

    (Unfortunately the django cache interface is not compatible because it
    does not support storing binary data, only unicode.  You can however pass
    the underlying cache client to the bytecode cache which is available
    as `django.core.cache.cache._client`.)

    The minimal interface for the client passed to the constructor is this:

    .. class:: MinimalClientInterface

        .. method:: set(key, value[, timeout])

            Stores the bytecode in the cache.  `value` is a string and
            `timeout` the timeout of the key.  If timeout is not provided
            a default timeout or no timeout should be assumed, if it's
            provided it's an integer with the number of seconds the cache
            item should exist.

        .. method:: get(key)

            Returns the value for the cache key.  If the item does not
            exist in the cache the return value must be `None`.

    The other arguments to the constructor are the prefix for all keys that
    is added before the actual cache key and the timeout for the bytecode in
    the cache system.  We recommend a high (or no) timeout.

    This bytecode cache does not support clearing of used items in the cache.
    The clear method is a no-operation function.

    .. versionadded:: 2.7
       Added support for ignoring memcache errors through the
       `ignore_memcache_errors` parameter.
    """

    def __init__(self, client, prefix='jinja2/bytecode/', timeout=None,
                 ignore_memcache_errors=True):
        self.client = client
        self.prefix = prefix
        self.timeout = timeout
        self.ignore_memcache_errors = ignore_memcache_errors

    def load_bytecode(self, bucket):
        # Memcache errors (when ignored) and missing keys are both treated
        # as a cache miss: the bucket is left untouched.
        try:
            code = self.client.get(self.prefix + bucket.key)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
            code = None
        if code is not None:
            bucket.bytecode_from_string(code)

    def dump_bytecode(self, bucket):
        args = (self.prefix + bucket.key, bucket.bytecode_to_string())
        if self.timeout is not None:
            args += (self.timeout,)
        try:
            self.client.set(*args)
        except Exception:
            if not self.ignore_memcache_errors:
                raise
| apache-2.0 |
Chipe1/aima-python | tests/test_mdp.py | 2 | 8327 | import pytest
from mdp import *

random.seed("aima-python")

# 4x3 grid world with small living cost (-0.1 per step) and +1/-1 terminals.
sequential_decision_environment_1 = GridMDP([[-0.1, -0.1, -0.1, +1],
                                             [-0.1, None, -0.1, -1],
                                             [-0.1, -0.1, -0.1, -0.1]],
                                            terminals=[(3, 2), (3, 1)])

# Same layout but with a steep living cost (-2), which favors fast exits.
sequential_decision_environment_2 = GridMDP([[-2, -2, -2, +1],
                                             [-2, None, -2, -1],
                                             [-2, -2, -2, -2]],
                                            terminals=[(3, 2), (3, 1)])

# Larger 6x5 world with several terminals of differing reward.
sequential_decision_environment_3 = GridMDP([[-1.0, -0.1, -0.1, -0.1, -0.1, 0.5],
                                             [-0.1, None, None, -0.5, -0.1, -0.1],
                                             [-0.1, None, 1.0, 3.0, None, -0.1],
                                             [-0.1, -0.1, -0.1, None, None, -0.1],
                                             [0.5, -0.1, -0.1, -0.1, -0.1, -1.0]],
                                            terminals=[(2, 2), (3, 2), (0, 4), (5, 0)])
def test_value_iteration():
    """Check value_iteration converges to the known utilities (epsilon=.01)
    for the four fixture grid worlds."""
    assert value_iteration(sequential_decision_environment, .01) == {
        (3, 2): 1.0, (3, 1): -1.0,
        (3, 0): 0.12958868267972745, (0, 1): 0.39810203830605462,
        (0, 2): 0.50928545646220924, (1, 0): 0.25348746162470537,
        (0, 0): 0.29543540628363629, (1, 2): 0.64958064617168676,
        (2, 0): 0.34461306281476806, (2, 1): 0.48643676237737926,
        (2, 2): 0.79536093684710951}

    assert value_iteration(sequential_decision_environment_1, .01) == {
        (3, 2): 1.0, (3, 1): -1.0,
        (3, 0): -0.0897388258468311, (0, 1): 0.146419707398967840,
        (0, 2): 0.30596200514385086, (1, 0): 0.010092796415625799,
        (0, 0): 0.00633408092008296, (1, 2): 0.507390193380827400,
        (2, 0): 0.15072242145212010, (2, 1): 0.358309043654212570,
        (2, 2): 0.71675493618997840}

    assert value_iteration(sequential_decision_environment_2, .01) == {
        (3, 2): 1.0, (3, 1): -1.0,
        (3, 0): -3.5141584808407855, (0, 1): -7.8000009574737180,
        (0, 2): -6.1064293596058830, (1, 0): -7.1012549580376760,
        (0, 0): -8.5872244532783200, (1, 2): -3.9653547121245810,
        (2, 0): -5.3099468802901630, (2, 1): -3.3543366255753995,
        (2, 2): -1.7383376462930498}

    assert value_iteration(sequential_decision_environment_3, .01) == {
        (0, 0): 4.350592130345558, (0, 1): 3.640700980321895, (0, 2): 3.0734806370346943, (0, 3): 2.5754335063434937,
        (0, 4): -1.0,
        (1, 0): 3.640700980321895, (1, 1): 3.129579352304856, (1, 4): 2.0787517066719916,
        (2, 0): 3.0259220379893352, (2, 1): 2.5926103577982897, (2, 2): 1.0, (2, 4): 2.507774181360808,
        (3, 0): 2.5336747364500076, (3, 2): 3.0, (3, 3): 2.292172805400873, (3, 4): 2.996383110867515,
        (4, 0): 2.1014575936349886, (4, 3): 3.1297590518608907, (4, 4): 3.6408806798779287,
        (5, 0): -1.0, (5, 1): 2.5756132058995282, (5, 2): 3.0736603365907276, (5, 3): 3.6408806798779287,
        (5, 4): 4.350771829901593}
def test_policy_iteration():
    """Check policy_iteration returns the expected optimal policy
    (state -> action vector, None at terminals) for three grid worlds."""
    assert policy_iteration(sequential_decision_environment) == {
        (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0),
        (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1),
        (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0),
        (3, 1): None, (3, 2): None}

    assert policy_iteration(sequential_decision_environment_1) == {
        (0, 0): (0, 1), (0, 1): (0, 1), (0, 2): (1, 0),
        (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (0, 1),
        (2, 1): (0, 1), (2, 2): (1, 0), (3, 0): (-1, 0),
        (3, 1): None, (3, 2): None}

    # With the heavy living cost, the optimal policy rushes to a terminal.
    assert policy_iteration(sequential_decision_environment_2) == {
        (0, 0): (1, 0), (0, 1): (0, 1), (0, 2): (1, 0),
        (1, 0): (1, 0), (1, 2): (1, 0), (2, 0): (1, 0),
        (2, 1): (1, 0), (2, 2): (1, 0), (3, 0): (0, 1),
        (3, 1): None, (3, 2): None}
def test_best_policy():
    """Derive policies from value_iteration utilities via best_policy and
    compare their arrow renderings with the expected grids."""
    pi = best_policy(sequential_decision_environment, value_iteration(sequential_decision_environment, .01))
    assert sequential_decision_environment.to_arrows(pi) == [['>', '>', '>', '.'],
                                                             ['^', None, '^', '.'],
                                                             ['^', '>', '^', '<']]

    pi_1 = best_policy(sequential_decision_environment_1, value_iteration(sequential_decision_environment_1, .01))
    assert sequential_decision_environment_1.to_arrows(pi_1) == [['>', '>', '>', '.'],
                                                                 ['^', None, '^', '.'],
                                                                 ['^', '>', '^', '<']]

    pi_2 = best_policy(sequential_decision_environment_2, value_iteration(sequential_decision_environment_2, .01))
    assert sequential_decision_environment_2.to_arrows(pi_2) == [['>', '>', '>', '.'],
                                                                 ['^', None, '>', '.'],
                                                                 ['>', '>', '>', '^']]

    pi_3 = best_policy(sequential_decision_environment_3, value_iteration(sequential_decision_environment_3, .01))
    assert sequential_decision_environment_3.to_arrows(pi_3) == [['.', '>', '>', '>', '>', '>'],
                                                                 ['v', None, None, '>', '>', '^'],
                                                                 ['v', None, '.', '.', None, '^'],
                                                                 ['v', '<', 'v', None, None, '^'],
                                                                 ['<', '<', '<', '<', '<', '.']]
def test_transition_model():
    """Build an MDP from an explicit per-state/per-action transition table
    and verify MDP.T returns the stored (probability, state) lists."""
    transition_model = {'a': {'plan1': [(0.2, 'a'), (0.3, 'b'), (0.3, 'c'), (0.2, 'd')],
                              'plan2': [(0.4, 'a'), (0.15, 'b'), (0.45, 'c')],
                              'plan3': [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')],
                              },
                        'b': {'plan1': [(0.2, 'a'), (0.6, 'b'), (0.2, 'c'), (0.1, 'd')],
                              'plan2': [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')],
                              'plan3': [(0.3, 'a'), (0.3, 'b'), (0.4, 'c')],
                              },
                        'c': {'plan1': [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')],
                              'plan2': [(0.5, 'a'), (0.3, 'b'), (0.1, 'c'), (0.1, 'd')],
                              'plan3': [(0.1, 'a'), (0.3, 'b'), (0.1, 'c'), (0.5, 'd')],
                              }}

    mdp = MDP(init="a", actlist={"plan1", "plan2", "plan3"}, terminals={"d"}, states={"a", "b", "c", "d"},
              transitions=transition_model)

    assert mdp.T("a", "plan3") == [(0.2, 'a'), (0.5, 'b'), (0.3, 'c')]
    assert mdp.T("b", "plan2") == [(0.6, 'a'), (0.2, 'b'), (0.1, 'c'), (0.1, 'd')]
    assert mdp.T("c", "plan1") == [(0.3, 'a'), (0.5, 'b'), (0.1, 'c'), (0.1, 'd')]
def test_pomdp_value_iteration():
    """Run POMDP value iteration on a 2-state/3-action model and check each
    alpha-vector sum falls in one of the expected ranges."""
    t_prob = [[[0.65, 0.35], [0.65, 0.35]], [[0.65, 0.35], [0.65, 0.35]], [[1.0, 0.0], [0.0, 1.0]]]
    e_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[0.8, 0.2], [0.3, 0.7]]]
    rewards = [[5, -10], [-20, 5], [-1, -1]]

    gamma = 0.95
    actions = ('0', '1', '2')
    states = ('0', '1')
    pomdp = POMDP(actions, t_prob, e_prob, rewards, states, gamma)
    utility = pomdp_value_iteration(pomdp, epsilon=5)

    # Ranges rather than exact values: the utility is a set of alpha
    # vectors whose numeric details vary slightly between runs.
    for _, v in utility.items():
        sum_ = 0
        for element in v:
            sum_ += sum(element)
    assert -9.76 < sum_ < -9.70 or 246.5 < sum_ < 248.5 or 0 < sum_ < 1
def test_pomdp_value_iteration2():
    """Same as test_pomdp_value_iteration but with a symmetric transition
    model and larger epsilon."""
    t_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[1.0, 0.0], [0.0, 1.0]]]
    e_prob = [[[0.5, 0.5], [0.5, 0.5]], [[0.5, 0.5], [0.5, 0.5]], [[0.85, 0.15], [0.15, 0.85]]]
    rewards = [[-100, 10], [10, -100], [-1, -1]]

    gamma = 0.95
    actions = ('0', '1', '2')
    states = ('0', '1')
    pomdp = POMDP(actions, t_prob, e_prob, rewards, states, gamma)
    utility = pomdp_value_iteration(pomdp, epsilon=100)

    for _, v in utility.items():
        sum_ = 0
        for element in v:
            sum_ += sum(element)
    assert -77.31 < sum_ < -77.25 or 799 < sum_ < 800


if __name__ == "__main__":
    pytest.main()
| mit |
comjoy91/SouthKorean-legislativeElection-history | crawlers/election_results/crawlers/assembly/base.py | 1 | 9308 | #!/usr/bin/env python3
# -*- coding=utf-8 -*-
############### is proportional in parse!
import gevent
from gevent import monkey
import itertools
from urllib.parse import urljoin
from utils import flatten, get_json, get_xpath, parse_cell, sanitize, split
monkey.patch_all()
class BaseCrawler(object):
    """Base crawler for Korean National Election Commission (info.nec.go.kr)
    assembly election result tables.

    Subclasses provide ``nth`` (assembly number), ``is_proportional`` and the
    URL attributes.  Results are returned as lists of per-district dicts
    keyed by ``attrs_district``.
    """

    url_image_base = 'http://info.nec.go.kr'
    attrs = []
    # Keys of the per-district result dict, in the order the tuple is built.
    attrs_district = ['district', 'electorates', 'counted_votes', 'cand_no', 'result', 'valid_votes', 'undervotes', 'blank_ballots']
    attrs_result = ['name', 'vote']
    # Attributes whose cells are handled specially and must not go through
    # parse_cell().
    attrs_exclude_parse_cell = ['image', 'cand_no', 'result']

    def parse_proportional(self, url, city_name=None):
        # Handles proportional-representation result tables only.
        elems = get_xpath(url, '//td')
        th_list = get_xpath(url, '//th')
        # Find the first header cell with a colspan: cells before it are
        # fixed columns, the colspan covers one column per party plus totals.
        for i in range(int(len(th_list))):
            if th_list[i].get('colspan') != None:
                num_ths_left = i
                max_candidate_num = int(th_list[i].get('colspan')) - 1
                break
        if th_list[0].get('rowspan') != None:  # table layout for nth != 20
            party_name_list = th_list[6:(6+max_candidate_num)]  # element: <th><strong>party</strong></th>
            td_head = 0
            num_tds = 6 + max_candidate_num  # TODO: generalize this hard-coded offset of 6
            num_rows = int(len(elems) / num_tds)
        else:  # table layout for nth == 20
            max_candidate_num = max_candidate_num + 1
            party_name_list = elems[num_ths_left:(num_ths_left+max_candidate_num)]  # element: <td><strong>party</strong></td>
            td_head = 1
            num_tds = len(th_list) + max_candidate_num - 1
            num_rows = int(len(elems) / num_tds) - 1
        consti_list = []
        candidate_num = max_candidate_num
        for i in range(num_rows):
            index = i + td_head
            # district here is 'all' (first row) or a municipality name.
            district = elems[index*num_tds]
            electorates = elems[index*num_tds + 1]
            counted_vote = elems[index*num_tds + 2]
            votes_num_percent = elems[index*num_tds + num_ths_left : index*num_tds + num_ths_left+candidate_num]  # element: <td>1,940,259<br>(42.28)</td>
            # Pair each party header with its vote cell.
            cand_list = list(map(lambda x, y: dict(list(zip(self.attrs_result, [x, y]))), party_name_list, votes_num_percent))
            valid_vote = elems[index*num_tds + num_ths_left + max_candidate_num+0]
            undervote = elems[index*num_tds + num_ths_left + max_candidate_num+1]
            blank_ballot = elems[index*num_tds + num_ths_left + max_candidate_num+2]
            district_info = (district, electorates, counted_vote, candidate_num, cand_list, valid_vote, undervote, blank_ballot)
            district_info = dict(list(zip(self.attrs_district, district_info)))
            consti_list.append(district_info)
        consti_list = [self.parse_consti(consti, city_name=city_name) for consti in consti_list]
        print(('crawled #%d - %s, %s(%d)...' % (self.nth, '비례대표', city_name, len(consti_list))))
        return consti_list

    def parse_constituency(self, url, city_name=None):
        # Handles constituency (district-seat) result tables only.
        tr_list = get_xpath(url, '//tr')
        thead_list = get_xpath(url, '//th')
        max_candidate_num = len(tr_list[2]) - len(thead_list)
        for i in range(len(thead_list)):
            if thead_list[i].get('colspan') != None:
                num_ths_left = i
                #max_candidate_num = int(thead_list[i].get('colspan')) - 1
                break
        consti_list = []
        for i in range(len(tr_list)):
            if len(tr_list[i]) < 2:
                pass
            elif tr_list[i][1].text == None:
                # A blank electorate cell marks a header row; the row below
                # it carries the actual vote counts.
                candidate_num = 0
                name_party_name = []
                votes_num_percent = []
                # district here is the municipality name for this seat.
                district = tr_list[i][0]
                electorates = tr_list[i+1][num_ths_left-2]
                counted_vote = tr_list[i+1][num_ths_left-1]
                for j in range(max_candidate_num):
                    index = num_ths_left + j
                    # Skip empty cells and the running-total ('계') column.
                    if (tr_list[i][index].findtext('strong') != None) \
                            and (tr_list[i][index].findtext('strong') != '') \
                            and (tr_list[i][index].text != '계'):
                        candidate_num = candidate_num+1
                        name_party_name.append(tr_list[i][index])  # element: <td><strong>party<br>name</strong></td>
                        votes_num_percent.append(tr_list[i+1][index])  # element: <td>3,050<br>(4.09)</td>
                cand_list = list(map(lambda x, y: dict(list(zip(self.attrs_result, [x, y]))), name_party_name, votes_num_percent))
                valid_vote = tr_list[i+1][num_ths_left + max_candidate_num+0]
                undervote = tr_list[i+1][num_ths_left + max_candidate_num+1]
                blank_ballot = tr_list[i+1][num_ths_left + max_candidate_num+2]
                district_info = (district, electorates, counted_vote, candidate_num, cand_list, valid_vote, undervote, blank_ballot)
                district_info = dict(list(zip(self.attrs_district, district_info)))
                consti_list.append(district_info)
        consti_list = [self.parse_consti(consti, city_name=city_name) for consti in consti_list]
        print('crawled #%d - %s, %s(%d)...' % (self.nth, '지역구', city_name, len(consti_list)))
        return consti_list

    def parse(self, url, is_proportional, city_name=None):
        """Dispatch to the proportional or constituency parser."""
        if is_proportional: return self.parse_proportional(url, city_name)
        else: return self.parse_constituency(url, city_name)

    def parse_record(self, record, attr_list):
        """Run parse_cell over every non-excluded attribute of one dict."""
        for attr in attr_list:
            if attr not in self.attrs_exclude_parse_cell:
                record[attr] = parse_cell(record[attr])

    def parse_dict_record(self, record, attr_list):
        # Like parse_record, but `record` is a list whose elements are dicts.
        for element in record:
            for attr in attr_list:
                if attr not in self.attrs_exclude_parse_cell:
                    element[attr] = parse_cell(element[attr])

    def parse_consti(self, consti, city_name=None):
        """Normalize one raw district dict in place and return it."""
        self.parse_record(consti, self.attrs_district)
        self.parse_dict_record(consti['result'], self.attrs_result)

        # never change the order
        consti['assembly_no'] = self.nth
        self.parse_district(consti, city_name)
        self.parse_electorate(consti)
        self.parse_counted_votes(consti)
        self.parse_result(consti)
        self.parse_valid_votes(consti)
        self.parse_undervotes(consti)
        self.parse_blank_ballots(consti)
        return consti

    def parse_district(self, consti, city_name):
        # Prefix the district with the city name when one is given.
        if city_name:
            consti['district'] = '%s %s' % (city_name, consti['district'])

    def parse_electorate(self, consti):
        if 'electorates' not in consti: return
        if type(consti['electorates']) == type([]):  # list form for nth != 20
            consti['electorates'] = sanitize(consti['electorates'][0])
        else:
            consti['electorates'] = sanitize(consti['electorates'])
        consti['electorates'] = consti['electorates'].replace(',', '')

    def parse_counted_votes(self, consti):
        if 'counted_votes' not in consti: return
        if type(consti['counted_votes']) == type([]):  # list form for nth != 20
            consti['counted_votes'] = sanitize(consti['counted_votes'][0])
        else:
            consti['counted_votes'] = sanitize(consti['counted_votes'])
        consti['counted_votes'] = consti['counted_votes'].replace(',', '')

    def parse_result(self, consti):
        if 'result' not in consti: return
        for candi in consti['result']:
            self.parse_candi(candi)

    def parse_valid_votes(self, consti):
        if 'valid_votes' not in consti: return
        consti['valid_votes'] = consti['valid_votes'].replace(',', '')

    def parse_undervotes(self, consti):
        if 'undervotes' not in consti: return
        consti['undervotes'] = consti['undervotes'].replace(',', '')

    def parse_blank_ballots(self, consti):
        if 'blank_ballots' not in consti: return
        consti['blank_ballots'] = consti['blank_ballots'].replace(',', '')

    def parse_candi(self, candi):
        """Split the raw name/vote cells of one candidate into flat fields."""
        if self.is_proportional:
            # Proportional lists carry only a party name.
            candi['party_name_kr'] = sanitize(candi['name'])
            del candi['name']
        else:
            # Constituency cells carry party name and candidate name.
            [candi['party_name_kr'], candi['name_kr']] = list(map(sanitize, candi['name'][:2]))
            del candi['name']
        [candi['votenum'], candi['voterate']] = list(map(sanitize, candi['vote'][:2]))
        candi['votenum'] = candi['votenum'].replace(',', '')
        del candi['vote']
class MultiCityCrawler(BaseCrawler):
    """Crawler that fetches one result page per city, in parallel via
    gevent, and concatenates the parsed districts."""

    def city_codes(self):
        """Return (code, name) pairs for every city listed by the NEC API."""
        list_ = get_json(self.url_city_codes_json)['jsonResult']['body']
        return [(x['CODE'], x['NAME']) for x in list_]

    def url_list(self, city_code):
        """Result-page URL for one city."""
        return self.url_list_base + str(city_code)

    def crawl(self):
        # constituency representatives
        jobs = []
        is_proportional = self.is_proportional
        if is_proportional:
            voting_system = "proportional"
        else:
            voting_system = "constituency"
        print("Waiting to connect http://info.nec.go.kr server (%s)..." % voting_system)
        # One greenlet per city; join waits for all fetch+parse to finish.
        for city_code, city_name in self.city_codes():
            req_url = self.url_list(city_code)
            job = gevent.spawn(self.parse, req_url, is_proportional, city_name)
            jobs.append(job)
        gevent.joinall(jobs)
        every_result = [{'voting_system':voting_system, 'results':flatten(job.get() for job in jobs)}]

        # proportional representation (delegated to a nested crawler, if any)
        if hasattr(self, 'prop_crawler'):
            prop_result = self.prop_crawler.crawl()
            every_result.extend(prop_result)
        return every_result
class SinglePageCrawler(BaseCrawler):
    """Crawler for elections whose full results live on a single page."""

    def crawl(self):
        """Parse the single result page and return the district list.

        BUG FIX: BaseCrawler.parse() requires the voting-system flag as its
        second argument; the original call omitted it, which raised a
        TypeError at runtime.
        """
        return self.parse(self.url_list, self.is_proportional)
| apache-2.0 |
Jyrsa/hoppy.fi | beerstatus/tasks.py | 1 | 11309 | import logging
from django.conf import settings
from django.template.defaultfilters import slugify
from django.utils import timezone
from huey.djhuey import db_periodic_task
from huey.djhuey import crontab
from huey.djhuey import db_task
import pytz
import requests
import json
import datetime
from beerstatus import models
from lxml import html
from django.utils.timezone import utc
import re
LOGGER = logging.getLogger(__name__)
@db_periodic_task(crontab(hour='*/6'))
def refresh_beer_availability_statuses():
    """Periodic huey task (every 6 hours): enqueue an availability refresh
    for each active beer in every supported Alko city."""
    LOGGER.info("starting beer status updates")
    for beer in models.Beer.objects.all().filter(active=True):
        for city_id, city in models.SUPPORTED_ALKO_CITIES:
            # refresh_beer_availability is a huey db_task, so each call
            # schedules work rather than running it inline.
            refresh_beer_availability(beer.pk, city_id)
    LOGGER.info("finished scheduling beer status updates")
def parse_alko_availability_date(value):
    """Parse Alko's "last updated" strings into a date.

    Returns a timezone-aware Helsinki datetime for "today"/"yesterday",
    a ``datetime.date`` for "D.M."-style strings matching one of the last
    seven days, or None when the value cannot be parsed.
    """
    # Alko servers are always in Helsinki local time, so relative
    # references like "yesterday" follow that timezone as well.
    helsinki = pytz.timezone("Europe/Helsinki")
    now_hki = datetime.datetime.utcnow().replace(tzinfo=utc).astimezone(helsinki)
    if value == "today":
        return now_hki
    if value == "yesterday":
        return now_hki - datetime.timedelta(days=1)
    try:
        parts = value.split(".")[:2]
        if len(parts) == 2:
            day, month = parts
            day, month = int(day), int(month)
            # BUG FIX: use the Helsinki-local date computed above instead of
            # the server's naive local clock (datetime.now()), so day/month
            # matching stays consistent across timezones and around midnight.
            ref_date = now_hki.date()
            for i in range(7):  # accept up to 1 week old info
                potential = ref_date - datetime.timedelta(days=i)
                if potential.day == day and potential.month == month:
                    return potential
    except ValueError:
        pass
    # Unparseable input: make the implicit None return explicit.
    return None
@db_task()
def refresh_beer_availability(beer_pk, cityID):
    """Huey task: fetch per-store availability for one beer in one city
    from the Alko JSON API and record BeerAvailability rows for stores
    known to the database."""
    beer = models.Beer.objects.get(pk=beer_pk)
    LOGGER.debug("refreshing beer %s status for %s" % (
        beer.name,
        cityID))
    url = \
        "http://www.alko.fi/api/product/Availability?productId=%s&cityId=%s&language=en"
    url = url % (beer.alko_product_id, cityID)
    response = requests.get(url)
    all_available = 0
    blob_list = response.json()
    for entry in blob_list:
        # Store id is the second-to-last path segment of the store link.
        store_id = entry["StoreLink"].split("/")[-2]
        try:
            store = models.AlkoLocation.objects.get(store_id=store_id)
        except models.AlkoLocation.DoesNotExist:
            # Unknown store: skip silently.
            continue
        date = parse_alko_availability_date(entry["LastUpdated"])
        if not date:
            # Abort the whole city refresh on the first unparseable date.
            LOGGER.error("unable to parse date '%s'" %
                entry["LastUpdated"]
            )
            return
        try:
            amount = int(entry["Amount"])
        except ValueError:
            continue
        all_available += amount
        # NOTE(review): count is part of the get_or_create lookup, so a new
        # row is created whenever the amount changes -- presumably intended
        # as a history; confirm duplicates per (beer, location, date) are OK.
        avail, created = models.BeerAvailability.objects.get_or_create(
            beer=beer,
            location=store,
            count=amount,
            date=date
        )
        avail.save()
    LOGGER.debug(("finished refreshing beer %s status for %s, "
        " found alltogether %d units") % (
        beer.name,
        cityID,
        all_available))
def retrieve_etree(url):
    """Fetch *url* and parse the response body into an lxml HTML tree."""
    # I would love to just request the floats in en-US, but Alko doesn't
    # honor the Accept-Language value consistently.
    response = requests.get(url)
    return html.fromstring(response.text)
def get_element_contents(tree, xpath, cure):
    """Evaluate *xpath* on *tree* and return the first match run through *cure*.

    *cure* is a cleaning/coercion callable (e.g. ``int``, ``float`` or a
    lambda). Raises ScrapeElementNotFoundException when nothing matches;
    when several elements match, only the first is used.
    """
    matches = tree.xpath(xpath)
    if not matches:
        raise ScrapeElementNotFoundException("element not found %s" % xpath)
    return cure(matches[0])
def get_from_alko_product_info_table(tree, name, cure_result):
    """Scan Alko's two-column product info table for the row labelled *name*.

    Row labels are normalized (stripped, ':' removed, lowercased) before
    comparison. Returns the second cell of the matching row passed through
    *cure_result*, or None when no row carries that label.
    """
    table_xpath = "//*[contains(@class, 'product-column2')]/table/"
    cell_xpath = "tr[%d]/td[%d]/text()"
    normalize = lambda x: x.strip().replace(":", "").lower()
    row = 1
    try:
        while True:
            label_xpath = table_xpath + (cell_xpath % (row, 1))
            label = get_element_contents(tree, label_xpath, normalize)
            if label == name:
                value_xpath = table_xpath + (cell_xpath % (row, 2))
                return get_element_contents(tree, value_xpath, cure_result)
            row += 1
    except ScrapeElementNotFoundException:
        # Ran out of table rows without finding the label.
        return None
class ScrapeInconsistencyException(Exception):
    """Raised when scraped data contradicts what was requested or is implausible.

    BUG FIX: derives from Exception instead of BaseException -- BaseException
    is reserved for interpreter-level exits (SystemExit, KeyboardInterrupt),
    and subclassing it let this error escape generic ``except Exception``
    handlers in task runners.
    """
    pass
class ScrapeElementNotFoundException(Exception):
    """Raised when an expected element is missing from a scraped page.

    BUG FIX: derives from Exception instead of BaseException (BaseException
    is reserved for interpreter-level exits and should not be subclassed by
    application errors).
    """
    pass
@db_periodic_task(crontab(hour='*/12'))
def update_beer_infos():
    """Periodic huey task: schedule an info refresh for every beer
    (runs every 12 hours)."""
    all_beers = models.Beer.objects.all()
    for beer in all_beers:
        update_beer_info(beer.pk)
@db_task()
def update_beer_info(beer_pk):
    """Huey task: scrape a beer's product page on alko.fi and update the model.

    Also retrieves the product name so that beer lists can be bootstrapped
    with just Alko product IDs in the future; an existing name is never
    overwritten.

    Raises ScrapeInconsistencyException when the page contradicts the product
    we asked for or contains implausible values; logs and returns when the
    product id cannot be found at all.
    """
    beer = models.Beer.objects.get(pk=beer_pk)
    beer_url = "http://www.alko.fi/en/products/%s/" % beer.alko_product_id
    tree = retrieve_etree(beer_url)
    # Be careful when deriving these xpaths with Chrome's dev tools: Chrome
    # inserts a <tbody> that isn't present in the source and that lxml
    # doesn't add.
    product_id = get_from_alko_product_info_table(
        tree,
        "product number",
        lambda x: x.strip()
    )
    if not product_id:
        LOGGER.error("unable to find product id on page %s" % beer_url)
        return
    if product_id != beer.alko_product_id:
        raise ScrapeInconsistencyException(("attempted to get beer info for"
            "product %s but received info for product %s instead") %
            (beer.alko_product_id, product_id))
    product_category = get_element_contents(
        tree,
        "//*[contains(@class, 'product-info-category')]/text()",
        lambda x: x.strip())
    if product_category != "beer":
        raise ScrapeInconsistencyException(("attempted to get a beer but %s "
            "is of category %s ") % (product_id, product_category))
    abv = get_from_alko_product_info_table(
        tree,
        "alcohol",
        lambda x: float(x.replace('%', '').replace(",", ".").strip())
    )
    if abv < 0 or abv > 50:
        raise ScrapeInconsistencyException(("abv is not logica for a beer"
            "produdct %d" % abv))
    ebu = get_from_alko_product_info_table(
        tree,
        "bitterness",
        lambda x: x.replace('EBU', '').replace(",", ".").strip()
    )
    # Not every beer has EBU defined, so a missing/empty value is allowed
    # and stored as 0.
    if ebu:
        ebu = float(ebu)
        if ebu < 0:
            raise ScrapeInconsistencyException(("ebu is not logical for a beer"
                "produdct %d" % ebu))
    else:
        ebu = 0
    # TODO: make sure the adjacent element actually says "style".
    style = get_from_alko_product_info_table(
        tree,
        "beer style",
        lambda x: x.strip()
    )
    # Style info isn't in the model yet, so it is scraped but only assigned
    # below; confirm the field exists before relying on it.
    price = get_element_contents(
        tree,
        ("//*[contains(@class, 'price')]/span[1]/text()"),
        lambda x: float(x.strip().replace(",", "."))
    )
    if price <= 0:
        raise ScrapeInconsistencyException(("beer price is <= 0."
            " There's no such thing as free beer!"))
    volume = get_element_contents(
        tree,
        ("//*[contains(@class, 'product-details')]/text()[1]"),
        lambda x: float(x.strip().replace(",", "."))
    )
    if volume <= 0 or volume > 10:
        raise ScrapeInconsistencyException(("Beer volume is not credible!"
            " There's no such thing as jottalitran tuopponen!"))
    name = get_element_contents(
        tree,
        ("//*[contains(@class, 'product-info')]/h1/text()"),
        lambda x: x.strip()
    )
    # BUG FIX: the original wrote ``if name <= 0 or volume > 200`` -- a
    # string/int comparison (TypeError on Python 3) plus a volume re-check
    # that could never trigger -- and its message used the broken format
    # string " %". Validate the name itself instead.
    if not name or len(name) > 200:
        raise ScrapeInconsistencyException(
            "Beer name is incompatible: %s" % name)
    beer.abv = abv
    beer.ebu = ebu
    beer.price = price
    beer.volume = volume
    beer.style = style
    if not beer.name:
        # Bootstrap name and slug only when none was set manually.
        beer.name = name
        beer.slug = slugify(name)
    beer.save()
def find_alko_id_by_name(name):
    """Resolve an Alko store name to its numeric store id via the search API.

    Returns the id as a string when the search yields exactly one result
    whose URL contains a 4-digit id, otherwise None.

    BUG FIX: the store name was previously interpolated raw into the query
    string, producing an invalid URL for names containing spaces or
    non-ASCII characters; ``params=`` lets requests URL-encode it properly.
    """
    name = name.replace("Alko", "").strip()
    response = requests.get(
        "http://www.alko.fi/api/find/stores",
        params={"Language": "en", "Page": 0, "PageSize": 20,
                "ProductIds": "", "Query": name})
    blob = response.json()
    results = blob.get("Results", [])
    if len(results) == 1:
        url = results[0]["Url"]
        # Assumes store ids are always 4 digits -- TODO confirm against the
        # API; re.search picks the first 4-digit run anywhere in the URL.
        match = re.search(r"\d{4}", url)
        if match:
            return match.group(0)
    return None
@db_periodic_task(crontab(hour="*/12"))
def update_all_alko_infos():
    """Periodic huey task: schedule an info refresh for every known Alko
    store (runs every 12 hours)."""
    for location in models.AlkoLocation.objects.all():
        update_alko_info(location.pk)
def _scrape_optional_store_field(tree, xpath):
    """Return the text at *xpath*, or '' when the element is missing."""
    try:
        return get_element_contents(tree, xpath, lambda x: x)
    except ScrapeElementNotFoundException:
        return ""


@db_task()
def update_alko_info(alko_pk):
    """Huey task: resolve a store's Alko id and scrape its address details.

    Saves the store id as soon as it is known, then scrapes the shop page
    for street address, postal code and city (each optional).
    """
    alko = models.AlkoLocation.objects.get(pk=alko_pk)
    store_id = find_alko_id_by_name(alko.name) or alko.store_id
    if not store_id:
        # BUG FIX: use the module LOGGER instead of the root logger
        # (logging.warning) for consistency with the rest of this module.
        LOGGER.warning("no store_id found for store name "
                       + alko.name)
        return  # should this be a loud failure?
    alko.store_id = store_id
    # Save immediately so we keep the id even when the page layout below
    # differs from what we expect.
    alko.save()
    url = "http://www.alko.fi/en/shops/%s/" % store_id
    tree = retrieve_etree(url)
    # The three address parts live in consecutive spans; each may be absent.
    streetaddr = _scrape_optional_store_field(
        tree,
        "//*[contains(@class,'store-contact')]/span[1]/div/span[1]/text()")
    areacode = _scrape_optional_store_field(
        tree,
        "//*[contains(@class,'store-contact')]/span[1]/div/span[2]/text()")
    city = _scrape_optional_store_field(
        tree,
        "//*[contains(@class,'store-contact')]/span[1]/div/span[3]/text()")
    address = "%s, %s %s" % (
        streetaddr,
        areacode,
        city
    )
    # Scraped but deliberately unused for now (raises if the heading is
    # missing, preserving the original behavior).
    name = get_element_contents(
        tree,
        "//*[contains(@class, 'basic-store-info')]/h1/text()",
        lambda x: x)
    # todo: should the scraped name overwrite alko.name when non-empty?
    if len(address) > 4:
        alko.address = address
    alko.save()
| mit |
acidjunk/django-scrumboard | scrumtools/apps/scrumboard/views.py | 1 | 7382 | from django.views.generic import ListView, TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.forms.models import modelform_factory
from django.core.urlresolvers import reverse_lazy
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from scrumtools.apps.scrumboard.models import Project, Status, Sprint, Story, Task
from django.utils import timezone
import json
import urllib2
#dashboard
@login_required
def dashboard(request):
    """Render the scrumboard dashboard with one lane per workflow column.

    The lanes are currently empty; see the Import view for the GitHub fetch
    that is meant to feed them eventually.
    """
    context_dict = {
        'todo_list': [],
        'progress_list': [],
        'test_list': [],
        'done_list': [],
    }
    return render(request, 'scrumboard/dashboard.html', context_dict)
#Projects
class ProjectList(ListView):
    """Paginated list of all projects."""
    model = Project
    paginate_by = 10
class ProjectCreate(CreateView):
    """Create a project; redirects to the project list on success."""
    model = Project
    success_url = reverse_lazy('scrumboard:project-list')
    template_name = 'scrumboard/form.html'
class ProjectUpdate(UpdateView):
    """Edit an existing project."""
    model = Project
    success_url = reverse_lazy('scrumboard:project-list')
    template_name = 'scrumboard/form.html'
class ProjectDelete(DeleteView):
    """Confirm-and-delete view for a project."""
    model = Project
    success_url = reverse_lazy('scrumboard:project-list')
    template_name = 'scrumboard/confirm_delete.html'
class ProjectDetail(DetailView):
    """Read-only detail page for a single project."""
    model = Project
#Statusses
class StatusList(ListView):
    """Paginated list of workflow statuses."""
    model = Status
    paginate_by = 10
class StatusCreate(CreateView):
    """Create a status (name and board ordering)."""
    model = Status
    fields = ['name','order']
    success_url = reverse_lazy('scrumboard:status-list')
    template_name = 'scrumboard/form.html'
class StatusUpdate(UpdateView):
    """Edit an existing status."""
    model = Status
    fields = ['name','order']
    success_url = reverse_lazy('scrumboard:status-list')
    template_name = 'scrumboard/form.html'
class StatusDelete(DeleteView):
    """Confirm-and-delete view for a status."""
    model = Status
    # NOTE(review): DeleteView ignores `fields`; this attribute has no effect.
    fields = ['name','order']
    success_url = reverse_lazy('scrumboard:status-list')
    template_name = 'scrumboard/confirm_delete.html'
class StatusDetail(DetailView):
    """Read-only detail page for a single status."""
    model = Status
#Sprints
class SprintList(ListView):
    """Paginated list of sprints."""
    model = Sprint
    paginate_by = 10
class SprintCreate(CreateView):
    """Create a sprint; redirects to the sprint list on success."""
    model = Sprint
    success_url = reverse_lazy('scrumboard:sprint-list')
    template_name = 'scrumboard/form.html'
class SprintUpdate(UpdateView):
    """Edit an existing sprint."""
    model = Sprint
    success_url = reverse_lazy('scrumboard:sprint-list')
    template_name = 'scrumboard/form.html'
class SprintDelete(DeleteView):
    """Confirm-and-delete view for a sprint."""
    model = Sprint
    success_url = reverse_lazy('scrumboard:sprint-list')
    template_name = 'scrumboard/confirm_delete.html'
class SprintDetail(DetailView):
    """Read-only detail page for a single sprint."""
    model = Sprint
#Stories
class StoryList(ListView):
    """Paginated list of user stories."""
    model = Story
    paginate_by = 10
class StoryCreate(CreateView):
    """Create a story within a project/sprint."""
    model = Story
    fields = ['project', 'name', 'description', 'sprint']
    #form_class = modelform_factory(Story)
    success_url = reverse_lazy('scrumboard:story-list')
    template_name = 'scrumboard/form.html'
class StoryUpdate(UpdateView):
    """Edit an existing story."""
    model = Story
    fields = ['project', 'name', 'description', 'sprint']
    success_url = reverse_lazy('scrumboard:story-list')
    template_name = 'scrumboard/form.html'
class StoryDelete(DeleteView):
    """Confirm-and-delete view for a story."""
    model = Story
    # NOTE(review): DeleteView ignores `fields`; this attribute has no effect.
    fields = ['project', 'name', 'description', 'sprint']
    success_url = reverse_lazy('scrumboard:story-list')
    template_name = 'scrumboard/confirm_delete.html'
class StoryDetail(DetailView):
    """Read-only detail page for a single story."""
    model = Story
    # NOTE(review): DetailView ignores `fields`; this attribute has no effect.
    fields = ['project', 'name', 'description', 'sprint']
#Tasks
class TaskList(ListView):
    """Paginated list of tasks."""
    model = Task
    paginate_by = 10
class TaskCreate(CreateView):
    """Create a task; redirects to the task list on success."""
    model = Task
    success_url = reverse_lazy('scrumboard:task-list')
    template_name = 'scrumboard/form.html'
class TaskUpdate(UpdateView):
    """Edit an existing task."""
    model = Task
    success_url = reverse_lazy('scrumboard:task-list')
    template_name = 'scrumboard/form.html'
class TaskDelete(DeleteView):
    """Confirm-and-delete view for a task."""
    model = Task
    success_url = reverse_lazy('scrumboard:task-list')
    template_name = 'scrumboard/confirm_delete.html'
class TaskDetail(DetailView):
    """Read-only detail page for a single task."""
    model = Task
class Import(TemplateView): # one-off importer of GitHub issues into tasks
    """Fetch open GitHub issues for this repo and store each one as a Task.

    NOTE(review): the import runs on GET, so every page load re-imports and
    there is no dedupe on github_id -- repeated visits create duplicate
    tasks. Confirm whether that is intended.
    """
    template_name = 'scrumboard/import.html'
    def get(self, request, *args, **kwargs):
        # Todo: make this dynamic -- project and status are hardcoded to pk=1.
        project = Project.objects.get(pk=1)
        # Todo:
        status = Status.objects.get(pk=1)
        # NOTE(review): Python 2 print statement -- this module is not
        # Python 3 compatible as written.
        print project, status
        data = json.load(urllib2.urlopen("https://api.github.com/repos/acidjunk/django-scrumboard/issues"))
        for item in data:
            # Insert one Task per GitHub issue.
            task = Task()
            task.project = project
            task.status = status
            task.name = 'Github issues: %s' % item['number']
            task.github_id = item['number']
            task.description = item['body']
            task.created_on = timezone.now()
            task.modified_on = timezone.now()
            task.save()
        context = {'data': data}
        return self.render_to_response(context)
# for i in data:
# task = Task()
# task.Project = "Project 1"
# task.name = i['number']
# task.description = i['title']
# task.Status = "Stat1"
# task.Sprint = "Meer koffie"
# task.Story = ""
# task.Story_points = 1
# task.estimated_days = 5
# task.created_on = "2015-01-01" # date(2015,1,1)
# task.modified_on = "2015-05-03" # date(2015,5,3)
# # task.assigned
# task.started = "2015-01-01"
# task.due = "2015-05-03"
# task.completed = "2015-08-08"
# task.save()
def select_project(request):
    """Let the user pick the active project, remembered in the session.

    NOTE(review): ProjectForm is never imported in this module, so this view
    raises NameError as-is -- confirm the forms module and add the import.

    BUG FIX: the GET branch previously indexed
    ``request.session['selected_project']`` directly, raising KeyError on the
    first visit; ``.get()`` returns None instead.
    """
    if request.method == 'POST':
        # Bound form: validate and remember the chosen project in the session.
        form = ProjectForm(request.POST)
        if form.is_valid():
            request.session['selected_project'] = request.POST.get('project_name', None)
    else:
        # Unbound form pre-filled with the current selection, if any.
        form = ProjectForm(initial={'project_name': request.session.get('selected_project')})
    context_dict = {
        'form': form
    }
    return render(request, 'scrumboard/select_project.html', context_dict)
def select_sprint(request):
    """Let the user pick the active sprint, remembered in the session.

    NOTE(review): SprintForm is never imported in this module, so this view
    raises NameError as-is -- confirm the forms module and add the import.

    BUG FIX: the GET branch previously indexed
    ``request.session['selected_sprint']`` directly, raising KeyError on the
    first visit; ``.get()`` returns None instead.
    """
    if request.method == 'POST':
        # Bound form: validate and remember the chosen sprint in the session.
        form = SprintForm(request.POST)
        if form.is_valid():
            request.session['selected_sprint'] = request.POST.get('sprint_name', None)
    else:
        # Unbound form pre-filled with the current selection, if any.
        form = SprintForm(initial={'sprint_name': request.session.get('selected_sprint')})
    context_dict = {
        'form': form
    }
    return render(request, 'scrumboard/select_sprint.html', context_dict)
sjperkins/tensorflow | tensorflow/python/kernel_tests/sparse_split_op_test.py | 138 | 13629 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for SparseReorder."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import sparse_ops
from tensorflow.python.platform import test
class SparseSplitOpTest(test.TestCase):
  """Tests for sparse_ops.sparse_split: splitting SparseTensors along an
  axis into even and uneven pieces, and round-tripping via sparse_concat.

  The ``_SparseTensor_*`` helpers build fixtures; the ASCII diagrams show
  the equivalent dense layout ('|' separates columns, blanks are implicit
  zeros / missing entries).
  """

  def _SparseTensor_4x6(self):
    # Fixture: 4x6 int64 SparseTensor (dense layout below).
    # [0 | |2 | |4 |5 ]
    # [ |11| |13|14| ]
    # [20| | |23| |25]
    # [30| |32|33| |35]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5]]).astype(np.int64)
    val = np.array(
        [0, 2, 4, 5, 11, 13, 14, 20, 23, 25, 30, 32, 33, 35]).astype(np.int64)
    shape = np.array([4, 6]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensor_5x7(self):
    # Fixture: 5x7 int64 SparseTensor (dense layout below); odd dimensions
    # exercise the uneven-split code paths.
    # [0 | |2 | |4 |5 | ]
    # [ |11| |13|14| |16]
    # [20| | |23| |25| ]
    # [30| |32|33| |35| ]
    # [ |41| | |44| |46]
    ind = np.array([[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3], [1, 4],
                    [1, 6], [2, 0], [2, 3], [2, 5], [3, 0], [3, 2], [3, 3],
                    [3, 5], [4, 1], [4, 4], [4, 6]]).astype(np.int64)
    val = np.array(
        [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25, 30, 32, 33, 35, 41, 44,
         46]).astype(np.int64)
    shape = np.array([5, 7]).astype(np.int64)
    return sparse_tensor.SparseTensor(ind, val, shape)

  def _SparseTensorValue_3x4x2(self):
    # Fixture: rank-3 (3x4x2) SparseTensorValue with string values.
    # slice(:,:, 0)
    # ['a0'| |'b0'| ]
    # [ |'c0'| |'d0']
    # [ | |'e0'| ]
    # slice(:,:, 1)
    # ['a1'| |'b1'| ]
    # [ |'c1'| |'d1']
    # [ | |'e1'| ]
    ind = np.array([[0, 0, 0], [0, 0, 1], [0, 2, 0], [0, 2, 1], [1, 1, 0],
                    [1, 1, 1], [1, 3, 0], [1, 3, 1], [2, 2, 0],
                    [2, 2, 1]]).astype(np.int64)
    val = np.array(['a0', 'a1', 'b0', 'b1', 'c0', 'c1', 'd0', 'd1', 'e0', 'e1'])
    shape = np.array([3, 4, 2]).astype(np.int64)
    return sparse_tensor.SparseTensorValue(ind, val, shape)

  def _SparseTensor_3x4x2(self):
    # Same fixture as above, wrapped as a SparseTensor.
    return sparse_tensor.SparseTensor.from_value(self._SparseTensorValue_3x4x2(
    ))

  def testSplitMatrixRows(self):
    # Even split: 4 rows into 2 pieces of 2 rows each.
    with self.test_session(use_gpu=False):
      sp_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=2, axis=0)
      self.assertAllEqual(len(sp_tensors), 2)
      self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
                                                         [0, 5], [1, 1], [1, 3],
                                                         [1, 4]])
      self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5, 11, 13, 14])
      self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [2, 6])
      self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 0], [0, 3], [0, 5],
                                                         [1, 0], [1, 2], [1, 3],
                                                         [1, 5]])
      self.assertAllEqual(sp_tensors[1].values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [2, 6])

  def testSplitMatrixUnevenCols(self):
    # Uneven split along columns: 7 columns into 3 (3+2+2) and 4 (2+2+2+1).
    with self.test_session(use_gpu=False):
      sp_tensors_3 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=3, axis=1)
      self.assertAllEqual(len(sp_tensors_3), 3)
      self.assertAllEqual(sp_tensors_3[0].indices.eval(),
                          [[0, 0], [0, 2], [1, 1], [2, 0], [3, 0], [3, 2],
                           [4, 1]])
      self.assertAllEqual(sp_tensors_3[0].values.eval(),
                          [0, 2, 11, 20, 30, 32, 41])
      self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [5, 3])
      self.assertAllEqual(sp_tensors_3[1].indices.eval(),
                          [[0, 1], [1, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensors_3[1].values.eval(),
                          [4, 13, 14, 23, 33, 44])
      self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_3[2].indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensors_3[2].values.eval(), [5, 16, 25, 35, 46])
      self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [5, 2])
      sp_tensors_4 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=4, axis=1)
      self.assertAllEqual(len(sp_tensors_4), 4)
      self.assertAllEqual(sp_tensors_4[0].indices.eval(),
                          [[0, 0], [1, 1], [2, 0], [3, 0], [4, 1]])
      self.assertAllEqual(sp_tensors_4[0].values.eval(), [0, 11, 20, 30, 41])
      self.assertAllEqual(sp_tensors_4[0].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_4[1].indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sp_tensors_4[1].values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sp_tensors_4[1].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_4[2].indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1], [4, 0]])
      self.assertAllEqual(sp_tensors_4[2].values.eval(), [4, 5, 14, 25, 35, 44])
      self.assertAllEqual(sp_tensors_4[2].dense_shape.eval(), [5, 2])
      self.assertAllEqual(sp_tensors_4[3].indices.eval(), [[1, 0], [4, 0]])
      self.assertAllEqual(sp_tensors_4[3].values.eval(), [16, 46])
      self.assertAllEqual(sp_tensors_4[3].dense_shape.eval(), [5, 1])

  def testSplitMatrixUnevenRows(self):
    # Uneven split along rows: 5 rows into 2 (3+2) and 3 (2+2+1).
    with self.test_session(use_gpu=False):
      sp_tensors_2 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=2, axis=0)
      self.assertAllEqual(sp_tensors_2[0].indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                           [1, 4], [1, 6], [2, 0], [2, 3], [2, 5]])
      self.assertAllEqual(sp_tensors_2[0].values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16, 20, 23, 25])
      self.assertAllEqual(sp_tensors_2[0].dense_shape.eval(), [3, 7])
      self.assertAllEqual(sp_tensors_2[1].indices.eval(),
                          [[0, 0], [0, 2], [0, 3], [0, 5], [1, 1], [1, 4],
                           [1, 6]])
      self.assertAllEqual(sp_tensors_2[1].values.eval(),
                          [30, 32, 33, 35, 41, 44, 46])
      self.assertAllEqual(sp_tensors_2[1].dense_shape.eval(), [2, 7])
      self.assertAllEqual(len(sp_tensors_2), 2)
      sp_tensors_3 = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_5x7(), num_split=3, axis=0)
      self.assertAllEqual(len(sp_tensors_3), 3)
      self.assertAllEqual(sp_tensors_3[0].indices.eval(),
                          [[0, 0], [0, 2], [0, 4], [0, 5], [1, 1], [1, 3],
                           [1, 4], [1, 6]])
      self.assertAllEqual(sp_tensors_3[0].values.eval(),
                          [0, 2, 4, 5, 11, 13, 14, 16])
      self.assertAllEqual(sp_tensors_3[0].dense_shape.eval(), [2, 7])
      self.assertAllEqual(sp_tensors_3[1].values.eval(),
                          [20, 23, 25, 30, 32, 33, 35])
      self.assertAllEqual(sp_tensors_3[1].dense_shape.eval(), [2, 7])
      self.assertAllEqual(sp_tensors_3[2].indices.eval(), [[0, 1], [0, 4],
                                                           [0, 6]])
      self.assertAllEqual(sp_tensors_3[2].values.eval(), [41, 44, 46])
      self.assertAllEqual(sp_tensors_3[2].dense_shape.eval(), [1, 7])
    # NOTE(review): stray `return` -- dead statement at end of the method.
    return

  def testSplitAllRows(self):
    # num_split equal to the row count: each piece is a single row.
    with self.test_session(use_gpu=False):
      sp_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=4, axis=0)
      self.assertAllEqual(len(sp_tensors), 4)
      self.assertAllEqual(sp_tensors[0].indices.eval(), [[0, 0], [0, 2], [0, 4],
                                                         [0, 5]])
      self.assertAllEqual(sp_tensors[0].values.eval(), [0, 2, 4, 5])
      self.assertAllEqual(sp_tensors[0].dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensors[1].indices.eval(), [[0, 1], [0, 3], [0,
                                                                          4]])
      self.assertAllEqual(sp_tensors[1].values.eval(), [11, 13, 14])
      self.assertAllEqual(sp_tensors[1].dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensors[2].indices.eval(), [[0, 0], [0, 3], [0,
                                                                          5]])
      self.assertAllEqual(sp_tensors[2].values.eval(), [20, 23, 25])
      self.assertAllEqual(sp_tensors[2].dense_shape.eval(), [1, 6])
      self.assertAllEqual(sp_tensors[3].indices.eval(), [[0, 0], [0, 2], [0, 3],
                                                         [0, 5]])
      self.assertAllEqual(sp_tensors[3].values.eval(), [30, 32, 33, 35])
      self.assertAllEqual(sp_tensors[3].dense_shape.eval(), [1, 6])

  def testSplitColumns(self):
    # Even split along columns: 6 columns into 3 pieces of 2.
    with self.test_session(use_gpu=False):
      sparse_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=3, axis=1)
      self.assertAllEqual(len(sparse_tensors), 3)
      self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [1, 1],
                                                             [2, 0], [3, 0]])
      self.assertAllEqual(sparse_tensors[0].values.eval(), [0, 11, 20, 30])
      self.assertAllEqual(sparse_tensors[0].dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensors[1].indices.eval(),
                          [[0, 0], [1, 1], [2, 1], [3, 0], [3, 1]])
      self.assertAllEqual(sparse_tensors[1].values.eval(), [2, 13, 23, 32, 33])
      self.assertAllEqual(sparse_tensors[1].dense_shape.eval(), [4, 2])
      self.assertAllEqual(sparse_tensors[2].indices.eval(),
                          [[0, 0], [0, 1], [1, 0], [2, 1], [3, 1]])
      self.assertAllEqual(sparse_tensors[2].values.eval(), [4, 5, 14, 25, 35])
      self.assertAllEqual(sparse_tensors[2].dense_shape.eval(), [4, 2])

  def testSplitAllColumns(self):
    # num_split equal to the column count: each piece is a single column.
    with self.test_session(use_gpu=False):
      sparse_tensors = sparse_ops.sparse_split(
          sp_input=self._SparseTensor_4x6(), num_split=6, axis=1)
      self.assertAllEqual(len(sparse_tensors), 6)
      self.assertAllEqual(sparse_tensors[0].indices.eval(), [[0, 0], [2, 0],
                                                             [3, 0]])
      self.assertAllEqual(sparse_tensors[0].values.eval(), [0, 20, 30])
      self.assertAllEqual(sparse_tensors[0].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[1].indices.eval(), [[1, 0]])
      self.assertAllEqual(sparse_tensors[1].values.eval(), [11])
      self.assertAllEqual(sparse_tensors[1].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[2].indices.eval(), [[0, 0], [3, 0]])
      self.assertAllEqual(sparse_tensors[2].values.eval(), [2, 32])
      self.assertAllEqual(sparse_tensors[2].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[3].indices.eval(), [[1, 0], [2, 0],
                                                             [3, 0]])
      self.assertAllEqual(sparse_tensors[3].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[3].values.eval(), [13, 23, 33])
      self.assertAllEqual(sparse_tensors[4].indices.eval(), [[0, 0], [1, 0]])
      self.assertAllEqual(sparse_tensors[4].values.eval(), [4, 14])
      self.assertAllEqual(sparse_tensors[4].dense_shape.eval(), [4, 1])
      self.assertAllEqual(sparse_tensors[5].indices.eval(), [[0, 0], [2, 0],
                                                             [3, 0]])
      self.assertAllEqual(sparse_tensors[5].values.eval(), [5, 25, 35])
      self.assertAllEqual(sparse_tensors[5].dense_shape.eval(), [4, 1])

  def testSliceConcat(self):
    # Round trip: split along axis 1 then concat should restore the indices.
    # Exercises both SparseTensorValue and SparseTensor inputs.
    for sp_input in (self._SparseTensorValue_3x4x2(),
                     self._SparseTensor_3x4x2()):
      with self.test_session(use_gpu=False):
        sparse_tensors = sparse_ops.sparse_split(
            sp_input=sp_input, num_split=2, axis=1)
        concat_tensor = sparse_ops.sparse_concat(1, sparse_tensors)
        expected_output = self._SparseTensor_3x4x2()
        self.assertAllEqual(concat_tensor.indices.eval(),
                            expected_output.indices.eval())

  def testArgumentErrors(self):
    # NOTE(review): assertRaisesRegexp is the deprecated alias of
    # assertRaisesRegex; kept for compatibility with older test runners.
    with self.assertRaisesRegexp(ValueError, 'Keyword arguments are required'):
      sparse_ops.sparse_split(3, 2, 1)
    with self.assertRaisesRegexp(ValueError, 'sp_input is required'):
      sparse_ops.sparse_split()
    with self.assertRaisesRegexp(ValueError, 'num_split is required'):
      sparse_ops.sparse_split(sp_input=1)
    with self.assertRaisesRegexp(ValueError, 'axis is required'):
      sparse_ops.sparse_split(num_split=2, sp_input=1)
if __name__ == '__main__':
test.main()
| apache-2.0 |
ClearCorp/odoo-costa-rica | TODO-9.0/l10n_cr_hr_payroll_pay_generator_promerica/__init__.py | 3 | 1059 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Addons modules by CLEARCORP S.A.
# Copyright (C) 2009-TODAY CLEARCORP S.A. (<http://clearcorp.co.cr>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report
import wizard
| agpl-3.0 |
danbryce/dreal | benchmarks/network/water/water-triple-sat.py | 11 | 6919 |
from gen import *
##########
# shared #
##########
flow_var[0] = """
(declare-fun tau () Real)
(declare-fun x1 () Real)
(declare-fun x2 () Real)
(declare-fun x3 () Real)
"""
flow_dec[0] = """
(define-ode flow_1 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_2 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_3 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_4 ((= d/dt[x1] (/ (- 5 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5))) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_5 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_6 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (+ 3 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5)))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
(define-ode flow_7 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (+ 4 (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5)))) 3))
(= d/dt[tau] 1)))
(define-ode flow_8 ((= d/dt[x1] (/ (* (* -0.5 (^ (* 2 9.80665) 0.5)) (^ x1 0.5)) 2))
(= d/dt[x2] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x1 0.5) (^ x2 0.5))) 4))
(= d/dt[x3] (/ (* (* 0.5 (^ (* 2 9.80665) 0.5)) (- (^ x2 0.5) (^ x3 0.5))) 3))
(= d/dt[tau] 1)))
"""
state_dec[0] = """
(declare-fun time_{0} () Real)
(declare-fun tau_{0}_0 () Real)
(declare-fun tau_{0}_t () Real)
(declare-fun mode1_{0} () Bool)
(declare-fun x1_{0}_0 () Real)
(declare-fun x1_{0}_t () Real)
(declare-fun mode2_{0} () Bool)
(declare-fun x2_{0}_0 () Real)
(declare-fun x2_{0}_t () Real)
(declare-fun mode3_{0} () Bool)
(declare-fun x3_{0}_0 () Real)
(declare-fun x3_{0}_t () Real)
"""
state_val[0] = """
(assert (<= 0 time_{0})) (assert (<= time_{0} 1))
(assert (<= 0 tau_{0}_0)) (assert (<= tau_{0}_0 1))
(assert (<= 0 tau_{0}_t)) (assert (<= tau_{0}_t 1))
(assert (<= 0 x1_{0}_0)) (assert (<= x1_{0}_0 10))
(assert (<= 0 x1_{0}_t)) (assert (<= x1_{0}_t 10))
(assert (<= 0 x2_{0}_0)) (assert (<= x2_{0}_0 10))
(assert (<= 0 x2_{0}_t)) (assert (<= x2_{0}_t 10))
(assert (<= 0 x3_{0}_0)) (assert (<= x3_{0}_0 10))
(assert (<= 0 x3_{0}_t)) (assert (<= x3_{0}_t 10))
"""
cont_cond[0] = ["""
(assert (and (>= tau_{0}_0 0) (<= tau_{0}_0 1)
(>= tau_{0}_t 0) (<= tau_{0}_t 1)
(forall_t 1 [0 time_{0}] (>= tau_{0}_t 0))
(forall_t 2 [0 time_{0}] (<= tau_{0}_t 1))))
(assert (or (and (= mode1_{0} true) (= mode2_{0} true) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_1)))
(and (= mode1_{0} true) (= mode2_{0} true) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_2)))
(and (= mode1_{0} true) (= mode2_{0} false) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_3)))
(and (= mode1_{0} true) (= mode2_{0} false) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_4)))
(and (= mode1_{0} false) (= mode2_{0} true) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_5)))
(and (= mode1_{0} false) (= mode2_{0} true) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_6)))
(and (= mode1_{0} false) (= mode2_{0} false) (= mode3_{0} true)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_7)))
(and (= mode1_{0} false) (= mode2_{0} false) (= mode3_{0} false)
(= [x1_{0}_t x2_{0}_t x3_{0}_t tau_{0}_t]
(integral 0. time_{0} [x1_{0}_0 x2_{0}_0 x3_{0}_0 tau_{0}_0] flow_8)))))"""]
# Discrete transition (jump) relation for unrolling step {0} -> {1}, emitted
# in dReal SMT2 syntax: the jump fires when the clock tau reaches 1, each
# continuous state x_i is copied across the jump, and each mode_i flag is
# re-decided by whether x_i sits below the threshold 5.
jump_cond[0] = ["""
(assert (and (= tau_{0}_t 1) (= tau_{1}_0 0)))
(assert (and (= x1_{1}_0 x1_{0}_t)))
(assert (or (and (< x1_{0}_t 5) (= mode1_{1} true))
(and (>= x1_{0}_t 5) (= mode1_{1} false))))
(assert (and (= x2_{1}_0 x2_{0}_t)))
(assert (or (and (< x2_{0}_t 5) (= mode2_{1} true))
(and (>= x2_{0}_t 5) (= mode2_{1} false))))
(assert (and (= x3_{1}_0 x3_{0}_t)))
(assert (or (and (< x3_{0}_t 5) (= mode3_{1} true))
(and (>= x3_{0}_t 5) (= mode3_{1} false))))"""]
#############
# Init/Goal #
#############
# Initial condition: tau starts at 0, every mode starts true, and every x_i
# starts within 0.1 of 5.
init_cond = """
(assert (= tau_{0}_0 0))
(assert (= mode1_{0} true))
(assert (and (>= x1_{0}_0 (- 5 0.1)) (<= x1_{0}_0 (+ 5 0.1))))
(assert (= mode2_{0} true))
(assert (and (>= x2_{0}_0 (- 5 0.1)) (<= x2_{0}_0 (+ 5 0.1))))
(assert (= mode3_{0} true))
(assert (and (>= x3_{0}_0 (- 5 0.1)) (<= x3_{0}_0 (+ 5 0.1))))
"""
# (Unsafe) goal condition: some x_i has escaped the [5 - 0.1, 5 + 0.1] band.
goal_cond = """
(assert (or (< x1_{0}_t (- 5 0.1)) (> x1_{0}_t (+ 5 0.1))))
(assert (or (< x2_{0}_t (- 5 0.1)) (> x2_{0}_t (+ 5 0.1))))
(assert (or (< x3_{0}_t (- 5 0.1)) (> x3_{0}_t (+ 5 0.1))))
"""
import sys
# NOTE(review): the bare `except` catches both a missing argument (IndexError)
# and a non-integer one (ValueError), printing usage in either case; the
# `else` branch only runs on success. `generate` is presumably defined
# earlier in this script -- confirm against the full file.
try:
    bound = int(sys.argv[1])
except:
    print("Usage:", sys.argv[0], "<Bound>")
else:
    generate(bound, 1, [0], 0, init_cond, goal_cond)
| gpl-2.0 |
dalejung/edamame | edamame/tools/follow.py | 1 | 9062 | import inspect
import gc
import sys
import os.path
import difflib
from collections import OrderedDict
import pandas as pd
from pandas.core.common import in_ipnb
def is_property(code):
    """
    Decide whether ``code`` is the code object of a property accessor.

    Walks gc referrers (idea borrowed from stdlib trace.py): locate the
    single function built from ``code``, then check that exactly one
    ``property`` object points at that function.
    """
    ## use of gc.get_referrers() was suggested by Michael Hudson
    # all functions which refer to this code object
    gc.collect()
    owning_funcs = [ref for ref in gc.get_referrers(code)
                    if inspect.isfunction(ref)]
    if len(owning_funcs) != 1:
        # no owner, or an ambiguous one: refuse to guess
        return False
    # a property object keeps a reference to the original accessor function
    prop_count = sum(1 for ref in gc.get_referrers(owning_funcs[0])
                     if isinstance(ref, property))
    return prop_count == 1
def is_class_dict(dct):
    """
    Return True when ``dct`` looks like a snapshot of a class ``__dict__``:
    a real dict whose '__dict__' entry is a getset descriptor.
    """
    if not isinstance(dct, dict):
        return False
    descriptor = dct.get('__dict__')
    return descriptor is not None and inspect.isgetsetdescriptor(descriptor)
def get_parent(code):
    """
    Best-effort lookup of the name of the class owning ``code``.

    Returns the class name, or None when the owner cannot be determined
    unambiguously via gc referrers.
    """
    candidates = [obj for obj in gc.get_referrers(code)
                  if inspect.isfunction(obj)]
    if len(candidates) != 1:
        return None
    func = candidates[0]
    for holder in gc.get_referrers(func):
        # assume that if a dict is pointed to by a class,
        # that dict is the class __dict__
        if isinstance(holder, dict):
            owners = [obj for obj in gc.get_referrers(holder)
                      if isinstance(obj, type)]
            if len(owners) == 1:
                return owners[0].__name__
        # bound methods carry the owner in their qualname
        if inspect.ismethod(holder):
            return holder.__qualname__.rsplit('.', 1)[0]
    return None
class Follow(object):
    """
    Follows execution path.

    Meant as a quick way to see what a function does.

    In [2]: with Follow() as f:
       ...:     df.sum()
       ...:

    In [3]: f.pprint(depth=2)
    stat_func <generic.py:3542>
        _reduce <frame.py:3995>
            _get_axis_number <generic.py:285>
            _get_agg_axis <frame.py:4128>
            as_matrix <generic.py:1938>
    """

    def __init__(self, depth=1, silent=False, parent=False):
        """
        Parameters
        ----------
        depth : int
            Maximum stack depth, relative to the ``with`` block, to record.
        silent : bool
            If False, pretty-print the recorded trace on exit.
        parent : bool
            If True, resolve the defining class of each call via
            ``get_parent`` (very slow; walks gc referrers per call).
        """
        self.depth = depth
        self.silent = silent
        # list of (indent, filename, lineno, func_name, frame_id,
        #          parent_id, parent_name) tuples
        self.timings = []
        self.frame_cache = {}
        self._caller_cache = {}
        self.parent = parent
        # id(frame) -> absolute depth of that frame below the `with` block
        self.stack_depth_cache = {}

    def current_depth(self, frame):
        """
        Return ``frame``'s depth relative to the frame that entered the
        context manager, or None if no cached ancestor is found within
        ``self.depth`` steps.
        """
        current_depth = None
        i = 0
        f = frame.f_back
        while f:
            i += 1
            parent_depth = self.stack_depth_cache.get(id(f), None)
            if parent_depth is not None:
                current_depth = i + parent_depth
                break
            # if we're already past depth, don't bother finding real depth
            if i > self.depth:
                return None
            f = f.f_back
        # should always at least get back to base parent
        return current_depth

    def trace_dispatch(self, frame, event, arg):
        """sys.setprofile hook: record 'call'/'c_call' events up to depth."""
        if len(self.stack_depth_cache) == 0:
            # __enter__ is the initial frame; its caller is depth 0
            self.stack_depth_cache[id(frame.f_back)] = 0

        # the lower parts get heavy. don't do anything for frames deeper
        # than depth
        current_depth = self.current_depth(frame)
        if current_depth is None:
            return
        if current_depth > self.depth:
            return

        if event not in ['call', 'c_call']:
            return

        # skip built in funcs
        if inspect.isbuiltin(arg):
            return

        # skip properties, we're only really interested in function calls.
        # this will unfortunately skip any important logic that is wrapped
        # in property logic
        code = frame.f_code
        if is_property(code):
            return

        # note that get_parent is supa slow.
        parent_name = None
        if self.parent:
            parent_name = get_parent(code)

        indent, first_parent = self.indent_level(frame)
        if event == "c_call":
            # C calls carry no python code object; record the callable name
            func_name = arg.__name__
            fn = (indent, "", 0, func_name, id(frame), id(first_parent), None)
        elif event == 'call':
            fcode = frame.f_code
            fn = (indent, fcode.co_filename, fcode.co_firstlineno,
                  fcode.co_name, id(frame), id(first_parent), parent_name)
        self.timings.append(fn)

    def indent_level(self, frame):
        """
        Return ``(indent, first_parent)``: the number of already-seen
        ancestor frames above ``frame``, and its immediate caller frame.
        """
        i = 0
        f = frame.f_back
        first_parent = f
        while f:
            if id(f) in self.frame_cache:
                i += 1
            f = f.f_back
        if i == 0:
            # back at the root: start a fresh frame cache
            self.frame_cache = {id(frame): True}
        else:
            self.frame_cache[id(frame)] = True
        return i, first_parent

    def to_frame(self):
        """Return the recorded calls as a DataFrame, one row per call."""
        data = self.timings
        cols = ['indent', 'filename', 'lineno', 'func_name', 'frame_id',
                'parent_id', 'parent_name']
        df = pd.DataFrame(data, columns=cols)
        df.loc[:, 'filename'] = df.filename.apply(lambda s: os.path.basename(s))
        return df

    def __enter__(self):
        sys.setprofile(self.trace_dispatch)
        return self

    def __exit__(self, exc_type, exc_value, exc_tb):
        sys.setprofile(None)
        if not self.silent:
            self.pprint(self.depth)

    def file_module_function_of(self, frame):
        """
        Return ``(filename, modulename, funcname)`` for ``frame``, with
        ``funcname`` qualified by its class when gc can determine it.
        """
        code = frame.f_code
        filename = code.co_filename
        if filename:
            # equivalent of stdlib trace.modname(): bare file name without
            # extension. (The original called `modname`, which is undefined
            # in this module and raised NameError.)
            modulename = os.path.splitext(os.path.basename(filename))[0]
        else:
            modulename = None

        funcname = code.co_name
        clsname = None
        if code in self._caller_cache:
            if self._caller_cache[code] is not None:
                clsname = self._caller_cache[code]
        else:
            self._caller_cache[code] = None
            ## use of gc.get_referrers() was suggested by Michael Hudson
            # all functions which refer to this code object
            funcs = [f for f in gc.get_referrers(code)
                     if inspect.isfunction(f)]
            # require len(func) == 1 to avoid ambiguity caused by calls to
            # new.function(): "In the face of ambiguity, refuse the
            # temptation to guess."
            if len(funcs) == 1:
                dicts = [d for d in gc.get_referrers(funcs[0])
                         if isinstance(d, dict)]
                if len(dicts) == 1:
                    classes = [c for c in gc.get_referrers(dicts[0])
                               if hasattr(c, "__bases__")]
                    if len(classes) == 1:
                        # ditto for new.classobj()
                        clsname = classes[0].__name__
                        # cache the result - assumption is that new.* is
                        # not called later to disturb this relationship
                        # _caller_cache could be flushed if functions in
                        # the new module get called.
                        self._caller_cache[code] = clsname
        if clsname is not None:
            funcname = "%s.%s" % (clsname, funcname)
        return filename, modulename, funcname

    def gen_output(self, depth=None):
        """
        Format the recorded calls as indented strings, dropping C calls
        (empty filename), lambdas/genexprs, dunder methods, and anything
        deeper than ``depth``.
        """
        df = self.to_frame()
        mask = df.filename == ''
        mask = mask | df.func_name.isin(['<lambda>', '<genexpr>'])
        mask = mask | df.func_name.str.startswith('__')
        if depth:
            mask = mask | (df.indent > depth)

        # was "<(unknown):{lineno}>", which silently dropped the `filename`
        # argument passed to .format() below
        MSG_FORMAT = "{indent}{func_name}{class_name} <{filename}:{lineno}>"

        df = df.loc[~mask]

        def _format_row(row):
            # row is a positional array (apply with raw=True): see `cols`
            # in to_frame for the column order
            indent = row[0]
            filename = row[1]
            lineno = row[2]
            func_name = row[3]
            class_name = row[6] or ''
            if class_name:
                class_name = '::{class_name}'.format(class_name=class_name)
            msg = MSG_FORMAT.format(indent=" " * indent * 4,
                                    func_name=func_name,
                                    filename=filename, lineno=lineno,
                                    class_name=class_name)
            return msg

        df = df.reset_index(drop=True)
        output = df.apply(_format_row, axis=1, raw=True)
        return output.tolist()

    def pprint(self, depth=None):
        """Print the formatted call trace."""
        output = self.gen_output(depth=depth)
        print("-" * 40)
        print("Follow Path (depth {depth}):".format(depth=depth))
        print("-" * 40)
        print("\n".join(output))

    def diff(self, right, depth):
        """Diff this trace against another Follow; HTML inside a notebook."""
        if in_ipnb():
            return self._html_diff(right=right, depth=depth)
        else:
            return self._text_diff(right=right, depth=depth)

    def _text_diff(self, right, depth):
        output = self.gen_output(depth)
        output2 = right.gen_output(depth)
        return '\n'.join(difflib.ndiff(output, output2))

    def _html_diff(self, right, depth):
        from IPython.core.display import HTML
        output = self.gen_output(depth)
        output2 = right.gen_output(depth)
        htmldiff = difflib.HtmlDiff()
        diff = htmldiff.make_table(output, output2)
        return HTML(diff)
| mit |
JavML/django | tests/order_with_respect_to/tests.py | 137 | 4286 | from __future__ import unicode_literals
from operator import attrgetter
from django.db import models
from django.test import TestCase
from .models import Answer, Post, Question
class OrderWithRespectToTests(TestCase):
    # Hook to allow subclasses to run these tests with alternate models.
    Answer = Answer
    Question = Question

    @classmethod
    def setUpTestData(cls):
        # NOTE: creation order matters. order_with_respect_to assigns each
        # Answer an implicit _order from its insertion sequence, and every
        # assertion below depends on exactly this John/Paul/George/Ringo order.
        cls.q1 = cls.Question.objects.create(text="Which Beatle starts with the letter 'R'?")
        cls.Answer.objects.create(text="John", question=cls.q1)
        cls.Answer.objects.create(text="Paul", question=cls.q1)
        cls.Answer.objects.create(text="George", question=cls.q1)
        cls.Answer.objects.create(text="Ringo", question=cls.q1)

    def test_default_to_insertion_order(self):
        # Answers will always be ordered in the order they were inserted.
        self.assertQuerysetEqual(
            self.q1.answer_set.all(), [
                "John", "Paul", "George", "Ringo",
            ],
            attrgetter("text"),
        )

    def test_previous_and_next_in_order(self):
        # We can retrieve the answers related to a particular object, in the
        # order they were created, once we have a particular object.
        a1 = self.q1.answer_set.all()[0]
        self.assertEqual(a1.text, "John")
        self.assertEqual(a1.get_next_in_order().text, "Paul")
        a2 = list(self.q1.answer_set.all())[-1]
        self.assertEqual(a2.text, "Ringo")
        self.assertEqual(a2.get_previous_in_order().text, "George")

    def test_item_ordering(self):
        # We can retrieve the ordering of the queryset from a particular item.
        a1 = self.q1.answer_set.all()[1]
        id_list = [o.pk for o in self.q1.answer_set.all()]
        self.assertSequenceEqual(a1.question.get_answer_order(), id_list)
        # It doesn't matter which answer we use to check the order, it will
        # always be the same.
        a2 = self.Answer.objects.create(text="Number five", question=self.q1)
        self.assertListEqual(
            list(a1.question.get_answer_order()), list(a2.question.get_answer_order())
        )

    def test_change_ordering(self):
        # The ordering can be altered
        a = self.Answer.objects.create(text="Number five", question=self.q1)
        # Swap the last two items in the order list
        id_list = [o.pk for o in self.q1.answer_set.all()]
        x = id_list.pop()
        id_list.insert(-1, x)
        # By default, the ordering is different from the swapped version
        self.assertNotEqual(list(a.question.get_answer_order()), id_list)
        # Change the ordering to the swapped version -
        # this changes the ordering of the queryset.
        a.question.set_answer_order(id_list)
        self.assertQuerysetEqual(
            self.q1.answer_set.all(), [
                "John", "Paul", "George", "Number five", "Ringo"
            ],
            attrgetter("text")
        )
class OrderWithRespectToTests2(TestCase):
    # Provide the Post model as a class attribute so that we can subclass this
    # test case in contenttypes_tests.test_order_with_respect_to and run these
    # tests with alternative implementations of Post.
    Post = Post

    def test_recursive_ordering(self):
        # Ordering is scoped per parent: p1's children get _order values
        # independent of p2's, in creation order.
        p1 = self.Post.objects.create(title="1")
        p2 = self.Post.objects.create(title="2")
        p1_1 = self.Post.objects.create(title="1.1", parent=p1)
        p1_2 = self.Post.objects.create(title="1.2", parent=p1)
        self.Post.objects.create(title="2.1", parent=p2)
        p1_3 = self.Post.objects.create(title="1.3", parent=p1)
        self.assertSequenceEqual(p1.get_post_order(), [p1_1.pk, p1_2.pk, p1_3.pk])

    def test_duplicate_order_field(self):
        # Declaring an explicit OrderWrt field alongside
        # order_with_respect_to must not create a second _order column.
        class Bar(models.Model):
            class Meta:
                app_label = 'order_with_respect_to'

        class Foo(models.Model):
            bar = models.ForeignKey(Bar, models.CASCADE)
            order = models.OrderWrt()

            class Meta:
                order_with_respect_to = 'bar'
                app_label = 'order_with_respect_to'

        count = 0
        for field in Foo._meta.local_fields:
            if isinstance(field, models.OrderWrt):
                count += 1
        self.assertEqual(count, 1)
| bsd-3-clause |
Nuevosmedios/django-badger | badger/migrations/0012_auto__add_field_badge_assignment_badge.py | 1 | 10979 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration: add the nullable boolean column
    ``assignment_badge`` to the ``badger_badge`` table."""

    def forwards(self, orm):
        # Adding field 'Badge.assignment_badge'
        db.add_column(u'badger_badge', 'assignment_badge',
                      self.gf('django.db.models.fields.NullBooleanField')(default=False, null=True, blank=True),
                      keep_default=False)

    def backwards(self, orm):
        # Deleting field 'Badge.assignment_badge'
        db.delete_column(u'badger_badge', 'assignment_badge')

    # Frozen ORM state generated by South at the time of this migration.
    # Do not edit by hand: it must describe the models as of this migration,
    # not the current models; regenerate via `schemamigration` instead.
    models = {
        u'auth.group': {
            'Meta': {'object_name': 'Group'},
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        u'auth.permission': {
            'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        u'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        u'badger.award': {
            'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'Award'},
            'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badger.Badge']"}),
            'claim_code': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '32', 'db_index': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'award_creator'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'award_user'", 'to': u"orm['auth.User']"})
        },
        u'badger.badge': {
            'Meta': {'ordering': "['-modified', '-created']", 'unique_together': "(('title', 'slug'),)", 'object_name': 'Badge'},
            'assignment_badge': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            'badgeType': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '3', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'group_badge': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'image': ('django.db.models.fields.files.ImageField', [], {'max_length': '256', 'null': 'True', 'blank': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'nominations_accepted': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'nominations_autoapproved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'prerequisites': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': u"orm['badger.Badge']", 'null': 'True', 'blank': 'True'}),
            'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
            'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
            'unique': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'weight': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        u'badger.deferredaward': {
            'Meta': {'ordering': "['-modified', '-created']", 'object_name': 'DeferredAward'},
            'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badger.Badge']"}),
            'claim_code': ('django.db.models.fields.CharField', [], {'default': "'t9vj7h'", 'unique': 'True', 'max_length': '32', 'db_index': 'True'}),
            'claim_group': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
            'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'email': ('django.db.models.fields.EmailField', [], {'db_index': 'True', 'max_length': '75', 'null': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'reusable': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        u'badger.nomination': {
            'Meta': {'object_name': 'Nomination'},
            'accepted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'approver': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nomination_approver'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'award': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badger.Award']", 'null': 'True', 'blank': 'True'}),
            'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badger.Badge']"}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nomination_creator'", 'null': 'True', 'to': u"orm['auth.User']"}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'nominee': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'nomination_nominee'", 'to': u"orm['auth.User']"}),
            'rejected_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'nomination_rejected_by'", 'null': 'True', 'to': u"orm['auth.User']"}),
            'rejected_reason': ('django.db.models.fields.TextField', [], {'blank': 'True'})
        },
        u'badger.progress': {
            'Meta': {'unique_together': "(('badge', 'user'),)", 'object_name': 'Progress'},
            'badge': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['badger.Badge']"}),
            'counter': ('django.db.models.fields.FloatField', [], {'default': '0', 'null': 'True', 'blank': 'True'}),
            'created': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'modified': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'notes': ('badger.models.JSONField', [], {'null': 'True', 'blank': 'True'}),
            'percent': ('django.db.models.fields.FloatField', [], {'default': '0'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'progress_user'", 'to': u"orm['auth.User']"})
        },
        u'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        }
    }

    complete_apps = ['badger']
eneldoserrata/marcos_openerp | addons/report_intrastat/__init__.py | 64 | 1094 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import report_intrastat
import report
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
argriffing/arb | doc/source/conf.py | 1 | 8167 | # -*- coding: utf-8 -*-
#
# Arb documentation build configuration file, created by
# sphinx-quickstart on Thu Oct 11 09:33:44 2012.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys, os
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration -----------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.mathjax']
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Arb'
copyright = u'2012-2016, Fredrik Johansson'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
# Parse the short X.Y version out of arb/version.c, which contains a line
# of the form:
#     const char * arb_version = "2.8.0";
# The `with` block closes the file handle (the original leaked it), and
# iterating the file object avoids materializing all lines via readlines().
with open("../../arb/version.c") as _version_file:
    for _line in _version_file:
        if _line.startswith("const char * arb_version"):
            _i1 = _line.find('"')
            _i2 = _line.find('"', _i1 + 1)
            version = _line[_i1 + 1:_i2]
release = version
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all documents.
default_role = 'math'
latex_preamble = r"""
\usepackage{amsmath,amssymb}
\usepackage{breakurl}
\setcounter{tocdepth}{2}
"""
primary_domain = 'c'
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# -- Options for HTML output ---------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
#html_theme = 'pyramid'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
html_theme_options = {'sidebarwidth' : 300}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
html_logo = "_static/arbwhite.png"
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
html_favicon = "_static/arb.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Arbdoc'
# -- Options for LaTeX output --------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
'papersize': 'a4paper',
'fontpkg': '',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
'preamble': '\\usepackage{lmodern}\n\\setcounter{tocdepth}{2}\n\\urlstyle{tt}',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, documentclass [howto/manual]).
latex_documents = [
('index', 'Arb.tex', u'Arb Documentation',
u'Fredrik Johansson', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
latex_logo = "_static/arbtext.pdf"
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output --------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'arb', u'Arb Documentation',
[u'Fredrik Johansson'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output ------------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Arb', u'Arb Documentation',
u'Fredrik Johansson', 'Arb', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
| lgpl-2.1 |
frdb194/django | tests/template_backends/test_dummy.py | 306 | 3603 | # coding: utf-8
from __future__ import unicode_literals
from django.forms import CharField, Form, Media
from django.http import HttpRequest
from django.middleware.csrf import CsrfViewMiddleware, get_token
from django.template import TemplateDoesNotExist, TemplateSyntaxError
from django.template.backends.dummy import TemplateStrings
from django.test import SimpleTestCase
class TemplateStringsTests(SimpleTestCase):
    """Tests for the dummy template backend.

    Other backends rerun this suite by subclassing and overriding the three
    class attributes below; tests that cannot be expressed with the dummy
    backend skip themselves based on ``backend_name``.
    """

    engine_class = TemplateStrings
    backend_name = 'dummy'
    options = {}

    @classmethod
    def setUpClass(cls):
        super(TemplateStringsTests, cls).setUpClass()
        params = {
            'DIRS': [],
            'APP_DIRS': True,
            'NAME': cls.backend_name,
            'OPTIONS': cls.options,
        }
        cls.engine = cls.engine_class(params)

    def test_from_string(self):
        template = self.engine.from_string("Hello!\n")
        content = template.render()
        self.assertEqual(content, "Hello!\n")

    def test_get_template(self):
        template = self.engine.get_template('template_backends/hello.html')
        content = template.render({'name': 'world'})
        self.assertEqual(content, "Hello world!\n")

    def test_get_template_non_existing(self):
        with self.assertRaises(TemplateDoesNotExist) as e:
            self.engine.get_template('template_backends/non_existing.html')
        self.assertEqual(e.exception.backend, self.engine)

    def test_get_template_syntax_error(self):
        # There's no way to trigger a syntax error with the dummy backend.
        # The test still lives here to factor it between other backends.
        if self.backend_name == 'dummy':
            self.skipTest("test doesn't apply to dummy backend")
        with self.assertRaises(TemplateSyntaxError):
            self.engine.get_template('template_backends/syntax_error.html')

    def test_html_escaping(self):
        template = self.engine.get_template('template_backends/hello.html')
        context = {'name': '<script>alert("XSS!");</script>'}
        content = template.render(context)
        # The markup must come out entity-escaped, never verbatim.
        # (This previously asserted that '<script>' was both present and
        # absent in the same output, which is self-contradictory; the first
        # assertion must check for the escaped form.)
        self.assertIn('&lt;script&gt;', content)
        self.assertNotIn('<script>', content)

    def test_django_html_escaping(self):
        if self.backend_name == 'dummy':
            self.skipTest("test doesn't apply to dummy backend")

        class TestForm(Form):
            test_field = CharField()

        media = Media(js=['my-script.js'])
        form = TestForm()
        template = self.engine.get_template('template_backends/django_escaping.html')
        content = template.render({'media': media, 'test_form': form})
        expected = '{}\n\n{}\n\n{}'.format(media, form, form['test_field'])
        self.assertHTMLEqual(content, expected)

    def test_csrf_token(self):
        request = HttpRequest()
        CsrfViewMiddleware().process_view(request, lambda r: None, (), {})
        template = self.engine.get_template('template_backends/csrf.html')
        content = template.render(request=request)
        expected = (
            '<input type="hidden" name="csrfmiddlewaretoken" '
            'value="{}" />'.format(get_token(request)))
        self.assertHTMLEqual(content, expected)

    def test_no_directory_traversal(self):
        with self.assertRaises(TemplateDoesNotExist):
            self.engine.get_template('../forbidden/template_backends/hello.html')

    def test_non_ascii_characters(self):
        template = self.engine.get_template('template_backends/hello.html')
        content = template.render({'name': 'Jérôme'})
        self.assertEqual(content, "Hello Jérôme!\n")
| bsd-3-clause |
robbiet480/home-assistant | homeassistant/components/zha/climate.py | 3 | 20591 | """
Climate on Zigbee Home Automation networks.
For more details on this platform, please refer to the documentation
at https://home-assistant.io/components/zha.climate/
"""
from datetime import datetime, timedelta
import enum
import functools
import logging
from random import randint
from typing import List, Optional, Tuple
from homeassistant.components.climate import ClimateEntity
from homeassistant.components.climate.const import (
ATTR_HVAC_MODE,
ATTR_TARGET_TEMP_HIGH,
ATTR_TARGET_TEMP_LOW,
CURRENT_HVAC_COOL,
CURRENT_HVAC_FAN,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
CURRENT_HVAC_OFF,
DOMAIN,
FAN_AUTO,
FAN_ON,
HVAC_MODE_COOL,
HVAC_MODE_DRY,
HVAC_MODE_FAN_ONLY,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_NONE,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
SUPPORT_TARGET_TEMPERATURE_RANGE,
)
from homeassistant.const import ATTR_TEMPERATURE, PRECISION_HALVES, TEMP_CELSIUS
from homeassistant.core import callback
from homeassistant.helpers.dispatcher import async_dispatcher_connect
from homeassistant.helpers.event import async_track_time_interval
import homeassistant.util.dt as dt_util
from .core import discovery
from .core.const import (
CHANNEL_FAN,
CHANNEL_THERMOSTAT,
DATA_ZHA,
DATA_ZHA_DISPATCHERS,
SIGNAL_ADD_ENTITIES,
SIGNAL_ATTR_UPDATED,
)
from .core.registries import ZHA_ENTITIES
from .entity import ZhaEntity
DEPENDENCIES = ["zha"]

# Keys for the extra state attributes exposed by Thermostat.device_state_attributes.
# The string values mirror the ZCL Thermostat cluster attribute names.
ATTR_SYS_MODE = "system_mode"
ATTR_RUNNING_MODE = "running_mode"
ATTR_SETPT_CHANGE_SRC = "setpoint_change_source"
ATTR_SETPT_CHANGE_AMT = "setpoint_change_amount"
ATTR_OCCUPANCY = "occupancy"
ATTR_PI_COOLING_DEMAND = "pi_cooling_demand"
ATTR_PI_HEATING_DEMAND = "pi_heating_demand"
ATTR_OCCP_COOL_SETPT = "occupied_cooling_setpoint"
ATTR_OCCP_HEAT_SETPT = "occupied_heating_setpoint"
ATTR_UNOCCP_HEAT_SETPT = "unoccupied_heating_setpoint"
ATTR_UNOCCP_COOL_SETPT = "unoccupied_cooling_setpoint"

# Registry decorator that binds entity classes below to the climate domain.
STRICT_MATCH = functools.partial(ZHA_ENTITIES.strict_match, DOMAIN)

# Map of ZCL ``running_mode`` attribute values to Home Assistant HVAC modes.
RUNNING_MODE = {0x00: HVAC_MODE_OFF, 0x03: HVAC_MODE_COOL, 0x04: HVAC_MODE_HEAT}
class ThermostatFanMode(enum.IntEnum):
    """Fan channel enum for thermostat Fans.

    Values presumably follow the ZCL Fan Control ``fan_mode`` attribute —
    TODO confirm against the fan channel implementation.
    """

    OFF = 0x00
    ON = 0x04
    AUTO = 0x05
class RunningState(enum.IntFlag):
    """ZCL Running state enum.

    Bit flags of the thermostat ``running_state`` attribute; several flags
    may be set simultaneously (e.g. heating plus fan).
    """

    HEAT = 0x0001
    COOL = 0x0002
    FAN = 0x0004
    HEAT_STAGE_2 = 0x0008
    COOL_STAGE_2 = 0x0010
    FAN_STAGE_2 = 0x0020
    FAN_STAGE_3 = 0x0040
# Map of the ZCL "control sequence of operation" attribute value to the tuple
# of HVAC modes the device can support; used by Thermostat.hvac_modes.
SEQ_OF_OPERATION = {
    0x00: (HVAC_MODE_OFF, HVAC_MODE_COOL),  # cooling only
    0x01: (HVAC_MODE_OFF, HVAC_MODE_COOL),  # cooling with reheat
    0x02: (HVAC_MODE_OFF, HVAC_MODE_HEAT),  # heating only
    0x03: (HVAC_MODE_OFF, HVAC_MODE_HEAT),  # heating with reheat
    # cooling and heating 4-pipes
    0x04: (HVAC_MODE_OFF, HVAC_MODE_HEAT_COOL, HVAC_MODE_COOL, HVAC_MODE_HEAT),
    # cooling and heating 4-pipes
    0x05: (HVAC_MODE_OFF, HVAC_MODE_HEAT_COOL, HVAC_MODE_COOL, HVAC_MODE_HEAT),
    0x06: (HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF),  # centralite specific
    0x07: (HVAC_MODE_HEAT_COOL, HVAC_MODE_OFF),  # centralite specific
}
class SystemMode(enum.IntEnum):
    """ZCL System Mode attribute enum.

    Raw values of the thermostat ``system_mode`` attribute; translated to and
    from Home Assistant HVAC modes via the mapping dicts below.
    """

    OFF = 0x00
    HEAT_COOL = 0x01
    COOL = 0x03
    HEAT = 0x04
    AUX_HEAT = 0x05
    PRE_COOL = 0x06
    FAN_ONLY = 0x07
    DRY = 0x08
    SLEEP = 0x09
# Home Assistant HVAC mode -> ZCL SystemMode, used when writing to the device.
HVAC_MODE_2_SYSTEM = {
    HVAC_MODE_OFF: SystemMode.OFF,
    HVAC_MODE_HEAT_COOL: SystemMode.HEAT_COOL,
    HVAC_MODE_COOL: SystemMode.COOL,
    HVAC_MODE_HEAT: SystemMode.HEAT,
    HVAC_MODE_FAN_ONLY: SystemMode.FAN_ONLY,
    HVAC_MODE_DRY: SystemMode.DRY,
}

# ZCL SystemMode -> Home Assistant HVAC mode. Not a strict inverse of the
# mapping above: several device modes collapse onto one HA mode.
SYSTEM_MODE_2_HVAC = {
    SystemMode.OFF: HVAC_MODE_OFF,
    SystemMode.HEAT_COOL: HVAC_MODE_HEAT_COOL,
    SystemMode.COOL: HVAC_MODE_COOL,
    SystemMode.HEAT: HVAC_MODE_HEAT,
    SystemMode.AUX_HEAT: HVAC_MODE_HEAT,
    SystemMode.PRE_COOL: HVAC_MODE_COOL,  # this is 'precooling'. is it the same?
    SystemMode.FAN_ONLY: HVAC_MODE_FAN_ONLY,
    SystemMode.DRY: HVAC_MODE_DRY,
    SystemMode.SLEEP: HVAC_MODE_OFF,
}

# ZCL transports temperatures in hundredths of a degree Celsius.
ZCL_TEMP = 100

_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(hass, config_entry, async_add_entities):
    """Set up Zigbee Home Automation climate entities from a config entry.

    Registers a dispatcher listener so entities discovered later are added,
    and stores the unsubscribe callback for teardown.
    """
    pending_entities = hass.data[DATA_ZHA][DOMAIN]
    add_pending = functools.partial(
        discovery.async_add_entities, async_add_entities, pending_entities
    )
    unsubscribe = async_dispatcher_connect(hass, SIGNAL_ADD_ENTITIES, add_pending)
    hass.data[DATA_ZHA][DATA_ZHA_DISPATCHERS].append(unsubscribe)
@STRICT_MATCH(channel_names=CHANNEL_THERMOSTAT, aux_channels=CHANNEL_FAN)
class Thermostat(ZhaEntity, ClimateEntity):
    """Representation of a ZHA Thermostat device."""

    DEFAULT_MAX_TEMP = 35
    DEFAULT_MIN_TEMP = 7

    _domain = DOMAIN
    value_attribute = 0x0000

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._thrm = self.cluster_channels.get(CHANNEL_THERMOSTAT)
        self._preset = PRESET_NONE
        self._presets = []
        self._supported_flags = SUPPORT_TARGET_TEMPERATURE
        self._fan = self.cluster_channels.get(CHANNEL_FAN)

    @property
    def current_temperature(self):
        """Return the current temperature."""
        if self._thrm.local_temp is None:
            return None
        return self._thrm.local_temp / ZCL_TEMP

    @property
    def device_state_attributes(self):
        """Return device specific state attributes."""
        data = {}
        if self.hvac_mode:
            mode = SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode, "unknown")
            data[ATTR_SYS_MODE] = f"[{self._thrm.system_mode}]/{mode}"
        if self._thrm.occupancy is not None:
            data[ATTR_OCCUPANCY] = self._thrm.occupancy
        if self._thrm.occupied_cooling_setpoint is not None:
            data[ATTR_OCCP_COOL_SETPT] = self._thrm.occupied_cooling_setpoint
        if self._thrm.occupied_heating_setpoint is not None:
            data[ATTR_OCCP_HEAT_SETPT] = self._thrm.occupied_heating_setpoint
        if self._thrm.pi_heating_demand is not None:
            data[ATTR_PI_HEATING_DEMAND] = self._thrm.pi_heating_demand
        if self._thrm.pi_cooling_demand is not None:
            data[ATTR_PI_COOLING_DEMAND] = self._thrm.pi_cooling_demand

        # Bug fix: the unoccupied setpoints were previously reported under
        # each other's keys (cooling under "unoccupied_heating_setpoint" and
        # vice versa).
        unoccupied_cooling_setpoint = self._thrm.unoccupied_cooling_setpoint
        if unoccupied_cooling_setpoint is not None:
            data[ATTR_UNOCCP_COOL_SETPT] = unoccupied_cooling_setpoint

        unoccupied_heating_setpoint = self._thrm.unoccupied_heating_setpoint
        if unoccupied_heating_setpoint is not None:
            data[ATTR_UNOCCP_HEAT_SETPT] = unoccupied_heating_setpoint
        return data

    @property
    def fan_mode(self) -> Optional[str]:
        """Return current FAN mode."""
        if self._thrm.running_state is None:
            return FAN_AUTO
        if self._thrm.running_state & (
            RunningState.FAN | RunningState.FAN_STAGE_2 | RunningState.FAN_STAGE_3
        ):
            return FAN_ON
        return FAN_AUTO

    @property
    def fan_modes(self) -> Optional[List[str]]:
        """Return supported FAN modes."""
        if not self._fan:
            return None
        return [FAN_AUTO, FAN_ON]

    @property
    def hvac_action(self) -> Optional[str]:
        """Return the current HVAC action."""
        # Prefer the PI demand attributes when the device reports them;
        # otherwise fall back to running mode / running state.
        if (
            self._thrm.pi_heating_demand is None
            and self._thrm.pi_cooling_demand is None
        ):
            return self._rm_rs_action
        return self._pi_demand_action

    @property
    def _rm_rs_action(self) -> Optional[str]:
        """Return the current HVAC action based on running mode and running state."""
        running_mode = self._thrm.running_mode
        if running_mode == SystemMode.HEAT:
            return CURRENT_HVAC_HEAT
        if running_mode == SystemMode.COOL:
            return CURRENT_HVAC_COOL

        running_state = self._thrm.running_state
        if running_state and running_state & (
            RunningState.FAN | RunningState.FAN_STAGE_2 | RunningState.FAN_STAGE_3
        ):
            return CURRENT_HVAC_FAN
        if self.hvac_mode != HVAC_MODE_OFF and running_mode == SystemMode.OFF:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_OFF

    @property
    def _pi_demand_action(self) -> Optional[str]:
        """Return the current HVAC action based on pi_demands."""
        heating_demand = self._thrm.pi_heating_demand
        if heating_demand is not None and heating_demand > 0:
            return CURRENT_HVAC_HEAT
        cooling_demand = self._thrm.pi_cooling_demand
        if cooling_demand is not None and cooling_demand > 0:
            return CURRENT_HVAC_COOL

        if self.hvac_mode != HVAC_MODE_OFF:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_OFF

    @property
    def hvac_mode(self) -> Optional[str]:
        """Return HVAC operation mode."""
        return SYSTEM_MODE_2_HVAC.get(self._thrm.system_mode)

    @property
    def hvac_modes(self) -> Tuple[str, ...]:
        """Return the list of available HVAC operation modes."""
        return SEQ_OF_OPERATION.get(self._thrm.ctrl_seqe_of_oper, (HVAC_MODE_OFF,))

    @property
    def precision(self):
        """Return the precision of the system."""
        return PRECISION_HALVES

    @property
    def preset_mode(self) -> Optional[str]:
        """Return current preset mode."""
        return self._preset

    @property
    def preset_modes(self) -> Optional[List[str]]:
        """Return supported preset modes."""
        return self._presets

    @property
    def supported_features(self):
        """Return the list of supported features."""
        features = self._supported_flags
        if HVAC_MODE_HEAT_COOL in self.hvac_modes:
            features |= SUPPORT_TARGET_TEMPERATURE_RANGE
        if self._fan is not None:
            # Bug fix: this previously mutated self._supported_flags instead
            # of the local copy, so the first read of this property reported
            # no SUPPORT_FAN_MODE and the getter had a lasting side effect.
            features |= SUPPORT_FAN_MODE
        return features

    @property
    def target_temperature(self):
        """Return the temperature we try to reach."""
        temp = None
        if self.hvac_mode == HVAC_MODE_COOL:
            if self.preset_mode == PRESET_AWAY:
                temp = self._thrm.unoccupied_cooling_setpoint
            else:
                temp = self._thrm.occupied_cooling_setpoint
        elif self.hvac_mode == HVAC_MODE_HEAT:
            if self.preset_mode == PRESET_AWAY:
                temp = self._thrm.unoccupied_heating_setpoint
            else:
                temp = self._thrm.occupied_heating_setpoint
        if temp is None:
            return temp
        return round(temp / ZCL_TEMP, 1)

    @property
    def target_temperature_high(self):
        """Return the upper bound temperature we try to reach."""
        if self.hvac_mode != HVAC_MODE_HEAT_COOL:
            return None
        if self.preset_mode == PRESET_AWAY:
            temp = self._thrm.unoccupied_cooling_setpoint
        else:
            temp = self._thrm.occupied_cooling_setpoint
        if temp is None:
            return temp
        return round(temp / ZCL_TEMP, 1)

    @property
    def target_temperature_low(self):
        """Return the lower bound temperature we try to reach."""
        if self.hvac_mode != HVAC_MODE_HEAT_COOL:
            return None
        if self.preset_mode == PRESET_AWAY:
            temp = self._thrm.unoccupied_heating_setpoint
        else:
            temp = self._thrm.occupied_heating_setpoint
        if temp is None:
            return temp
        return round(temp / ZCL_TEMP, 1)

    @property
    def temperature_unit(self):
        """Return the unit of measurement used by the platform."""
        return TEMP_CELSIUS

    @property
    def max_temp(self) -> float:
        """Return the maximum temperature."""
        temps = []
        if HVAC_MODE_HEAT in self.hvac_modes:
            temps.append(self._thrm.max_heat_setpoint_limit)
        if HVAC_MODE_COOL in self.hvac_modes:
            temps.append(self._thrm.max_cool_setpoint_limit)

        if not temps:
            return self.DEFAULT_MAX_TEMP
        return round(max(temps) / ZCL_TEMP, 1)

    @property
    def min_temp(self) -> float:
        """Return the minimum temperature."""
        temps = []
        if HVAC_MODE_HEAT in self.hvac_modes:
            temps.append(self._thrm.min_heat_setpoint_limit)
        if HVAC_MODE_COOL in self.hvac_modes:
            temps.append(self._thrm.min_cool_setpoint_limit)

        if not temps:
            return self.DEFAULT_MIN_TEMP
        return round(min(temps) / ZCL_TEMP, 1)

    async def async_added_to_hass(self):
        """Run when about to be added to hass."""
        await super().async_added_to_hass()
        await self.async_accept_signal(
            self._thrm, SIGNAL_ATTR_UPDATED, self.async_attribute_updated
        )

    async def async_attribute_updated(self, record):
        """Handle attribute update from device."""
        if (
            record.attr_name in (ATTR_OCCP_COOL_SETPT, ATTR_OCCP_HEAT_SETPT)
            and self.preset_mode == PRESET_AWAY
        ):
            # occupancy attribute is an unreportable attribute, but if we get
            # an attribute update for an "occupied" setpoint, there's a chance
            # occupancy has changed
            occupancy = await self._thrm.get_occupancy()
            if occupancy is True:
                self._preset = PRESET_NONE

        self.debug("Attribute '%s' = %s update", record.attr_name, record.value)
        self.async_write_ha_state()

    async def async_set_fan_mode(self, fan_mode: str) -> None:
        """Set fan mode."""
        if fan_mode not in self.fan_modes:
            self.warning("Unsupported '%s' fan mode", fan_mode)
            return

        if fan_mode == FAN_ON:
            mode = ThermostatFanMode.ON
        else:
            mode = ThermostatFanMode.AUTO

        await self._fan.async_set_speed(mode)

    async def async_set_hvac_mode(self, hvac_mode: str) -> None:
        """Set new target operation mode."""
        if hvac_mode not in self.hvac_modes:
            self.warning(
                "can't set '%s' mode. Supported modes are: %s",
                hvac_mode,
                self.hvac_modes,
            )
            return

        if await self._thrm.async_set_operation_mode(HVAC_MODE_2_SYSTEM[hvac_mode]):
            self.async_write_ha_state()

    async def async_set_preset_mode(self, preset_mode: str) -> None:
        """Set new preset mode."""
        if preset_mode not in self.preset_modes:
            self.debug("preset mode '%s' is not supported", preset_mode)
            return

        # Turn the currently active preset off before enabling the new one.
        if self.preset_mode not in (preset_mode, PRESET_NONE):
            if not await self.async_preset_handler(self.preset_mode, enable=False):
                self.debug("Couldn't turn off '%s' preset", self.preset_mode)
                return

        if preset_mode != PRESET_NONE:
            if not await self.async_preset_handler(preset_mode, enable=True):
                self.debug("Couldn't turn on '%s' preset", preset_mode)
                return
        self._preset = preset_mode
        self.async_write_ha_state()

    async def async_set_temperature(self, **kwargs):
        """Set new target temperature."""
        low_temp = kwargs.get(ATTR_TARGET_TEMP_LOW)
        high_temp = kwargs.get(ATTR_TARGET_TEMP_HIGH)
        temp = kwargs.get(ATTR_TEMPERATURE)
        hvac_mode = kwargs.get(ATTR_HVAC_MODE)

        if hvac_mode is not None:
            await self.async_set_hvac_mode(hvac_mode)

        thrm = self._thrm
        if self.hvac_mode == HVAC_MODE_HEAT_COOL:
            success = True
            if low_temp is not None:
                low_temp = int(low_temp * ZCL_TEMP)
                success = success and await thrm.async_set_heating_setpoint(
                    low_temp, self.preset_mode == PRESET_AWAY
                )
                self.debug("Setting heating %s setpoint: %s", low_temp, success)
            if high_temp is not None:
                high_temp = int(high_temp * ZCL_TEMP)
                success = success and await thrm.async_set_cooling_setpoint(
                    high_temp, self.preset_mode == PRESET_AWAY
                )
                # Bug fix: this debug line previously logged low_temp.
                self.debug("Setting cooling %s setpoint: %s", high_temp, success)
        elif temp is not None:
            temp = int(temp * ZCL_TEMP)
            if self.hvac_mode == HVAC_MODE_COOL:
                success = await thrm.async_set_cooling_setpoint(
                    temp, self.preset_mode == PRESET_AWAY
                )
            elif self.hvac_mode == HVAC_MODE_HEAT:
                success = await thrm.async_set_heating_setpoint(
                    temp, self.preset_mode == PRESET_AWAY
                )
            else:
                self.debug("Not setting temperature for '%s' mode", self.hvac_mode)
                return
        else:
            self.debug("incorrect %s setting for '%s' mode", kwargs, self.hvac_mode)
            return

        if success:
            self.async_write_ha_state()

    async def async_preset_handler(self, preset: str, enable: bool = False) -> bool:
        """Set the preset mode via handler.

        Dispatches to ``async_preset_handler_<preset>`` defined by subclasses.
        """
        handler = getattr(self, f"async_preset_handler_{preset}")
        return await handler(enable)
@STRICT_MATCH(
    channel_names={CHANNEL_THERMOSTAT, "sinope_manufacturer_specific"},
    manufacturers="Sinope Technologies",
)
class SinopeTechnologiesThermostat(Thermostat):
    """Sinope Technologies Thermostat.

    Adds the away preset and keeps the thermostat's clock display in sync via
    a manufacturer-specific cluster attribute.
    """

    # Sinope's Zigbee manufacturer code, used for manufacturer-specific writes.
    manufacturer = 0x119C
    # Randomized per instance so a fleet of thermostats doesn't refresh its
    # clock at the same moment.
    update_time_interval = timedelta(minutes=randint(45, 75))

    def __init__(self, unique_id, zha_device, channels, **kwargs):
        """Initialize ZHA Thermostat instance."""
        super().__init__(unique_id, zha_device, channels, **kwargs)
        self._presets = [PRESET_AWAY, PRESET_NONE]
        self._supported_flags |= SUPPORT_PRESET_MODE
        self._manufacturer_ch = self.cluster_channels["sinope_manufacturer_specific"]

    @callback
    def _async_update_time(self, timestamp=None) -> None:
        """Update thermostat's time display."""
        # Seconds elapsed since 2000-01-01, computed in local wall-clock time
        # (tzinfo stripped so the subtraction against a naive datetime works).
        secs_2k = (
            dt_util.now().replace(tzinfo=None) - datetime(2000, 1, 1, 0, 0, 0, 0)
        ).total_seconds()

        self.debug("Updating time: %s", secs_2k)
        # Fire-and-forget write of the manufacturer-specific clock attribute.
        self._manufacturer_ch.cluster.create_catching_task(
            self._manufacturer_ch.cluster.write_attributes(
                {"secs_since_2k": secs_2k}, manufacturer=self.manufacturer
            )
        )

    async def async_added_to_hass(self):
        """Run when about to be added to Hass."""
        await super().async_added_to_hass()
        # Periodic clock refresh plus one immediate sync on startup.
        async_track_time_interval(
            self.hass, self._async_update_time, self.update_time_interval
        )
        self._async_update_time()

    async def async_preset_handler_away(self, is_away: bool = False) -> bool:
        """Set occupancy."""
        # 0 = unoccupied ("away"), 1 = occupied.
        mfg_code = self._zha_device.manufacturer_code
        res = await self._thrm.write_attributes(
            {"set_occupancy": 0 if is_away else 1}, manufacturer=mfg_code
        )

        self.debug("set occupancy to %s. Status: %s", 0 if is_away else 1, res)
        return res
@STRICT_MATCH(
    channel_names=CHANNEL_THERMOSTAT,
    aux_channels=CHANNEL_FAN,
    manufacturers="Zen Within",
)
class ZenWithinThermostat(Thermostat):
    """Zen Within Thermostat implementation."""

    @property
    def _rm_rs_action(self) -> Optional[str]:
        """Return the current HVAC action derived from the running-state bits."""
        state = self._thrm.running_state
        if state is None:
            return None
        # Test the stage flags in priority order: heating, cooling, then fan.
        priorities = (
            (RunningState.HEAT | RunningState.HEAT_STAGE_2, CURRENT_HVAC_HEAT),
            (RunningState.COOL | RunningState.COOL_STAGE_2, CURRENT_HVAC_COOL),
            (
                RunningState.FAN | RunningState.FAN_STAGE_2 | RunningState.FAN_STAGE_3,
                CURRENT_HVAC_FAN,
            ),
        )
        for mask, action in priorities:
            if state & mask:
                return action
        if self.hvac_mode != HVAC_MODE_OFF:
            return CURRENT_HVAC_IDLE
        return CURRENT_HVAC_OFF
@STRICT_MATCH(
    channel_names=CHANNEL_THERMOSTAT,
    aux_channels=CHANNEL_FAN,
    manufacturers="Centralite",
    models="3157100",
)
class CentralitePearl(ZenWithinThermostat):
    """Centralite Pearl Thermostat implementation.

    Behaves exactly like the Zen Within thermostat; this subclass only
    narrows the registry match to the Centralite 3157100 model.
    """
| apache-2.0 |
compas-dev/compas | src/compas/numerical/ga/ga.py | 1 | 31945 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import re
import random
import json
import copy
__all__ = ['ga']
# Template for the human-readable report printed by GA.__str__ / GA.summary.
# Fix: the "fitness function type" label previously read "fitnes".
TPL = """
GA summary
==========
- fitness function name: {}
- fitness function type : {}
- number of generations : {}
- number of individuals : {}
- number of variables : {}
- optimal individual : {}
- optimal fitness value : {}
"""
def ga(fit_function,
       fit_type,
       num_var,
       boundaries,
       num_gen=100,
       num_pop=100,
       num_elite=10,
       mutation_probability=0.01,
       n_cross=1,
       num_bin_dig=None,
       num_pop_init=None,
       num_gen_init_pop=None,
       start_from_gen=False,
       min_fit=None,
       fit_name=None,
       fargs=None,
       fkwargs=None,
       output_path=None,
       input_path=None,
       print_refresh=1):
    """Genetic Algorithm optimisation.

    Parameters
    ----------
    fit_function : callable
        The function used by the :class'GA' to determine the fitness value. The function
        must have as a first argument a list of variables that determine the
        fitness value. Other arguments and keyword arguments can be used to feed
        the function relevant data.
    fit_type : str
        String that indicates if the fitness function is to be minimized or maximized.
        "min" for minimization and "max" for maximization.
    num_var : int
        The number of variables used by the fitness function.
    boundaries : list
        The minimum and maximum values each variable is allowed to have. Must be
        a ``num_var`` long list of tuples in the form [(min, max),...].
    num_gen : int, optional [100]
        The maximum number of generations.
    num_pop : int, optional [100]
        The number of individuals in the population. Must be an even number.
    num_elite : int, optional [10]
        The number of individuals in the elite population. Must be an even number.
    mutation_probability : float, optional [0.01]
        Float from 0 to 1. Percentage of genes that will be mutated.
    n_cross : int, optional [1]
        Number of crossover points used in the crossover operator.
    num_bin_dig : list, optional [None]
        Number of genes used to codify each variable. Must be a ``num_var`` long
        list of integers. If None is given, each variable will be coded with a
        8 digit binary number, corresponding to 256 steps.
    num_pop_init : int, optional [None]
        The number of individuals in the population for the first ``num_gen_init_pop``
        generations.
    num_gen_init_pop : int, optional
        The number of generations to keep a ``num_pop_init`` size population for.
    start_from_gen : int, optional [None]
        The generation number to restart a previous optimization process.
    min_fit : float, optional [None]
        A target fitness value. If the GA finds a solution with a fitness value
        equal or better than ``min_fit``, the optimization is stopped.
    fit_name : str, optional [None]
        The name of the optimisation. If None is given, the name of the fitness
        function is used.
    fargs : list, optional [None]
        Arguments to be fed to the fitness function.
    fkwargs : dict, optional [None]
        Keyword arguments to be fed to the fitness function.
    output_path : str, optional [None]
        Path for the optimization result files.
    input_path : str, optional [None]
        Path to the fitness function file.
    print_refresh : int
        Print current generation summary every ``print_refresh`` generations.

    Returns
    -------
    ga_ : object
        The resulting :class'GA' instance.

    Notes
    -----
    For more info, see [1]_.

    References
    ----------
    .. [1] Holland, J. H., *Adaptation in Natural and Artificial Systems*, 1st edn,
        The University of Michigan, Ann Arbor, 1975.

    Examples
    --------
    >>>
    """
    ga_ = GA()
    ga_.fit_name = fit_name or fit_function.__name__
    ga_.fit_type = fit_type
    ga_.num_gen = num_gen
    ga_.num_pop = num_pop
    ga_.num_pop_init = num_pop_init
    ga_.num_gen_init_pop = num_gen_init_pop
    ga_.num_elite = num_elite
    ga_.num_var = num_var
    ga_.mutation_probability = mutation_probability
    # Bug fix: this previously assigned to ``ga_.ncross``, a name the GA class
    # never reads, so the requested number of crossover points was silently
    # ignored and GA.n_cross stayed at its default of 1.
    ga_.n_cross = n_cross
    ga_.start_from_gen = start_from_gen
    ga_.min_fit = min_fit
    ga_.boundaries = boundaries
    ga_.num_bin_dig = num_bin_dig or [8] * num_var
    ga_.max_bin_dig = max(ga_.num_bin_dig)
    ga_.total_bin_dig = sum(ga_.num_bin_dig)
    ga_.fargs = fargs or {}
    ga_.fkwargs = fkwargs or {}
    ga_.fit_function = fit_function
    ga_.output_path = output_path or ''
    ga_.input_path = input_path or ''
    ga_.print_refresh = print_refresh
    ga_.ga_optimize()
    return ga_
class GA(object):
"""This class contains a binary coded, single objective genetic algorithm.
Attributes
----------
best_fit : float
The fitness value of the best performing solution for the current generation.
best_individual_index: int
The index of the best performing individual for the current generation.
boundaries : dict
This dictionary contains all the max and min bounds for each optimization variable.
``GA.boundaries[index] = [min,max]``.
current_pop : dict
This dictionary contains the coded, decoded and scaled population of the current
generation, as well as their fitness values.
elite_pop : dict
This dictionary contains the coded, decoded and scaled data for the elite
population of the current generation, as well as their fitness values.
end_gen : int
The maximum number of generations the GA is allowed to run.
fit_function: function
The fitness function.
fit_name : str
The name of the python file containing the fitness function (without extension).
fit_type : str
String that indicates if the fitness function is to be minimized or maximized.
"min" for minimization and "max" for maximization.
input_path: str
Path to the fitness function file.
fkwargs : dict
This dictionary will be passed as a keyword argument to all fitness functions.
It can be used to pass required data, objects, that are not related to the
optimmization variables but are required to run the fitness function.
max_bin_digit : int
The maximum number of binary digits that are used to code a variable values.
The number of binary digits assigned to code a variable determine the number
of discrete steps inside the variable bounds. For example, an 8 digit binary
number will produce 256 steps.
min_fit : float
An end condition related to fitness value. If it is set, the GA will stop
when any individual achieves a fitness equal or better that ``GA.min_fit``. If
it is not set, the GA will end after ``GA.num_gen`` generations.
min_fit_flag : bool
Flag the GA uses to determine if the ``GA.min_fit`` value has been achieved.
mutation_probability : float
Determines the probability that the mutation operator will mutate each gene.
For each gene a random number ``x`` between 0 and 1 is generated, if ``x``
is higher than ``GA.mutation_probability`` it will be mutated.
num_bin_dig : list
List of the number of binary digits for each variable. The number of binary
digits assigned to code a variable determine the number of discrete steps
inside the variable bounds. For example, an 8 digit binary number will
produce 256 steps.
num_elite : int
The number of top performing individuals in the population that are not subject
to genetic operators, but are simply copied to the next generation.
num_gen : int
The number of generations the GA will run.
num_pop : int
The number of individuals per generation.
num_var : int
The number of variables in the optimization problem.
output_path : str
The path to which the GA outputs result files.
start_from_gen : int
The generation from which the GA will restart. If this number is given, the GA
will look for generation output files in the ``GA.input_path`` and if found,
the GA will resume optimization from the ``GA.start_from_gen`` generation.
total_bin_dig : int
The total number of binary digits. It is the sum of the ``GA.num_bin_dig`` of
all variables.
ind_fit_dict : dict
This dictionary keeps track of already evaluated solutions to avoid duplicate
fitness function calls.
"""
def __init__(self):
""" Initializes the GA object."""
self.fkwargs = {}
self.fargs = {}
self.best_fit = None
self.best_individual_index = None
self.boundaries = {}
self.current_pop = {'binary': [], 'decoded': [], 'scaled': [], 'fit_value': []}
self.elite_pop = {'binary': [], 'decoded': [], 'scaled': [], 'fit_value': []}
self.end_gen = None
self.fit_function = None
self.fit_name = ''
self.fit_type = None
self.input_path = None
self.max_bin_dig = []
self.min_fit = None
self.min_fit_flag = False
self.mutation_probability = 0
self.n_cross = 1
self.num_bin_dig = 0
self.num_elite = 0
self.num_gen = 0
self.num_gen_init_pop = 1
self.num_pop = 0
self.num_pop_init = None
self.num_pop_temp = None
self.num_var = 0
self.output_path = []
self.start_from_gen = False
self.total_bin_dig = 0
self.check_diversity = False
self.ind_fit_dict = {}
self.print_refresh = 1
def __str__(self):
    """Compile a summary of the GA.

    Returns
    -------
    str
        A formatted report with the optimisation settings and, when
        available, the best individual and its fitness value.
    """
    fit_name = self.fit_name
    fit_type = self.fit_type
    num_gen = self.num_gen
    num_pop = self.num_pop
    num_var = self.num_var
    # Bug fix: before any generation has been evaluated,
    # ``best_individual_index`` is None and indexing with it raised a
    # TypeError outside the original try block, so printing a fresh GA
    # instance crashed. Guard both lookups instead.
    try:
        best = self.best_individual_index, self.current_pop['scaled'][self.best_individual_index]
    except (TypeError, IndexError):
        best = self.best_individual_index, None
    try:
        fit = self.current_pop['fit_value'][self.best_individual_index]
    except (TypeError, IndexError):
        fit = None
    return TPL.format(fit_name, fit_type, num_gen, num_pop, num_var, best, fit)
def summary(self):
    """Print a summary of the GA.

    Convenience wrapper that prints the report built by ``__str__``.
    """
    print(self)
def ga_optimize(self):
    """This is the main optimization function, this function performs the GA optimization,
    performing all genetic operators.

    Loop outline per generation: decode and scale the binary population,
    evaluate fitness (elites are carried over unevaluated after generation 0),
    write the generation to disk, then either apply selection/crossover/
    mutation or, on the last generation (or once ``min_fit`` is reached),
    finalize and print the summary.
    """
    # Persist run settings; also rewritten at the end with the final state.
    self.write_ga_json_file()
    if self.num_pop_init:
        # Temporarily run with a larger initial population; the normal size
        # is restored after ``num_gen_init_pop`` generations.
        self.num_pop_temp = copy.deepcopy(self.num_pop)
        self.num_pop = self.num_pop_init
    if self.start_from_gen:
        # Resume from a previously written generation file.
        self.current_pop = self.get_pop_from_pop_file(self.start_from_gen)
        start_gen_number = self.start_from_gen + 1
    else:
        self.current_pop['binary'] = self.generate_random_bin_pop()
        start_gen_number = 0

    for generation in range(start_gen_number, self.num_gen):
        self.current_pop['decoded'] = self.decode_binary_pop(self.current_pop['binary'])
        self.current_pop['scaled'] = self.scale_population(self.current_pop['decoded'])

        if generation == 0:
            num = self.num_pop
            self.current_pop['fit_value'] = [[]] * num
        else:
            # Elites kept their fitness values from the previous generation.
            num = self.num_pop - self.num_elite

        for i in range(num):
            self.current_pop['fit_value'][i] = self.evaluate_fitness(i)

        if self.num_pop_init and generation >= self.num_gen_init_pop:
            # Shrink the enlarged initial population back to the normal size,
            # keeping only the best individuals.
            self.num_pop = self.num_pop_temp
            self.current_pop = self.select_elite_pop(self.current_pop, num_elite=self.num_pop)

        self.write_out_file(generation)

        if self.min_fit:
            self.update_min_fit_flag()
        else:
            self.get_best_fit()

        if generation % self.print_refresh == 0:
            print('generation ', generation, ' best fit ', self.best_fit, 'min fit', self.min_fit)

        if self.check_diversity:
            print('num repeated individuals', self.check_pop_diversity())

        if generation < self.num_gen - 1 and self.min_fit_flag is False:
            # Genetic operators act on the non-elite part of the population
            # (n-e individuals); elites are copied over unchanged.
            self.elite_pop = self.select_elite_pop(self.current_pop)
            self.tournament_selection()  # n-e
            self.create_mating_pool()  # n-e
            self.npoint_crossover()  # n-e
            self.random_mutation()  # n-e
            self.add_elite_to_current()  # n
        else:
            # Last generation reached or target fitness achieved: finalize.
            self.end_gen = generation
            self.get_best_individual_index()
            self.write_ga_json_file()
            print(self)
            break
def evaluate_fitness(self, index):
chromo = ''.join(str(y) for x in self.current_pop['binary'][index] for y in x)
fit = self.ind_fit_dict.setdefault(chromo, None)
if not fit:
fit = self.fit_function(self.current_pop['scaled'][index], *self.fargs, **self.fkwargs)
self.ind_fit_dict[chromo] = fit
return fit
def check_pop_diversity(self):
seen = []
all_ = []
for ind in self.current_pop['scaled']:
if ind not in seen:
seen.append(ind)
all_.append(ind)
return len(all_) - len(seen)
def decode_binary_pop(self, bin_pop):
"""Decodes the binary population, from binary to unscaled variable values
Parameters
----------
bin_pop: list
The binary population list.
Returns
-------
decoded_pop:
The decoded population list.
"""
decoded_pop = [[[]] * self.num_var for i in range(self.num_pop)]
for j in range(self.num_pop):
for i in range(self.num_var):
value = 0
chrom = bin_pop[j][i]
for u, gene in enumerate(chrom):
if gene == 1:
value = value + 2**u
decoded_pop[j][i] = value
return decoded_pop
def generate_random_bin_pop(self):
""" Generates random binary population of ``GA.num_pop`` size.
Returns
-------
random_bin_pop: list
A list containing a random binary population.
"""
random_bin_pop = [[[]] * self.num_var for i in range(self.num_pop)]
for j in range(self.num_pop):
for i in range(self.num_var):
random_bin_pop[j][i] = [random.randint(0, 1) for u in range(self.num_bin_dig[i])]
return random_bin_pop
def scale_population(self, decoded_pop):
"""Scales the decoded population, variable values are scaled according to each
of their bounds contained in ``GA.boundaries``.
Parameters
----------
decoded_pop: list
The decoded population list.
Returns
-------
scaled_pop: list
The scaled ppopulation list.
"""
scaled_pop = [[[]] * self.num_var for i in range(self.num_pop)]
for j in range(self.num_pop):
for i in range(self.num_var):
maxbin = float((2 ** self.num_bin_dig[i]) - 1)
scaled_pop[j][i] = self.boundaries[i][0] + (self.boundaries[i][1] - self.boundaries[i][0]) * decoded_pop[j][i] / maxbin
return scaled_pop
def tournament_selection(self):
"""Performs the tournament selection operator on the current population.
"""
pop_a = []
pop_b = []
indices = range(self.num_pop)
pop_a = random.sample(indices, self.num_pop - self.num_elite)
pop_b = random.sample(indices, self.num_pop - self.num_elite)
self.mp_indices = []
for i in range(self.num_pop - self.num_elite):
fit_a = self.current_pop['fit_value'][pop_a[i]]
fit_b = self.current_pop['fit_value'][pop_b[i]]
if self.fit_type == 'min':
if fit_a < fit_b:
self.mp_indices.append(pop_a[i])
else:
self.mp_indices.append(pop_b[i])
elif self.fit_type == 'max':
if fit_a > fit_b:
self.mp_indices.append(pop_a[i])
else:
self.mp_indices.append(pop_b[i])
def select_elite_pop(self, pop, num_elite=None):
"""Saves the elite population in the elite population dictionary
Parameters
----------
pop: dict
A population dictionary
Returns
-------
elite_pop: dict
The elite population dictionary.
"""
if self.fit_type == 'min':
sorted_ind = self.get_sorting_indices(self.current_pop['fit_value'])
elif self.fit_type == 'max':
sorted_ind = self.get_sorting_indices(self.current_pop['fit_value'], reverse=True)
else:
raise ValueError('User selected fit_type is wrong. Use "min" or "max" only')
if not num_elite:
num_elite = self.num_elite
elite_pop = {'binary': [], 'decoded': [], 'scaled': [], 'fit_value': []}
for i in range(num_elite):
elite_pop['binary'].append(pop['binary'][sorted_ind[i]])
elite_pop['decoded'].append(pop['decoded'][sorted_ind[i]])
elite_pop['scaled'].append(pop['scaled'][sorted_ind[i]])
elite_pop['fit_value'].append(pop['fit_value'][sorted_ind[i]])
return elite_pop
def get_sorting_indices(self, floats, reverse=False):
    """Return the indices that would sort a list of floats.

    If a value repeats, only its first occurrence keeps its position in the
    ordering; indices of repeats are pushed to the end of the result (they
    are internally replaced by +/- infinity before sorting).

    Parameters
    ----------
    floats: list
        The list of floats to be sorted.
    reverse: bool
        If True, sort from largest to smallest.

    Returns
    -------
    sorting_index: list
        The list of indices that would sort ``floats``.
    """
    sentinel = float('-inf') if reverse else float('inf')
    deduped = []
    for value in floats:
        # Membership test against the growing list (including sentinels)
        # mirrors the de-duplication behaviour of the original code.
        deduped.append(sentinel if value in deduped else value)
    order = sorted(range(len(deduped)), key=lambda idx: (deduped[idx], idx))
    if reverse:
        order = order[::-1]
    return order
def create_mating_pool(self):
    """Creates two lists of chromosomes to be used by the crossover operator.

    The first half of ``self.mp_indices`` feeds ``mating_pool_a`` and the
    second half feeds ``mating_pool_b``.  Each chromosome is the
    concatenation of the per-variable bit lists of one selected individual.
    """
    self.mating_pool_a = []
    self.mating_pool_b = []
    for i in range(int((self.num_pop - self.num_elite) / 2)):
        chrom_a = []
        chrom_b = []
        for j in range(self.num_var):
            # += extends the chromosome with the j-th variable's bit list.
            chrom_a += self.current_pop['binary'][self.mp_indices[i]][j]
            chrom_b += self.current_pop['binary'][self.mp_indices[i + (int((self.num_pop - self.num_elite) / 2))]][j]
        self.mating_pool_a.append(chrom_a)
        self.mating_pool_b.append(chrom_b)
def simple_crossover(self):
    """Performs the simple crossover operator. Individuals in ``GA.mating_pool_a`` are
    combined with individuals in ``GA.mating_pool_b`` using a single, randomly selected
    crossover point.
    """
    self.current_pop = {'binary': [], 'decoded': [], 'scaled': [], 'fit_value': []}
    # NOTE(review): [[[]] * num_var for ...] repeats references to one empty
    # list per row; safe here only because every slot is re-assigned below.
    self.current_pop['binary'] = [[[]] * self.num_var for i in range(self.num_pop)]
    for j in range(int((self.num_pop - self.num_elite) / 2)):
        # One crossover point strictly inside the chromosome.
        cross = random.randint(1, self.total_bin_dig - 1)
        a = self.mating_pool_a[j]
        b = self.mating_pool_b[j]
        # Children: prefix of one parent + suffix of the other.
        c = a[:cross] + b[cross:]
        d = b[:cross] + a[cross:]
        for i in range(self.num_var):
            # Consume the child chromosomes variable by variable.
            variable_a = c[:self.num_bin_dig[i]]
            variable_b = d[:self.num_bin_dig[i]]
            del c[:self.num_bin_dig[i]]
            del d[:self.num_bin_dig[i]]
            self.current_pop['binary'][j][i] = variable_a
            self.current_pop['binary'][j + (int((self.num_pop - self.num_elite) / 2))][i] = variable_b
def npoint_crossover(self):
    """Performs the n-point crossover operator. Individuals in ``GA.mating_pool_a`` are
    combined with individuals in ``GA.mating_pool_b`` using ``self.n_cross``
    randomly selected crossover points.

    Bug fixed: the per-variable slices now consume the chromosome as they
    go (as ``simple_crossover`` does).  Previously every variable was read
    from the start of the chromosome, so all variables after the first
    received the wrong bits.
    """
    self.current_pop = {'binary': [], 'decoded': [], 'scaled': [], 'fit_value': []}
    self.current_pop['binary'] = [[[]] * self.num_var for i in range(self.num_pop)]
    half = int((self.num_pop - self.num_elite) / 2)
    for j in range(half):
        # Copy so the mating pools are never mutated (relevant if n_cross == 0).
        a = list(self.mating_pool_a[j])
        b = list(self.mating_pool_b[j])
        cross_list = sorted(random.sample(range(1, self.total_bin_dig - 1), self.n_cross))
        for cross in cross_list:
            c = a[:cross] + b[cross:]
            d = b[:cross] + a[cross:]
            a = d
            b = c
        for i in range(self.num_var):
            variable_a = a[:self.num_bin_dig[i]]
            variable_b = b[:self.num_bin_dig[i]]
            del a[:self.num_bin_dig[i]]
            del b[:self.num_bin_dig[i]]
            self.current_pop['binary'][j][i] = variable_a
            self.current_pop['binary'][j + half][i] = variable_b
def random_mutation(self):
    """Flip genes (0 <-> 1) with probability ``GA.mutation_probability``.

    Applies to every bit of every variable of the non-elite individuals of
    the current population, mutating ``self.current_pop['binary']`` in place.
    """
    binaries = self.current_pop['binary']
    for individual in range(self.num_pop - self.num_elite):
        for var in range(self.num_var):
            gene = binaries[individual][var]
            for bit in range(self.num_bin_dig[var]):
                if random.random() < self.mutation_probability:
                    gene[bit] = 1 if gene[bit] == 0 else 0
def code_decoded(self, decoded_pop):
    """Return a binary-coded population from a decoded (integer) population.

    Each variable value is encoded least-significant-bit first as a list of
    0/1 ints, padded with zeros up to ``self.num_bin_dig[j]`` digits.

    Parameters
    ----------
    decoded_pop: list
        The decoded population to be coded.

    Returns
    -------
    binary_pop: list
        The binary population list.
    """
    binary_pop = [[[]] * self.num_var for _ in range(self.num_pop)]
    for ind in range(self.num_pop):
        for var in range(self.num_var):
            width = self.num_bin_dig[var]
            # Bit-shift extraction is equivalent to reversing bin(value)
            # and right-padding with '0' as the original implementation did.
            binary_pop[ind][var] = [(decoded_pop[ind][var] >> k) & 1 for k in range(width)]
    return binary_pop
def unscale_pop(self, scaled_pop):
    """Return an unscaled population from a scaled one.

    Each variable value is mapped from its bounds back to the integer range
    ``[0, x]`` where ``x`` is the highest number described by the number of
    binary digits used to encode that variable (e.g. 8 digits -> 0..255).

    Parameters
    ----------
    scaled_pop: dict
        The scaled population dictionary.

    Returns
    -------
    unscaled_pop: dict
        The unscaled population dictionary.
    """
    unscaled_pop = {}
    for ind in range(self.num_pop):
        individual = {}
        for var in range(self.num_var):
            bnd = self.boundaries[var]
            span = abs(bnd[1] - bnd[0])
            top = self.get_max_value_from_bin_big(self.num_bin_dig[var])
            fraction = (scaled_pop[ind][var] - bnd[0]) / float(span)
            individual[var] = int(fraction * top)
        unscaled_pop[ind] = individual
    return unscaled_pop
def get_max_value_from_bin_big(self, bin_dig):
    """Return the highest number described by a ``bin_dig`` long binary number.

    Parameters
    ----------
    bin_dig: int
        The number of digits in the binary number.

    Returns
    -------
    value: int
        ``2 ** bin_dig - 1`` (e.g. 255 for 8 digits).
    """
    # The previous implementation built an unused string of '1' characters
    # and summed powers of two in a loop; this closed form is equivalent.
    return (2 ** bin_dig) - 1
def write_out_file(self, generation):
    """Write the population data of a given generation to a text file.

    Creates ``generation_XXXXX_population.txt`` in ``self.output_path``
    containing the generation number, the population size, each
    individual's scaled variables (one comma-separated line each) and each
    individual's fitness value.

    Parameters
    ----------
    generation: int
        The generation number.
    """
    filename = 'generation_' + "%05d" % generation + '_population' + ".txt"
    # ``with`` guarantees the file is closed even if a write fails
    # (the previous version leaked the handle on error).
    with open(self.output_path + (str(filename)), "w") as pf_file:
        pf_file.write('Generation \n')
        pf_file.write(str(generation) + '\n')
        pf_file.write('\n')
        pf_file.write('Number of individuals per generation\n')
        pf_file.write(str(self.num_pop))
        pf_file.write('\n')
        pf_file.write('\n')
        pf_file.write('Population scaled variables \n')
        for i in range(self.num_pop):
            pf_file.write(str(i) + ',')
            for f in range(self.num_var):
                pf_file.write(str(self.current_pop['scaled'][i][f]))
                pf_file.write(',')
            pf_file.write('\n')
        pf_file.write('\n')
        pf_file.write('Population fitness value \n')
        for i in range(self.num_pop):
            pf_file.write(str(i) + ',')
            pf_file.write(str(self.current_pop['fit_value'][i]))
            pf_file.write('\n')
        pf_file.write('\n')
        pf_file.write('\n')
def add_elite_to_current(self):
    """Copy the elite individuals into the tail of the current population.

    The last ``num_elite`` slots of the 'binary', 'decoded', 'scaled' and
    'fit_value' lists of ``self.current_pop`` are overwritten with the
    corresponding entries of ``self.elite_pop``.
    """
    # Fixed: the original initialised 'decoded' twice (copy/paste
    # duplicate); the redundant line has been removed.
    self.current_pop['decoded'] = [[[]] * self.num_var for i in range(self.num_pop)]
    self.current_pop['scaled'] = [[[]] * self.num_var for i in range(self.num_pop)]
    self.current_pop['fit_value'] = [[]] * self.num_pop
    offset = self.num_pop - self.num_elite
    for i in range(self.num_elite):
        for key in ('binary', 'decoded', 'scaled', 'fit_value'):
            self.current_pop[key][offset + i] = self.elite_pop[key][i]
def make_ga_input_data(self):
    """Return a dict with the most relevant genetic data of this ``GA``.

    This is the data required to restart a genetic optimization process or
    to launch a visualization using
    ``compas_ga.visualization.ga_visualization``.

    Returns
    -------
    data: dict
        A dictionary containing genetic data.
    """
    keys = ('num_var', 'num_pop', 'num_gen', 'boundaries', 'num_bin_dig',
            'mutation_probability', 'fit_name', 'fit_type', 'start_from_gen',
            'max_bin_dig', 'total_bin_dig', 'output_path', 'num_elite',
            'min_fit', 'end_gen', 'best_individual_index')
    return {key: getattr(self, key) for key in keys}
def write_ga_json_file(self):
    """Dump the GA's relevant data to ``<output_path><fit_name>.json``.

    The resulting file can be consumed by
    ``compas_ga.visualization.ga_visualization``.
    """
    target = self.output_path + self.fit_name + '.json'
    with open(target, 'w') as fh:
        json.dump(self.make_ga_input_data(), fh)
def update_min_fit_flag(self):
    """Set ``self.min_fit_flag`` once the target fitness has been reached.

    Also updates ``self.best_fit`` with the best fitness value of the
    current population (min or max depending on ``self.fit_type``).
    """
    values = self.current_pop['fit_value']
    if self.fit_type == 'min':
        self.best_fit = min(values)
        reached = self.best_fit <= self.min_fit
    elif self.fit_type == 'max':
        self.best_fit = max(values)
        reached = self.best_fit >= self.min_fit
    else:
        return
    if reached:
        self.min_fit_flag = True
def get_best_fit(self):
    """Save the best fitness value of the current population in ``GA.best_fit``."""
    # Dispatch table replaces the if/elif chain; unknown fit_type is a no-op,
    # matching the original behaviour.
    selector = {'min': min, 'max': max}.get(self.fit_type)
    if selector is not None:
        self.best_fit = selector(self.current_pop['fit_value'])
def get_pop_from_pop_file(self, gen):
    """Reads the population file corresponding to the ``gen`` generation and returns
    the saved population data. The population file must be in ``GA.input_path``.

    Parameters
    ----------
    gen: int
        The generation index.

    Returns
    -------
    file_pop: dict
        The population dictionary contained in the file.
    """
    filename = 'generation_' + "%05d" % gen + '_population' + ".txt"
    filename = self.input_path + filename
    pf_file = open(filename, 'r')
    lines = pf_file.readlines()
    pf_file.close()
    file_pop = {}
    file_pop['scaled'] = [[[]] * self.num_var for i in range(self.num_pop)]
    file_pop['fit_value'] = [[[]] * self.num_var for i in range(self.num_pop)]
    for i in range(self.num_pop):
        # Fixed layout written by write_out_file: the scaled-variable lines
        # start at line index 7, the fitness lines 2 lines after that block.
        line_scaled = lines[i + 7]
        line_fit = lines[i + 9 + self.num_pop]
        string_scaled = re.split(',', line_scaled)
        string_fit = re.split(',', line_fit)
        string_fit = string_fit[1]
        # Drop the trailing newline field and the leading individual index.
        del string_scaled[-1]
        del string_scaled[0]
        scaled = [float(j) for j in string_scaled]
        fit_value = float(string_fit)
        file_pop['fit_value'][i] = fit_value
        for j in range(len(scaled)):
            file_pop['scaled'][i][j] = scaled[j]
    # Rebuild the unscaled and binary representations from the scaled data.
    file_pop['decoded'] = self.unscale_pop(file_pop['scaled'])
    file_pop['binary'] = self.code_decoded(file_pop['decoded'])
    return file_pop
def get_best_individual_index(self):
    """Save the index of the best performing individual of the current
    population in ``GA.best_individual_index``.
    """
    # list() replaces the redundant index-based comprehension copy.
    fit_values = list(self.current_pop['fit_value'])
    if self.fit_type == 'min':
        indices = self.get_sorting_indices(fit_values)
    elif self.fit_type == 'max':
        indices = self.get_sorting_indices(fit_values, reverse=True)
    self.best_individual_index = indices[0]
| mit |
bbbenja/SickRage | lib/sqlalchemy/dialects/mysql/pyodbc.py | 79 | 2617 | # mysql/pyodbc.py
# Copyright (C) 2005-2014 the SQLAlchemy authors and contributors <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
"""
.. dialect:: mysql+pyodbc
:name: PyODBC
:dbapi: pyodbc
:connectstring: mysql+pyodbc://<username>:<password>@<dsnname>
:url: http://pypi.python.org/pypi/pyodbc/
Limitations
-----------
The mysql-pyodbc dialect is subject to unresolved character encoding issues
which exist within the current ODBC drivers available.
(see http://code.google.com/p/pyodbc/issues/detail?id=25). Consider usage
of OurSQL, MySQLdb, or MySQL-connector/Python.
"""
from .base import MySQLDialect, MySQLExecutionContext
from ...connectors.pyodbc import PyODBCConnector
from ... import util
import re
class MySQLExecutionContext_pyodbc(MySQLExecutionContext):
    """Execution context that asks MySQL for the last insert id explicitly."""

    def get_lastrowid(self):
        """Return MySQL's LAST_INSERT_ID() for the current connection."""
        cursor = self.create_cursor()
        # try/finally so the cursor is closed even when execute/fetch fails
        # (the previous version leaked the cursor on error).
        try:
            cursor.execute("SELECT LAST_INSERT_ID()")
            lastrowid = cursor.fetchone()[0]
        finally:
            cursor.close()
        return lastrowid
class MySQLDialect_pyodbc(PyODBCConnector, MySQLDialect):
    """MySQL dialect running over the pyodbc DBAPI."""

    supports_unicode_statements = False
    execution_ctx_cls = MySQLExecutionContext_pyodbc

    pyodbc_driver_name = "MySQL"

    def __init__(self, **kw):
        """Force ``convert_unicode`` on by default.

        Works around http://code.google.com/p/pyodbc/issues/detail?id=25
        (character data returned undecoded by the ODBC layer).
        """
        kw.setdefault('convert_unicode', True)
        super(MySQLDialect_pyodbc, self).__init__(**kw)

    def _detect_charset(self, connection):
        """Sniff out the character set in use for connection results."""
        # Prefer 'character_set_results' for the current connection over the
        # value in the driver.  SET NAMES or individual variable SETs will
        # change the charset without updating the driver's view of the world.
        #
        # If it's decided that issuing that sort of SQL leaves you SOL, then
        # this can prefer the driver value.
        rs = connection.execute("SHOW VARIABLES LIKE 'character_set%%'")
        opts = dict([(row[0], row[1]) for row in self._compat_fetchall(rs)])
        for key in ('character_set_connection', 'character_set'):
            if opts.get(key, None):
                return opts[key]
        util.warn("Could not detect the connection character set. Assuming latin1.")
        return 'latin1'

    def _extract_error_code(self, exception):
        """Return the integer MySQL error code embedded in *exception*, or None.

        Fixed: the previous code called ``m.group(1)`` without checking that
        the regex matched, raising AttributeError for exception messages
        that contain no ``(NNNN)`` error code.
        """
        m = re.compile(r"\((\d+)\)").search(str(exception.args))
        if m is None:
            return None
        return int(m.group(1))

dialect = MySQLDialect_pyodbc
| gpl-3.0 |
google-code-export/evennia | game/gamesrc/objects/examples/object.py | 3 | 8567 | """
Template for Objects
Copy this module up one level and name it as you like, then
use it as a template to create your own Objects.
To make the default commands default to creating objects of your new
type (and also change the "fallback" object used when typeclass
creation fails), change settings.BASE_OBJECT_TYPECLASS to point to
your new class, e.g.
settings.BASE_OBJECT_TYPECLASS = "game.gamesrc.objects.myobj.MyObj"
Note that objects already created in the database will not notice
this change, you have to convert them manually e.g. with the
@typeclass command.
"""
from ev import Object as DefaultObject
class Object(DefaultObject):
    """
    Root typeclass for in-game Evennia objects.

    Copy this module up one level, rename it, and use it as a template for
    your own Objects.  Every custom typeclass must inherit from this class
    (or one of the other classes in this file, which all derive from
    BaseObject in src.objects.objects).  Never re-implement special Python
    methods such as ``__init__``, ``__getattribute__`` or ``__setattr__``;
    the typeclass system relies on them.

    The base object provides, among other things:

    * properties such as ``key``/``name``, ``aliases``, ``dbref``/``id``,
      ``dbobj``, ``typeclass``, ``date_created``, ``permissions``,
      ``player``/``sessid``, ``location``, ``home``, ``sessions``,
      ``has_player``, ``contents``, ``exits``, ``destination`` and
      ``is_superuser``;
    * handlers: ``locks`` (lock strings), ``db``/``ndb`` (persistent and
      non-persistent attribute storage), ``scripts``, ``cmdset`` and
      ``nicks``;
    * helper methods such as ``search()``, ``execute_cmd()``, ``msg()``,
      ``msg_contents()``, ``move_to()``, ``copy()``, ``delete()``,
      ``is_typeclass()``, ``swap_typeclass()``, ``access()`` and
      ``check_permstring()``;
    * engine hooks called at creation/deletion (``at_object_creation``,
      ``at_object_delete``, ``basetype_setup``), caching (``at_init``),
      puppeting (``at_pre_puppet``, ``at_post_puppet``,
      ``at_pre_unpuppet``, ``at_post_unpuppet``), movement
      (``at_before_move``, ``announce_move_from``, ``announce_move_to``,
      ``at_after_move``, ``at_object_leave``, ``at_object_receive``),
      traversal (``at_before_traverse``, ``at_after_traverse``,
      ``at_failed_traverse``), messaging (``at_msg_receive``,
      ``at_msg_send``, ``at_say``), appearance (``return_appearance``,
      ``at_desc``), item handling (``at_get``, ``at_drop``) and server
      lifecycle (``at_server_reload``, ``at_server_shutdown``).

    See src.objects.objects for the full method headers and hook
    signatures.
    """
    # No customizations yet: all behavior is inherited from DefaultObject.
    pass
| bsd-3-clause |
shsmith/electrum-server | run_electrum_server.py | 6 | 10908 | #!/usr/bin/env python
# Copyright(C) 2011-2016 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import argparse
import ConfigParser
import logging
import socket
import sys
import time
import threading
import json
import os
import imp
if os.path.dirname(os.path.realpath(__file__)) == os.getcwd():
imp.load_module('electrumserver', *imp.find_module('src'))
from electrumserver import storage, networks, utils
from electrumserver.processor import Dispatcher, print_log
from electrumserver.server_processor import ServerProcessor
from electrumserver.blockchain_processor import BlockchainProcessor
from electrumserver.stratum_tcp import TcpServer
logging.basicConfig()
# Sanity checks at import time: warn about 32-bit platforms (the mmap-backed
# database is fragile there) and refuse to run with root privileges.
if sys.maxsize <= 2**32:
    print "Warning: it looks like you are using a 32bit system. You may experience crashes caused by mmap"

if os.getuid() == 0:
    print "Do not run this program as root!"
    print "Run the install script to create a non-privileged user."
    sys.exit()
def attempt_read_config(config, filename):
    """Merge *filename* into *config*, silently skipping unreadable files."""
    try:
        with open(filename, 'r') as handle:
            config.readfp(handle)
    except IOError:
        # Missing/unreadable config files are tolerated; defaults apply.
        pass
def load_banner(config):
    """Replace the in-memory banner with the banner file's contents, if readable."""
    try:
        with open(config.get('server', 'banner_file'), 'r') as banner:
            config.set('server', 'banner', banner.read())
    except IOError:
        # Keep the default banner when the file cannot be read.
        pass
def setup_network_params(config):
    """Configure address-version and genesis parameters for the chosen network.

    Loads the preset matching ``[network] type`` from ``networks.params``
    into the ``utils`` and ``storage`` module globals, then lets explicit
    ``[network]`` options override the preset values.
    """
    network_type = config.get('network', 'type')
    preset = networks.params.get(network_type)
    utils.PUBKEY_ADDRESS = int(preset.get('pubkey_address'))
    utils.SCRIPT_ADDRESS = int(preset.get('script_address'))
    storage.GENESIS_HASH = preset.get('genesis_hash')
    if config.has_option('network', 'pubkey_address'):
        utils.PUBKEY_ADDRESS = config.getint('network', 'pubkey_address')
    if config.has_option('network', 'script_address'):
        utils.SCRIPT_ADDRESS = config.getint('network', 'script_address')
    if config.has_option('network', 'genesis_hash'):
        storage.GENESIS_HASH = config.get('network', 'genesis_hash')
def create_config(filename=None):
    """Build the server configuration.

    Starts from hard-coded defaults, overlays the configuration file
    (*filename*, or the first ``electrum.conf`` found in /etc or the
    working directory) and loads the banner file.  Exits the process when
    no configuration file can be found.
    """
    config = ConfigParser.ConfigParser()
    # set some defaults, which will be overwritten by the config file
    config.add_section('server')
    config.set('server', 'banner', 'Welcome to Electrum!')
    config.set('server', 'banner_file', '/etc/electrum.banner')
    config.set('server', 'host', 'localhost')
    config.set('server', 'electrum_rpc_port', '8000')
    config.set('server', 'report_host', '')
    config.set('server', 'stratum_tcp_port', '50001')
    config.set('server', 'stratum_tcp_ssl_port', '50002')
    config.set('server', 'report_stratum_tcp_port', '')
    config.set('server', 'report_stratum_tcp_ssl_port', '')
    config.set('server', 'ssl_certfile', '')
    config.set('server', 'ssl_keyfile', '')
    config.set('server', 'irc', 'no')
    config.set('server', 'irc_nick', '')
    config.set('server', 'coin', '')
    config.set('server', 'donation_address', '')
    config.set('server', 'max_subscriptions', '10000')

    config.add_section('leveldb')
    config.set('leveldb', 'path', '/dev/shm/electrum_db')
    config.set('leveldb', 'pruning_limit', '100')
    config.set('leveldb', 'reorg_limit', '100')
    # Cache sizes are stored as byte counts rendered to strings.
    config.set('leveldb', 'utxo_cache', str(64*1024*1024))
    config.set('leveldb', 'hist_cache', str(128*1024*1024))
    config.set('leveldb', 'addr_cache', str(16*1024*1024))
    config.set('leveldb', 'profiler', 'no')

    # set network parameters
    config.add_section('network')
    config.set('network', 'type', 'bitcoin_main')

    # try to find the config file in the default paths
    if not filename:
        for path in ('/etc/', ''):
            filename = path + 'electrum.conf'
            if os.path.isfile(filename):
                break

    if not os.path.isfile(filename):
        print 'could not find electrum configuration file "%s"' % filename
        sys.exit(1)

    attempt_read_config(config, filename)

    load_banner(config)

    return config
def run_rpc_command(params, electrum_rpc_port):
    """Forward a command to a running server over XML-RPC and print the reply.

    ``params`` is ``[command, arg1, ...]``; ``electrum_rpc_port`` is the
    local control port the server listens on.  'sessions' and 'debug' get
    custom formatting; every other reply is pretty-printed as JSON.
    """
    cmd = params[0]
    import xmlrpclib
    server = xmlrpclib.ServerProxy('http://localhost:%d' % electrum_rpc_port)
    func = getattr(server, cmd)
    r = func(*params[1:])
    if cmd == 'sessions':
        now = time.time()
        print 'type address sub version time'
        for item in r:
            print '%4s %21s %3s %7s %.2f' % (item.get('name'),
                                             item.get('address'),
                                             item.get('subscriptions'),
                                             item.get('version'),
                                             (now - item.get('time')),
                                             )
    elif cmd == 'debug':
        print r
    else:
        print json.dumps(r, indent=4, sort_keys=True)
def cmd_banner_update():
    """RPC command: re-read the banner file into the live configuration."""
    load_banner(dispatcher.shared.config)
    return True
def cmd_getinfo():
    """RPC command: return basic server statistics as a dict."""
    sessions = dispatcher.request_dispatcher.get_sessions()
    return dict(
        blocks=chain_proc.storage.height,
        peers=len(server_proc.peers),
        sessions=len(sessions),
        watched=len(chain_proc.watched_addresses),
        cached=len(chain_proc.history_cache),
    )
def cmd_sessions():
    """RPC command: describe every connected session."""
    return [{"time": session.time,
             "name": session.name,
             "address": session.address,
             "version": session.version,
             "subscriptions": len(session.subscriptions)}
            for session in dispatcher.request_dispatcher.get_sessions()]
def cmd_numsessions():
    """RPC command: number of connected sessions."""
    return len(dispatcher.request_dispatcher.get_sessions())


def cmd_peers():
    """RPC command: list of known peer keys."""
    return server_proc.peers.keys()


def cmd_numpeers():
    """RPC command: number of known peers."""
    return len(server_proc.peers)
# Handle to a guppy heap inspector; populated on demand by cmd_guppy().
hp = None


def cmd_guppy():
    """RPC command: create a guppy heap profiler and store it in the global ``hp``."""
    from guppy import hpy
    global hp
    hp = hpy()
def cmd_debug(s):
    """RPC command: evaluate *s* in the server process, returning the result as text.

    SECURITY: this evaluates arbitrary expressions with ``eval``; it is only
    reachable through the local control RPC socket.  Returns ``None``
    (implicitly) when *s* is empty.
    """
    import traceback
    import gc
    if s:
        try:
            result = str(eval(s))
        except:
            # Deliberately broad: any evaluation error is reported back to
            # the RPC caller instead of crashing the server.
            err_lines = traceback.format_exc().splitlines()
            result = '%s | %s' % (err_lines[-3], err_lines[-1])
        return result
def get_port(config, name):
    """Return the integer option *name* from the [server] section, or None.

    None is returned when the option is missing or not a valid integer.
    """
    try:
        return config.getint('server', name)
    except Exception:
        # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed while reading the configuration.
        return None
# share these as global, for 'debug' command
# All of these are populated by start_server(); until then they hold their
# empty defaults.
shared = None
chain_proc = None
server_proc = None
dispatcher = None
transports = []
tcp_server = None
ssl_server = None
def start_server(config):
    """Create the dispatcher, processors and stratum transports, then start them.

    Also installs handlers so SIGTERM/SIGHUP/SIGQUIT request a clean
    shutdown through the shared stop flag.
    """
    global shared, chain_proc, server_proc, dispatcher
    global tcp_server, ssl_server

    utils.init_logger()
    host = config.get('server', 'host')
    stratum_tcp_port = get_port(config, 'stratum_tcp_port')
    stratum_tcp_ssl_port = get_port(config, 'stratum_tcp_ssl_port')
    ssl_certfile = config.get('server', 'ssl_certfile')
    ssl_keyfile = config.get('server', 'ssl_keyfile')

    setup_network_params(config)

    # Fixed: compare with == instead of ``is``; identity comparison against
    # a string literal only works by accident of CPython string interning.
    if ssl_certfile == '' or ssl_keyfile == '':
        stratum_tcp_ssl_port = None

    print_log("Starting Electrum server on", host)

    # Create hub
    dispatcher = Dispatcher(config)
    shared = dispatcher.shared

    # handle termination signals
    import signal
    def handler(signum=None, frame=None):
        print_log('Signal handler called with signal', signum)
        shared.stop()
    for sig in [signal.SIGTERM, signal.SIGHUP, signal.SIGQUIT]:
        signal.signal(sig, handler)

    # Create and register processors
    chain_proc = BlockchainProcessor(config, shared)
    dispatcher.register('blockchain', chain_proc)

    server_proc = ServerProcessor(config, shared)
    dispatcher.register('server', server_proc)

    # Create various transports we need
    if stratum_tcp_port:
        tcp_server = TcpServer(dispatcher, host, stratum_tcp_port, False, None, None)
        transports.append(tcp_server)

    if stratum_tcp_ssl_port:
        ssl_server = TcpServer(dispatcher, host, stratum_tcp_ssl_port, True, ssl_certfile, ssl_keyfile)
        transports.append(ssl_server)

    for server in transports:
        server.start()
def stop_server():
    """Signal the shared stop flag and wait for the processor threads to finish."""
    shared.stop()
    server_proc.join()
    chain_proc.join()
    print_log("Electrum Server stopped")
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--conf', metavar='path', default=None, help='specify a configuration file')
    parser.add_argument('command', nargs='*', default=[], help='send a command to the server')
    args = parser.parse_args()
    config = create_config(args.conf)
    electrum_rpc_port = get_port(config, 'electrum_rpc_port')

    # Command-line mode: forward the command to an already-running server.
    if len(args.command) >= 1:
        try:
            run_rpc_command(args.command, electrum_rpc_port)
        except socket.error:
            print "server not running"
            sys.exit(1)
        sys.exit(0)

    # Daemon mode: probe the control port so we never start two servers.
    try:
        run_rpc_command(['getpid'], electrum_rpc_port)
        is_running = True
    except socket.error:
        is_running = False

    if is_running:
        print "server already running"
        sys.exit(1)

    start_server(config)

    # Local XML-RPC control interface consumed by the command-line mode above.
    from SimpleXMLRPCServer import SimpleXMLRPCServer
    server = SimpleXMLRPCServer(('localhost', electrum_rpc_port), allow_none=True, logRequests=False)
    server.register_function(lambda: os.getpid(), 'getpid')
    server.register_function(shared.stop, 'stop')
    server.register_function(cmd_getinfo, 'getinfo')
    server.register_function(cmd_sessions, 'sessions')
    server.register_function(cmd_numsessions, 'numsessions')
    server.register_function(cmd_peers, 'peers')
    server.register_function(cmd_numpeers, 'numpeers')
    server.register_function(cmd_debug, 'debug')
    server.register_function(cmd_guppy, 'guppy')
    server.register_function(cmd_banner_update, 'banner_update')
    # Short socket timeout so the shared stop flag is polled ~once a second.
    server.socket.settimeout(1)
    while not shared.stopped():
        try:
            server.handle_request()
        except socket.timeout:
            continue
        except:
            # Any other failure of the control server triggers a shutdown.
            stop_server()
| mit |
kswiat/django | django/contrib/gis/geos/tests/test_geos_mutation.py | 112 | 5462 | # Copyright (c) 2008-2009 Aryeh Leib Taurog, all rights reserved.
# Modified from original contribution by Aryeh Leib Taurog, which was
# released under the New BSD license.
import unittest
from unittest import skipUnless
from .. import HAS_GEOS
if HAS_GEOS:
from .. import fromstr, LinearRing, LineString, MultiPoint, Point, Polygon
from ..error import GEOSIndexError
if HAS_GEOS:
    # One tiny wrapper per GEOS geometry API member so that each can be
    # applied uniformly to pairs of geometries in the mutation tests below.
    def api_get_distance(x):
        return x.distance(Point(-200, -200))

    def api_get_buffer(x):
        return x.buffer(10)

    def api_get_geom_typeid(x):
        return x.geom_typeid

    def api_get_num_coords(x):
        return x.num_coords

    def api_get_centroid(x):
        return x.centroid

    def api_get_empty(x):
        return x.empty

    def api_get_valid(x):
        return x.valid

    def api_get_simple(x):
        return x.simple

    def api_get_ring(x):
        return x.ring

    def api_get_boundary(x):
        return x.boundary

    def api_get_convex_hull(x):
        return x.convex_hull

    def api_get_extent(x):
        return x.extent

    def api_get_area(x):
        return x.area

    def api_get_length(x):
        return x.length

    # Collect every api_get_* wrapper defined above from the module namespace.
    geos_function_tests = [val for name, val in vars().items()
                           if hasattr(val, '__call__')
                           and name.startswith('api_get_')]
@skipUnless(HAS_GEOS, "Geos is required.")
class GEOSMutationTest(unittest.TestCase):
"""
Tests Pythonic Mutability of Python GEOS geometry wrappers
get/set/delitem on a slice, normal list methods
"""
def test00_GEOSIndexException(self):
    'Testing Geometry GEOSIndexError'
    p = Point(1, 2)
    # Indices -2..1 are valid for a 2D point; anything outside must raise.
    for i in range(-2, 2):
        p._checkindex(i)
    self.assertRaises(GEOSIndexError, p._checkindex, 2)
    self.assertRaises(GEOSIndexError, p._checkindex, -3)
def test01_PointMutations(self):
    'Testing Point mutations'
    # Exercise both the Python constructor and the WKT parsing path.
    for p in (Point(1, 2, 3), fromstr('POINT (1 2 3)')):
        self.assertEqual(p._get_single_external(1), 2.0, 'Point _get_single_external')

        # _set_single replaces one coordinate in place.
        p._set_single(0, 100)
        self.assertEqual(p.coords, (100.0, 2.0, 3.0), 'Point _set_single')

        # _set_list replaces the whole coordinate list (here dropping z).
        p._set_list(2, (50, 3141))
        self.assertEqual(p.coords, (50.0, 3141.0), 'Point _set_list')
def test02_PointExceptions(self):
    'Testing Point exceptions'
    # A point needs 2-4 coordinate values; 1 or 4+ must raise TypeError.
    self.assertRaises(TypeError, Point, range(1))
    self.assertRaises(TypeError, Point, range(4))
def test03_PointApi(self):
'Testing Point API'
q = Point(4, 5, 3)
for p in (Point(1, 2, 3), fromstr('POINT (1 2 3)')):
p[0:2] = [4, 5]
for f in geos_function_tests:
self.assertEqual(f(q), f(p), 'Point ' + f.__name__)
def test04_LineStringMutations(self):
'Testing LineString mutations'
for ls in (LineString((1, 0), (4, 1), (6, -1)),
fromstr('LINESTRING (1 0,4 1,6 -1)')):
self.assertEqual(ls._get_single_external(1), (4.0, 1.0), 'LineString _get_single_external')
# _set_single
ls._set_single(0, (-50, 25))
self.assertEqual(ls.coords, ((-50.0, 25.0), (4.0, 1.0), (6.0, -1.0)), 'LineString _set_single')
# _set_list
ls._set_list(2, ((-50.0, 25.0), (6.0, -1.0)))
self.assertEqual(ls.coords, ((-50.0, 25.0), (6.0, -1.0)), 'LineString _set_list')
lsa = LineString(ls.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(ls), 'LineString ' + f.__name__)
def test05_Polygon(self):
'Testing Polygon mutations'
for pg in (Polygon(((1, 0), (4, 1), (6, -1), (8, 10), (1, 0)),
((5, 4), (6, 4), (6, 3), (5, 4))),
fromstr('POLYGON ((1 0,4 1,6 -1,8 10,1 0),(5 4,6 4,6 3,5 4))')):
self.assertEqual(pg._get_single_external(0),
LinearRing((1, 0), (4, 1), (6, -1), (8, 10), (1, 0)),
'Polygon _get_single_external(0)')
self.assertEqual(pg._get_single_external(1),
LinearRing((5, 4), (6, 4), (6, 3), (5, 4)),
'Polygon _get_single_external(1)')
# _set_list
pg._set_list(2, (((1, 2), (10, 0), (12, 9), (-1, 15), (1, 2)),
((4, 2), (5, 2), (5, 3), (4, 2))))
self.assertEqual(
pg.coords,
(((1.0, 2.0), (10.0, 0.0), (12.0, 9.0), (-1.0, 15.0), (1.0, 2.0)),
((4.0, 2.0), (5.0, 2.0), (5.0, 3.0), (4.0, 2.0))),
'Polygon _set_list')
lsa = Polygon(*pg.coords)
for f in geos_function_tests:
self.assertEqual(f(lsa), f(pg), 'Polygon ' + f.__name__)
def test06_Collection(self):
'Testing Collection mutations'
for mp in (MultiPoint(*map(Point, ((3, 4), (-1, 2), (5, -4), (2, 8)))),
fromstr('MULTIPOINT (3 4,-1 2,5 -4,2 8)')):
self.assertEqual(mp._get_single_external(2), Point(5, -4), 'Collection _get_single_external')
mp._set_list(3, map(Point, ((5, 5), (3, -2), (8, 1))))
self.assertEqual(mp.coords, ((5.0, 5.0), (3.0, -2.0), (8.0, 1.0)), 'Collection _set_list')
lsa = MultiPoint(*map(Point, ((5, 5), (3, -2), (8, 1))))
for f in geos_function_tests:
self.assertEqual(f(lsa), f(mp), 'MultiPoint ' + f.__name__)
| bsd-3-clause |
riccardomc/moto | moto/core/models.py | 20 | 5501 | from __future__ import unicode_literals
import functools
import inspect
import re
from httpretty import HTTPretty
from .responses import metadata_response
from .utils import convert_regex_to_flask_path
class MockAWS(object):
    """Context manager / decorator that mocks AWS endpoints via HTTPretty.

    Registers every URL of the supplied backends with HTTPretty so HTTP
    calls are answered by the in-memory backends instead of real AWS.
    Usable three ways: as a context manager, as a function decorator, or
    as a class decorator (which wraps every public method).
    """

    # Number of currently active (possibly nested) mocks.  HTTPretty state
    # is process-global, so this is tracked on the class, not per instance.
    nested_count = 0

    def __init__(self, backends):
        self.backends = backends

        # Only reset global HTTPretty state when no other mock is active.
        if self.__class__.nested_count == 0:
            HTTPretty.reset()

    def __call__(self, func, reset=True):
        if inspect.isclass(func):
            return self.decorate_class(func)
        return self.decorate_callable(func, reset)

    def __enter__(self):
        self.start()
        # Return self so ``with MockAWS(...) as mock:`` binds the mock
        # object; the previous implementation returned None, making the
        # ``as`` target useless.
        return self

    def __exit__(self, *args):
        # Returning None (falsy) lets exceptions propagate normally.
        self.stop()

    def start(self, reset=True):
        """Enable HTTPretty and register all backend URLs."""
        self.__class__.nested_count += 1
        if reset:
            for backend in self.backends.values():
                backend.reset()

        if not HTTPretty.is_enabled():
            HTTPretty.enable()

        for method in HTTPretty.METHODS:
            backend = list(self.backends.values())[0]
            for key, value in backend.urls.items():
                HTTPretty.register_uri(
                    method=method,
                    uri=re.compile(key),
                    body=value,
                )

            # Mock out localhost instance metadata
            HTTPretty.register_uri(
                method=method,
                uri=re.compile('http://169.254.169.254/latest/meta-data/.*'),
                body=metadata_response
            )

    def stop(self):
        """Disable HTTPretty once the outermost mock is stopped."""
        self.__class__.nested_count -= 1

        if self.__class__.nested_count < 0:
            raise RuntimeError('Called stop() before start().')

        if self.__class__.nested_count == 0:
            HTTPretty.disable()
            HTTPretty.reset()

    def decorate_callable(self, func, reset):
        """Wrap *func* so the mock is started/stopped around every call."""
        def wrapper(*args, **kwargs):
            self.start(reset=reset)
            try:
                result = func(*args, **kwargs)
            finally:
                # Always stop, even if func raised.
                self.stop()
            return result
        functools.update_wrapper(wrapper, func)
        wrapper.__wrapped__ = func
        return wrapper

    def decorate_class(self, klass):
        """Wrap every public, non-classmethod callable attribute of *klass*."""
        for attr in dir(klass):
            if attr.startswith("_"):
                continue

            attr_value = getattr(klass, attr)
            if not hasattr(attr_value, "__call__"):
                continue

            # Check if this is a classmethod. If so, skip patching
            if inspect.ismethod(attr_value) and attr_value.__self__ is klass:
                continue

            try:
                # reset=False so per-test-method calls don't wipe state set
                # up by earlier wrapped methods of the same class.
                setattr(klass, attr, self(attr_value, reset=False))
            except TypeError:
                # Sometimes we can't set this for built-in types
                continue
        return klass
class Model(type):
    """Metaclass that indexes methods returning model values.

    Any method decorated with :meth:`Model.prop` carries a
    ``__returns_model__`` marker.  When a class using this metaclass is
    created, those markers are gathered into a ``__models__`` dict mapping
    model name -> attribute name, and entries from base classes are merged
    in afterwards (base entries win on conflict, matching the original
    behaviour).
    """

    def __new__(mcs, clsname, bases, namespace):
        new_class = super(Model, mcs).__new__(mcs, clsname, bases, namespace)
        registry = {}
        for attr_name, attr_value in namespace.items():
            marker = getattr(attr_value, "__returns_model__", False)
            if marker is not False:
                registry[marker] = attr_name
        new_class.__models__ = registry
        for parent in bases:
            registry.update(getattr(parent, "__models__", {}))
        return new_class

    @staticmethod
    def prop(model_name):
        """Decorator marking a class method as returning model values."""
        def mark(func):
            func.__returns_model__ = model_name
            return func
        return mark
class BaseBackend(object):
    """Common base class for per-service mock backends.

    Provides state reset plus properties that derive the service's URL
    routing tables from the sibling ``urls`` module of the backend's
    ``models`` module.
    """

    def reset(self):
        # Drop all instance state and re-run __init__ for a clean slate.
        self.__dict__ = {}
        self.__init__()

    @property
    def _url_module(self):
        # e.g. ``moto.s3.models`` -> ``moto.s3.urls``.  The fromlist makes
        # __import__ return the leaf module instead of the top package.
        backend_module = self.__class__.__module__
        backend_urls_module_name = backend_module.replace("models", "urls")
        backend_urls_module = __import__(backend_urls_module_name, fromlist=['url_bases', 'url_paths'])
        return backend_urls_module

    @property
    def urls(self):
        """
        A dictionary of the urls to be mocked with this service and the handlers
        that should be called in their place
        """
        url_bases = self._url_module.url_bases
        unformatted_paths = self._url_module.url_paths

        urls = {}
        for url_base in url_bases:
            for url_path, handler in unformatted_paths.items():
                # Each path template contains a ``{0}`` slot for the base URL.
                url = url_path.format(url_base)
                urls[url] = handler

        return urls

    @property
    def url_paths(self):
        """
        A dictionary of the paths of the urls to be mocked with this service and
        the handlers that should be called in their place
        """
        unformatted_paths = self._url_module.url_paths

        paths = {}
        for unformatted_path, handler in unformatted_paths.items():
            # Format with an empty base so only the path component remains.
            path = unformatted_path.format("")
            paths[path] = handler

        return paths

    @property
    def url_bases(self):
        """
        A list containing the url_bases extracted from urls.py
        """
        return self._url_module.url_bases

    @property
    def flask_paths(self):
        """
        The url paths that will be used for the flask server
        """
        paths = {}
        for url_path, handler in self.url_paths.items():
            url_path = convert_regex_to_flask_path(url_path)
            paths[url_path] = handler

        return paths

    def decorator(self, func=None):
        # Supports both bare ``@backend.decorator`` and ``@backend.decorator()``
        # usage by returning either the wrapped function or a MockAWS instance.
        if func:
            return MockAWS({'global': self})(func)
        else:
            return MockAWS({'global': self})
| apache-2.0 |
rversteegen/commandergenius | project/jni/python/src/Tools/i18n/makelocalealias.py | 52 | 2007 | #!/usr/bin/env python
"""
Convert the X11 locale.alias file into a mapping dictionary suitable
for locale.py.
Written by Marc-Andre Lemburg <mal@genix.com>, 2004-12-10.
"""
import locale
# Location of the alias file
# (default path of the X11 locale alias table on a typical install).
LOCALE_ALIAS = '/usr/lib/X11/locale/locale.alias'
def parse(filename):
    """Parse an X11 ``locale.alias`` file into a ``{locale: alias}`` dict.

    Comment lines (starting with ``#``) and blank lines are skipped.
    Locale names are lower-cased, a trailing ``:`` is stripped, and any
    encoding suffix is normalized by removing ``-`` and ``_``.  UTF-8
    entries and one-letter locales (other than ``c``) are ignored.
    """
    data = {}
    # Use a context manager so the file handle is closed even if a
    # malformed line raises; the original implementation leaked it.
    with open(filename) as f:
        for line in f:
            line = line.strip()
            if not line:
                continue
            if line[:1] == '#':
                continue
            locale, alias = line.split()
            # Strip ':'
            if locale[-1] == ':':
                locale = locale[:-1]
            # Lower-case locale
            locale = locale.lower()
            # Ignore one letter locale mappings (except for 'c')
            if len(locale) == 1 and locale != 'c':
                continue
            # Normalize encoding, if given
            if '.' in locale:
                lang, encoding = locale.split('.')[:2]
                encoding = encoding.replace('-', '')
                encoding = encoding.replace('_', '')
                locale = lang + '.' + encoding
                if encoding.lower() == 'utf8':
                    # Ignore UTF-8 mappings - this encoding should be
                    # available for all locales
                    continue
            data[locale] = alias
    return data
def pprint(data):
    """Print *data* as sorted ``'key': value,`` lines for Lib/locale.py."""
    # sorted() works on both list-style (Py2) and view-style (Py3)
    # dict.items() results; the original used items.sort(), which fails on
    # Python 3 dict views.  print(single_string) is valid on both versions.
    for k, v in sorted(data.items()):
        print(' %-40s%r,' % ('%r:' % k, v))
def print_differences(data, olddata):
    """Print commented lines describing how *data* differs from *olddata*.

    Keys removed from or changed in *data* relative to *olddata* are
    reported; additions are intentionally not mentioned.
    """
    # ``k not in data`` and sorted() replace the Python-2-only
    # dict.has_key() / list.sort() idioms; output is unchanged.
    for k, v in sorted(olddata.items()):
        if k not in data:
            print('# removed %r' % k)
        elif olddata[k] != data[k]:
            print('# updated %r -> %r to %r' %
                  (k, olddata[k], data[k]))
    # Additions are not mentioned
if __name__ == '__main__':
    # Merge the aliases shipped with the stdlib with those from the X11
    # alias file, report the differences, then dump the merged table in the
    # format expected by Lib/locale.py.  print(...) with a single string
    # argument behaves identically on Python 2 and 3, replacing the
    # Python-2-only print statements.
    data = locale.locale_alias.copy()
    data.update(parse(LOCALE_ALIAS))
    print_differences(data, locale.locale_alias)
    print('')
    print('locale_alias = {')
    pprint(data)
    print('}')
| lgpl-2.1 |
vinchoi/fishplay | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/langhebrewmodel.py | 2763 | 11318 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Simon Montagu
# Portions created by the Initial Developer are Copyright (C) 2005
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Shoshannah Forbes - original C code (?)
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# 255: Control characters that usually does not exist in any text
# 254: Carriage/Return
# 253: symbol (punctuation) that does not belong to word
# 252: 0 - 9
# Windows-1255 language model
# Character Mapping Table:
# Byte -> frequency-order map for windows-1255.  Per the legend above:
# 255 = control characters, 254 = CR/LF, 253 = punctuation/symbols that do
# not belong to a word, 252 = digits; lower values are frequency ranks.
win1255_CharToOrderMap = (
255,255,255,255,255,255,255,255,255,255,254,255,255,254,255,255,  # 00
255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,255,  # 10
253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,253,  # 20
252,252,252,252,252,252,252,252,252,252,253,253,253,253,253,253,  # 30
253, 69, 91, 79, 80, 92, 89, 97, 90, 68,111,112, 82, 73, 95, 85,  # 40
 78,121, 86, 71, 67,102,107, 84,114,103,115,253,253,253,253,253,  # 50
253, 50, 74, 60, 61, 42, 76, 70, 64, 53,105, 93, 56, 65, 54, 49,  # 60
 66,110, 51, 43, 44, 63, 81, 77, 98, 75,108,253,253,253,253,253,  # 70
124,202,203,204,205, 40, 58,206,207,208,209,210,211,212,213,214,
215, 83, 52, 47, 46, 72, 32, 94,216,113,217,109,218,219,220,221,
 34,116,222,118,100,223,224,117,119,104,125,225,226, 87, 99,227,
106,122,123,228, 55,229,230,101,231,232,120,233, 48, 39, 57,234,
 30, 59, 41, 88, 33, 37, 36, 31, 29, 35,235, 62, 28,236,126,237,
238, 38, 45,239,240,241,242,243,127,244,245,246,247,248,249,250,
  9,  8, 20, 16,  3,  2, 24, 14, 22,  1, 25, 15,  4, 11,  6, 23,
 12, 19, 13, 26, 18, 27, 21, 17,  7, 10,  5,251,252,128, 96,253,
)
# Model Table:
# total sequences: 100%
# first 512 sequences: 98.4004%
# first 1024 sequences: 1.5981%
# rest sequences: 0.087%
# negative sequences: 0.0015%
HebrewLangModel = (
0,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,2,3,2,1,2,0,1,0,0,
3,0,3,1,0,0,1,3,2,0,1,1,2,0,2,2,2,1,1,1,1,2,1,1,1,2,0,0,2,2,0,1,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,
1,2,1,2,1,2,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,
1,2,1,3,1,1,0,0,2,0,0,0,1,0,1,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,1,2,2,1,3,
1,2,1,1,2,2,0,0,2,2,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,1,0,1,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,2,2,2,3,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,2,3,2,2,3,2,2,2,1,2,2,2,2,
1,2,1,1,2,2,0,1,2,0,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,0,2,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,0,2,2,2,
0,2,1,2,2,2,0,0,2,1,0,0,0,0,1,0,1,0,0,0,0,0,0,2,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,2,1,2,3,2,2,2,
1,2,1,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,3,3,3,3,3,3,3,3,3,3,3,1,0,2,0,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,1,0,0,0,2,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,2,3,2,2,3,2,1,2,1,1,1,
0,1,1,1,1,1,3,0,1,0,0,0,0,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,1,1,0,0,1,0,0,1,0,0,0,0,
0,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,2,2,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,2,1,2,3,3,2,3,3,3,3,2,3,2,1,2,0,2,1,2,
0,2,0,2,2,2,0,0,1,2,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,1,2,2,3,3,2,3,2,3,2,2,3,1,2,2,0,2,2,2,
0,2,1,2,2,2,0,0,1,2,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,1,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,2,3,3,2,2,2,3,3,3,3,1,3,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,3,3,3,2,2,3,3,3,2,3,2,2,2,1,2,2,0,2,2,2,2,
0,2,0,2,2,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,3,3,2,3,3,3,1,3,2,3,3,2,3,3,2,2,1,2,2,2,2,2,2,
0,2,1,2,1,2,0,0,1,0,0,0,0,0,1,0,0,0,0,0,1,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,2,3,3,2,3,3,3,3,2,3,2,3,3,3,3,3,2,2,2,2,2,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,3,3,3,2,1,2,3,3,3,3,3,3,3,2,3,2,3,2,1,2,3,0,2,1,2,2,
0,2,1,1,2,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,2,0,
3,3,3,3,3,3,3,3,3,2,3,3,3,3,2,1,3,1,2,2,2,1,2,3,3,1,2,1,2,2,2,2,
0,1,1,1,1,1,0,0,0,0,0,0,0,0,1,0,0,0,0,0,1,0,0,2,0,0,0,0,0,0,0,0,
3,3,3,3,3,3,3,3,3,3,0,2,3,3,3,1,3,3,3,1,2,2,2,2,1,1,2,2,2,2,2,2,
0,2,0,1,1,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,3,3,3,3,3,2,3,3,3,2,2,3,3,3,2,1,2,3,2,3,2,2,2,2,1,2,1,1,1,2,2,
0,2,1,1,1,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,1,0,0,0,0,0,
1,0,1,0,0,0,0,0,2,0,0,0,0,0,1,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,3,3,3,3,2,3,3,2,3,1,2,2,2,2,3,2,3,1,1,2,2,1,2,2,1,1,0,2,2,2,2,
0,1,0,1,2,2,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,1,0,
3,0,0,1,1,0,1,0,0,1,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,1,2,2,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,1,0,1,0,1,1,0,1,1,0,0,0,1,1,0,1,1,1,0,0,0,0,0,0,1,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,0,0,0,1,1,0,1,0,1,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,
3,2,2,1,2,2,2,2,2,2,2,1,2,2,1,2,2,1,1,1,1,1,1,1,1,2,1,1,0,3,3,3,
0,3,0,2,2,2,2,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
2,2,2,3,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,2,1,2,2,2,1,1,1,2,0,1,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,2,2,2,2,2,2,0,2,2,0,0,0,0,0,0,
0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,3,1,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,2,1,2,1,0,2,1,0,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,1,1,1,1,1,1,1,1,1,1,0,0,1,1,1,1,0,1,1,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,
0,3,1,1,2,2,2,2,2,1,2,2,2,1,1,2,2,2,2,2,2,2,1,2,2,1,0,1,1,1,1,0,
0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
3,2,1,1,1,1,2,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,2,0,0,0,0,0,0,0,0,1,1,0,0,0,0,1,1,0,0,1,1,0,0,0,0,0,0,1,0,0,
2,1,1,2,2,2,2,2,2,2,2,2,2,2,1,2,2,2,2,2,1,2,1,2,1,1,1,1,0,0,0,0,
0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,2,1,2,2,2,2,2,2,2,2,2,2,1,2,1,2,1,1,2,1,1,1,2,1,2,1,2,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,3,1,2,2,2,1,2,2,2,2,2,2,2,2,1,2,1,1,1,1,1,1,2,1,2,1,1,0,1,0,1,
0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,2,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,2,2,
0,2,0,1,2,2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,
3,0,0,0,1,0,0,0,0,0,0,0,0,0,0,1,0,1,0,0,0,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,1,1,1,1,1,1,0,1,1,0,1,0,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,1,1,1,0,1,0,0,0,1,1,0,1,1,0,0,0,0,0,1,1,0,0,
0,1,1,1,2,1,2,2,2,0,2,0,2,0,1,1,2,1,1,1,1,2,1,0,1,1,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,1,0,0,0,0,0,1,0,1,2,2,0,1,0,0,1,1,2,2,1,2,0,2,0,0,0,1,2,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,2,0,2,1,2,0,2,0,0,1,1,1,1,1,1,0,1,0,0,0,1,0,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,1,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,1,2,2,0,0,1,0,0,0,1,0,0,1,
1,1,2,1,0,1,1,1,0,1,0,1,1,1,1,0,0,0,1,0,1,0,0,0,0,0,0,0,0,2,2,1,
0,2,0,1,2,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,1,0,0,1,0,1,1,1,1,0,0,0,0,0,1,0,0,0,0,1,1,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,1,1,1,1,1,1,1,1,2,1,0,1,1,1,1,1,1,1,1,1,1,1,0,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,1,1,0,0,0,0,1,1,1,0,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,1,0,1,0,0,1,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,1,0,1,0,0,1,1,2,1,1,2,0,1,0,0,0,1,1,0,1,
1,0,0,1,0,0,1,0,0,0,1,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,0,0,2,1,1,2,0,2,0,0,0,1,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,2,2,1,2,1,1,0,1,0,0,0,1,1,0,1,
2,0,1,0,0,1,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,0,1,1,0,1,0,0,1,0,0,0,0,1,0,1,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,2,0,0,0,0,2,1,1,1,0,2,1,1,0,0,0,2,1,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,0,2,1,1,0,1,0,0,0,1,1,0,1,
2,2,1,1,1,0,1,1,0,1,1,0,1,0,0,0,0,0,0,1,0,0,0,1,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,2,1,1,0,1,0,0,1,1,0,1,2,1,0,2,0,0,0,1,1,0,1,
2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,2,0,0,0,0,0,
0,1,0,0,2,0,2,1,1,0,1,0,1,0,0,1,0,0,0,0,1,0,0,0,1,0,0,0,0,0,1,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,1,0,0,1,0,0,1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,1,0,1,1,2,0,1,0,0,1,1,1,0,1,0,0,1,0,0,0,1,0,0,1,
1,0,0,1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,1,0,1,1,0,0,1,0,0,2,1,1,1,1,1,0,1,0,0,0,0,1,0,1,
0,1,1,1,2,1,1,1,1,0,1,1,1,1,1,1,1,1,1,1,1,1,0,1,1,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
1,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,
0,0,0,0,0,0,0,0,0,0,1,2,1,0,0,0,0,0,1,1,1,1,1,0,1,0,0,0,1,1,0,0,
)
# Bundle of the windows-1255 Hebrew detection model consumed by the
# chardet sequence-analysis probers.
Win1255HebrewModel = {
    # Byte -> frequency-order table defined above.
    'charToOrderMap': win1255_CharToOrderMap,
    # Pair-adjacency score table defined above.
    'precedenceMatrix': HebrewLangModel,
    # Coverage of the most frequent sequences; matches the "first 512
    # sequences: 98.4004%" statistic in the comment block above.
    'mTypicalPositiveRatio': 0.984004,
    'keepEnglishLetter': False,
    'charsetName': "windows-1255"
}
# flake8: noqa
| gpl-3.0 |
frewsxcv/servo | tests/wpt/css-tests/tools/html5lib/html5lib/filters/lint.py | 979 | 4306 | from __future__ import absolute_import, division, unicode_literals
from gettext import gettext
# Conventional alias so the error messages below are marked for translation.
_ = gettext
from . import _base
from ..constants import cdataElements, rcdataElements, voidElements
from ..constants import spaceCharacters
spaceCharacters = "".join(spaceCharacters)
class LintError(Exception):
    """Raised when the token stream violates a lint invariant."""
    pass
class Filter(_base.Filter):
    """Token-stream sanity checker.

    Iterates the underlying token stream, raising :class:`LintError` on
    malformed tokens (bad types, empty names, mismatched start/end tags,
    tokens illegal in the current content model), and yields every token
    through unchanged.
    """

    def __iter__(self):
        open_elements = []
        contentModelFlag = "PCDATA"
        # ``token_type`` instead of the original ``type``, which shadowed
        # the builtin.
        for token in _base.Filter.__iter__(self):
            token_type = token["type"]
            if token_type in ("StartTag", "EmptyTag"):
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("StartTag not in PCDATA content model flag: %(tag)s") % {"tag": name})
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                if not name:
                    raise LintError(_("Empty tag name"))
                if token_type == "StartTag" and name in voidElements:
                    raise LintError(_("Void element reported as StartTag token: %(tag)s") % {"tag": name})
                elif token_type == "EmptyTag" and name not in voidElements:
                    raise LintError(_("Non-void element reported as EmptyTag token: %(tag)s") % {"tag": token["name"]})
                if token_type == "StartTag":
                    open_elements.append(name)
                # Distinct loop variables here: the original reused ``name``,
                # so for any tag with attributes the content-model checks
                # below compared the *last attribute name* (not the tag name)
                # against cdataElements/rcdataElements.
                for attr_name, attr_value in token["data"]:
                    if not isinstance(attr_name, str):
                        raise LintError(_("Attribute name is not a string: %(name)r") % {"name": attr_name})
                    if not attr_name:
                        raise LintError(_("Empty attribute name"))
                    if not isinstance(attr_value, str):
                        raise LintError(_("Attribute value is not a string: %(value)r") % {"value": attr_value})
                # Switch the content model based on the tag name.
                if name in cdataElements:
                    contentModelFlag = "CDATA"
                elif name in rcdataElements:
                    contentModelFlag = "RCDATA"
                elif name == "plaintext":
                    contentModelFlag = "PLAINTEXT"

            elif token_type == "EndTag":
                name = token["name"]
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                if not name:
                    raise LintError(_("Empty tag name"))
                if name in voidElements:
                    raise LintError(_("Void element reported as EndTag token: %(tag)s") % {"tag": name})
                start_name = open_elements.pop()
                if start_name != name:
                    raise LintError(_("EndTag (%(end)s) does not match StartTag (%(start)s)") % {"end": name, "start": start_name})
                # Any end tag terminates a raw-text content model.
                contentModelFlag = "PCDATA"

            elif token_type == "Comment":
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Comment not in PCDATA content model flag"))

            elif token_type in ("Characters", "SpaceCharacters"):
                data = token["data"]
                if not isinstance(data, str):
                    # The original emitted the attribute-name message here
                    # (copy-paste error); report what was actually wrong.
                    raise LintError(_("%(type)s token data is not a string: %(data)r") % {"type": token_type, "data": data})
                if not data:
                    raise LintError(_("%(type)s token with empty data") % {"type": token_type})
                if token_type == "SpaceCharacters":
                    data = data.strip(spaceCharacters)
                    if data:
                        raise LintError(_("Non-space character(s) found in SpaceCharacters token: %(token)r") % {"token": data})

            elif token_type == "Doctype":
                name = token["name"]
                if contentModelFlag != "PCDATA":
                    raise LintError(_("Doctype not in PCDATA content model flag: %(name)s") % {"name": name})
                if not isinstance(name, str):
                    raise LintError(_("Tag name is not a string: %(tag)r") % {"tag": name})
                # XXX: what to do with token["data"] ?

            elif token_type in ("ParseError", "SerializeError"):
                pass

            else:
                raise LintError(_("Unknown token type: %(type)s") % {"type": token_type})

            yield token
| mpl-2.0 |
jdsika/TUM_HOly | openrave/sympy/assumptions/handlers/calculus.py | 4 | 4242 | """
This module contains query handlers responsible for calculus queries:
infinitesimal, bounded, etc.
"""
from sympy.logic.boolalg import conjuncts
from sympy.assumptions import Q, ask
from sympy.assumptions.handlers import CommonHandler
class AskInfinitesimalHandler(CommonHandler):
    """
    Handler for key 'infinitesimal'
    Test that a given expression is equivalent to an infinitesimal
    number
    """

    @staticmethod
    def _number(expr, assumptions):
        # helper method: a concrete number is infinitesimal iff it
        # evaluates numerically to zero.
        return expr.evalf() == 0

    @staticmethod
    def Basic(expr, assumptions):
        # Returns None (unknown) for non-numeric expressions.
        if expr.is_number:
            return AskInfinitesimalHandler._number(expr, assumptions)

    @staticmethod
    def Mul(expr, assumptions):
        """
        Infinitesimal*Bounded -> Infinitesimal
        """
        if expr.is_number:
            return AskInfinitesimalHandler._number(expr, assumptions)
        result = False
        # True if at least one factor is infinitesimal and every other
        # factor is bounded; the for/else only fires when no arg broke out.
        for arg in expr.args:
            if ask(Q.infinitesimal(arg), assumptions):
                result = True
            elif ask(Q.bounded(arg), assumptions):
                continue
            else: break
        else:
            return result

    # NOTE(review): Add and Pow reuse the Mul logic verbatim.  For Add this
    # looks mathematically wrong (infinitesimal + bounded is generally NOT
    # infinitesimal, e.g. eps + 5) — confirm intended semantics upstream
    # before relying on it.
    Add, Pow = Mul, Mul

    @staticmethod
    def Number(expr, assumptions):
        return expr == 0

    NumberSymbol = Number

    @staticmethod
    def ImaginaryUnit(expr, assumptions):
        return False
class AskBoundedHandler(CommonHandler):
    """
    Handler for key 'bounded'.

    Test that an expression is bounded respect to all its variables.

    Example of usage:

    >>> from sympy import Symbol, Q
    >>> from sympy.assumptions.handlers.calculus import AskBoundedHandler
    >>> from sympy.abc import x
    >>> a = AskBoundedHandler()
    >>> a.Symbol(x, Q.positive(x))
    False
    >>> a.Symbol(x, Q.bounded(x))
    True

    """

    @staticmethod
    def Symbol(expr, assumptions):
        """
        Handles Symbol.

        Example:

        >>> from sympy import Symbol, Q
        >>> from sympy.assumptions.handlers.calculus import AskBoundedHandler
        >>> from sympy.abc import x
        >>> a = AskBoundedHandler()
        >>> a.Symbol(x, Q.positive(x))
        False
        >>> a.Symbol(x, Q.bounded(x))
        True

        """
        # A bare symbol is bounded only if the assumptions say so
        # explicitly; otherwise False (not None).
        if Q.bounded(expr) in conjuncts(assumptions):
            return True
        return False

    @staticmethod
    def Add(expr, assumptions):
        """
        Bounded + Bounded -> Bounded
        Unbounded + Bounded -> Unbounded
        Unbounded + Unbounded -> ?
        """
        # result True: all bounded so far; False: exactly one unbounded
        # term seen.  A second unbounded or an unknown term returns None.
        result = True
        for arg in expr.args:
            _bounded = ask(Q.bounded(arg), assumptions)
            if _bounded: continue
            elif _bounded is None: return
            elif _bounded is False:
                if result: result = False
                else: return
        return result

    # Products follow the same bounded/unbounded combination rules.
    Mul = Add

    @staticmethod
    def Pow(expr, assumptions):
        """
        Unbounded ** Whatever -> Unbounded
        Bounded ** Unbounded -> Unbounded if base > 1
        Bounded ** Unbounded -> Unbounded if base < 1
        """
        base_bounded = ask(Q.bounded(expr.base), assumptions)
        if not base_bounded:
            # Propagates None (unknown) as well as False.
            return base_bounded
        if ask(Q.bounded(expr.exp), assumptions) and base_bounded:
            return True
        if base_bounded and expr.base.is_number:
            # We need to implement relations for this
            if abs(expr.base) > 1:
                return False
            return True

    @staticmethod
    def log(expr, assumptions):
        return ask(Q.bounded(expr.args[0]), assumptions)

    exp = log

    @staticmethod
    def sin(expr, assumptions):
        return True

    cos = sin

    @staticmethod
    def Number(expr, assumptions):
        return True

    @staticmethod
    def Infinity(expr, assumptions):
        return False

    @staticmethod
    def NegativeInfinity(expr, assumptions):
        return False

    @staticmethod
    def Pi(expr, assumptions):
        return True

    @staticmethod
    def Exp1(expr, assumptions):
        return True

    @staticmethod
    def ImaginaryUnit(expr, assumptions):
        return True

    @staticmethod
    def sign(expr, assumptions):
        return True
sanjeevtripurari/zookeeper | src/contrib/zkpython/src/examples/watch_znode_for_changes.py | 138 | 6544 | #!/usr/bin/env python2.6
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" ZNode Change Watcher Skeleton Script
This script shows you how to write a python program that watches a specific
znode for changes and reacts to them.
Steps to understand how this script works:
1. start a standalone ZooKeeper server (by default it listens on localhost:2181)
Did you know you can deploy "local clusters" by using zkconf[1]?
[1] http://github.com/phunt/zkconf
2. enter the command line console
3. create the test node:
[zk: (CONNECTED) 1] create /watch-test dummy-data
Created /watch-test
4. in another shell start this script in verbose mode
$ python watch_znode_for_changes.py -v
# you should see a lot of log messages. have a look over them because
# you can easily understand how zookeeper works
5. update the node data:
[zk: (CONNECTED) 2] set /watch-test new-data
cZxid = 0xa0000001a
ctime = Fri Jul 09 19:14:45 EEST 2010
mZxid = 0xa0000001e
mtime = Fri Jul 09 19:18:18 EEST 2010
pZxid = 0xa0000001a
cversion = 0
dataVersion = 1
aclVersion = 0
ephemeralOwner = 0x0
dataLength = 8
numChildren = 0
... and you should see similar log messages:
2010-07-09 19:18:18,537:11542(0xb6ea5b70):ZOO_DEBUG@process_completions@1765: Calling a watcher for node [/watch-test], type = -1 event=ZOO_CHANGED_EVENT
2010-07-09 19:18:18,537 watch_znode_for_changes.py:83 - Running watcher: zh=0 event=3 state=3 path=/watch-test
2010-07-09 19:18:18,537:11542(0xb6ea5b70):ZOO_DEBUG@zoo_awget@2400: Sending request xid=0x4c374b33 for path [/watch-test] to 127.0.0.1:2181
2010-07-09 19:18:18,545:11542(0xb76a6b70):ZOO_DEBUG@zookeeper_process@1980: Queueing asynchronous response
2010-07-09 19:18:18,545:11542(0xb6ea5b70):ZOO_DEBUG@process_completions@1772: Calling COMPLETION_DATA for xid=0x4c374b33 rc=0
2010-07-09 19:18:18,545 watch_znode_for_changes.py:54 - This is where your application does work.
You can repeat this step multiple times.
6. that's all. in the end you can delete the node and you should see a ZOO_DELETED_EVENT
"""
import logging
import logging.handlers
import signal
import sys
import time
import threading
import zookeeper
from optparse import OptionParser
logger = logging.getLogger()
class MyClass(threading.Thread):
    """Thread that keeps a ZooKeeper data watch set on ``znode``.

    The watch is (re)armed via asynchronous zookeeper.aget() calls;
    ``handler`` processes the get responses and ``watcher`` processes the
    change notifications.
    """

    # Path of the znode being watched.
    znode = '/watch-test'

    def __init__(self, options, args):
        threading.Thread.__init__(self)
        logger.debug('Initializing MyClass thread.')

        if options.verbose:
            zookeeper.set_debug_level(zookeeper.LOG_LEVEL_DEBUG)

        self.zh = zookeeper.init(options.servers)
        # Arm the initial data watch; aget returns a status code immediately.
        if zookeeper.OK != zookeeper.aget(self.zh, self.znode,
                                          self.watcher, self.handler):
            logger.critical('Unable to get znode! Exiting.')
            sys.exit(1)

    def __del__(self):
        # NOTE(review): assumes self.zh was assigned; if zookeeper.init
        # raised, this would fail during interpreter teardown — confirm.
        zookeeper.close(self.zh)

    def aget(self):
        # Re-arm the watch and request the znode data again.
        return zookeeper.aget(self.zh, self.znode, self.watcher, self.handler)

    def handler(self, zh, rc, data, stat):
        """Handle zookeeper.aget() responses.

        This code handles the zookeeper.aget callback. It does not handle watches.

        Numeric arguments map to constants. See ``DATA`` in ``help(zookeeper)``
        for more information.

        Args:
          zh Zookeeper handle that made this request.
          rc Return code.
          data Data stored in the znode.

        Does not provide a return value.
        """
        if zookeeper.OK == rc:
            logger.debug('This is where your application does work.')
        else:
            if zookeeper.NONODE == rc:
                # avoid sending too many requests if the node does not yet exists
                logger.info('Node not found. Trying again to set the watch.')
                time.sleep(1)
                if zookeeper.OK != self.aget():
                    logger.critical('Unable to get znode! Exiting.')
                    sys.exit(1)

    def watcher(self, zh, event, state, path):
        """Handle zookeeper.aget() watches.

        This code is called when a znode changes and triggers a data watch.
        It is not called to handle the zookeeper.aget call itself.

        Numeric arguments map to constants. See ``DATA`` in ``help(zookeeper)``
        for more information.

        Args:
          zh Zookeeper handle that set this watch.
          event Event that caused the watch (often called ``type`` elsewhere).
          state Connection state.
          path Znode that triggered this watch.

        Does not provide a return value.
        """
        out = ['Running watcher:',
               'zh=%d' % zh,
               'event=%d' % event,
               'state=%d' % state,
               'path=%s' % path]
        logger.debug(' '.join(out))

        # Watches are one-shot: after a change on our znode, re-arm it.
        if event == zookeeper.CHANGED_EVENT and \
           state == zookeeper.CONNECTED_STATE and \
           self.znode == path:
            if zookeeper.OK != self.aget():
                logger.critical('Unable to get znode! Exiting.')
                sys.exit(1)

    def run(self):
        # Keep the thread alive; all real work happens in the callbacks.
        while True:
            time.sleep(86400)
def main(argv=None):
    """Parse command line options, configure logging and run the client."""
    # Allow Ctrl-C
    signal.signal(signal.SIGINT, signal.SIG_DFL)

    parser = OptionParser()
    parser.add_option('-v', '--verbose',
                      dest='verbose',
                      default=False,
                      action='store_true',
                      help='Verbose logging. (default: %default)')
    parser.add_option('-s', '--servers',
                      dest='servers',
                      default='localhost:2181',
                      help='Comma-separated list of host:port pairs. (default: %default)')
    (options, args) = parser.parse_args()

    if options.verbose:
        logger.setLevel(logging.DEBUG)
    else:
        logger.setLevel(logging.INFO)

    formatter = logging.Formatter("%(asctime)s %(filename)s:%(lineno)d - %(message)s")
    stream_handler = logging.StreamHandler()
    stream_handler.setFormatter(formatter)
    logger.addHandler(stream_handler)

    logger.info('Starting Zookeeper python example: %s' % ' '.join(sys.argv))

    # MyClass is defined above this excerpt; start()/join() suggest a
    # threading.Thread subclass -- see its definition for details.
    mc = MyClass(options, args)
    mc.start()
    mc.join()

if __name__ == '__main__':
    main()
| apache-2.0 |
v-a/check_mk | web/htdocs/index.py | 1 | 13788 | #!/usr/bin/python
# -*- encoding: utf-8; py-indent-offset: 4 -*-
# +------------------------------------------------------------------+
# | ____ _ _ __ __ _ __ |
# | / ___| |__ ___ ___| | __ | \/ | |/ / |
# | | | | '_ \ / _ \/ __| |/ / | |\/| | ' / |
# | | |___| | | | __/ (__| < | | | | . \ |
# | \____|_| |_|\___|\___|_|\_\___|_| |_|_|\_\ |
# | |
# | Copyright Mathias Kettner 2013 mk@mathias-kettner.de |
# +------------------------------------------------------------------+
#
# This file is part of Check_MK.
# The official homepage is at http://mathias-kettner.de/check_mk.
#
# check_mk is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by
# the Free Software Foundation in version 2. check_mk is distributed
# in the hope that it will be useful, but WITHOUT ANY WARRANTY; with-
# out even the implied warranty of MERCHANTABILITY or FITNESS FOR A
# PARTICULAR PURPOSE. See the GNU General Public License for more de-
# ails. You should have received a copy of the GNU General Public
# License along with GNU Make; see the file COPYING. If not, write
# to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor,
# Boston, MA 02110-1301 USA.
# Prepare builtin-scope for localization function _()
import __builtin__
__builtin__._ = lambda x: x
__builtin__.current_language = None
# Load modules
from mod_python import apache
import sys, os, pprint
from lib import *
import livestatus
import defaults, config, login, userdb, hooks, default_permissions
from html_mod_python import *
# Load page handlers
# Each plugin file is exec'd in this module's namespace; the plugins
# presumably register their pages in the `pagehandlers` dict
# (page name -> handler function) -- see plugins/pages/*.py.
pagehandlers = {}
pagehandlers_dir = defaults.web_dir + "/plugins/pages"
for fn in os.listdir(pagehandlers_dir):
    if fn.endswith(".py"):
        execfile(pagehandlers_dir + "/" + fn)

# prepare local-structure within OMD sites
if defaults.omd_root:
    local_module_path = defaults.omd_root + "/local/share/check_mk/web/htdocs"
    local_locale_path = defaults.omd_root + "/local/share/check_mk/locale"
    if local_module_path not in sys.path:
        # Prepend so site-local modules shadow the shipped ones.
        sys.path[0:0] = [ local_module_path, defaults.web_dir + "/htdocs" ]

    local_pagehandlers_dir = defaults.omd_root + "/local/share/check_mk/web/plugins/pages"
    if os.path.exists(local_pagehandlers_dir):
        for fn in os.listdir(local_pagehandlers_dir):
            if fn.endswith(".py"):
                execfile(local_pagehandlers_dir + "/" + fn)
# Call the load_plugins() function in all modules
def load_all_plugins():
    """Call load_plugins() on every GUI module that provides one.

    Modules without a load_plugins attribute are skipped silently.
    Checking with hasattr() *before* the call (instead of the former
    try/except AttributeError around the call) makes sure that an
    AttributeError raised from *inside* a module's load_plugins() is
    no longer silently swallowed.
    """
    # views/sidebar/... are not imported at the top of this file; they are
    # expected to be in scope by the time this function is first called.
    for module in [ hooks, userdb, views, sidebar, dashboard, wato, bi, mobile, notify ]:
        if hasattr(module, "load_plugins"):
            module.load_plugins()
__builtin__.load_all_plugins = load_all_plugins
# Main entry point for all HTTP-requests (called directly by mod_apache)
def handler(req, fields = None, profiling = True):
    """Serve one Multisite HTTP request.

    req       mod_python request object.
    fields    Pre-parsed request variables (only set on the recursive
              profiling call); normally None.
    profiling False on the inner call made by the profiler to avoid
              recursing into profiling again.

    Returns an apache response code. All expected error conditions are
    converted into rendered error pages here; only apache.SERVER_RETURN
    escapes to mod_python.
    """
    req.content_type = "text/html; charset=UTF-8"
    req.header_sent = False

    # Create an object that contains all data about the request and
    # helper functions for creating valid HTML. Parse URI and
    # store results in the request object for later usage.
    html = html_mod_python(req, fields)
    html.enable_debug = config.debug
    html.id = {} # create unique ID for this request
    __builtin__.html = html

    response_code = apache.OK
    try:
        # Ajax-Functions want no HTML output in case of an error but
        # just a plain server result code of 500
        fail_silently = html.has_var("_ajaxid")

        # Webservice functions may decide to get a normal result code
        # but a text with an error message in case of an error
        plain_error = html.has_var("_plain_error")

        config.load_config() # load multisite.mk
        if html.var("debug"): # Debug flag may be set via URL
            config.debug = True
        html.set_buffering(config.buffered_http_stream)

        # profiling can be enabled in multisite.mk
        if profiling and config.profile:
            import cProfile # , pstats, sys, StringIO, tempfile
            # the profiler looses the memory about all modules. We need to hand over
            # the request object in the apache module.
            # Ubuntu: install python-profiler when using this feature
            profilefile = defaults.var_dir + "/web/multisite.profile"
            retcode = cProfile.runctx(
                "import index; "
                "index.handler(profile_req, profile_fields, False)",
                {'profile_req': req, 'profile_fields': html.fields}, {}, profilefile)
            # Also write a small helper script that prints the profile.
            file(profilefile + ".py", "w").write(
                "#!/usr/bin/python\n"
                "import pstats\n"
                "stats = pstats.Stats(%r)\n"
                "stats.sort_stats('time').print_stats()\n" % profilefile)
            os.chmod(profilefile + ".py", 0755)
            release_all_locks()
            return apache.OK

        # Make sure all plugins are avaiable as early as possible. At least
        # we need the plugins (i.e. the permissions declared in these) at the
        # time before the first login for generating auth.php.
        load_all_plugins()

        # Detect mobile devices
        if html.has_var("mobile"):
            html.mobile = not not html.var("mobile")
        else:
            user_agent = html.req.headers_in.get('User-Agent', '')
            html.mobile = mobile.is_mobile(user_agent)

        # Redirect to mobile GUI if we are a mobile device and
        # the URL is /
        if html.myfile == "index" and html.mobile:
            html.myfile = "mobile"

        # Get page handler.
        handler = pagehandlers.get(html.myfile, page_not_found)

        # First initialization of the default permissions. Needs to be done before the auth_file
        # (auth.php) ist written (it's done during showing the login page for the first time).
        # Must be loaded before the "automation" call to have the general.* permissions available
        # during automation action processing (e.g. hooks triggered by restart)
        default_permissions.load()

        # Special handling for automation.py. Sorry, this must be hardcoded
        # here. Automation calls bybass the normal authentication stuff
        if html.myfile == "automation":
            try:
                handler()
            except Exception, e:
                html.write(str(e))
            release_all_locks()
            return apache.OK

        # Prepare output format
        output_format = html.var("output_format", "html")
        html.set_output_format(output_format)

        # Is the user set by the webserver? otherwise use the cookie based auth
        if not html.user or type(html.user) != str:
            config.auth_type = 'cookie'
            # When not authed tell the browser to ask for the password
            html.user = login.check_auth()
            if html.user == '':
                if fail_silently:
                    # While api call don't show the login dialog
                    raise MKUnauthenticatedException(_('You are not authenticated.'))

                # Redirect to the login-dialog with the current url as original target
                # Never render the login form directly when accessing urls like "index.py"
                # or "dashboard.py". This results in strange problems.
                if html.myfile != 'login':
                    html.set_http_header('Location',
                        defaults.url_prefix + 'check_mk/login.py?_origtarget=%s' %
                        html.urlencode(html.makeuri([])))
                    raise apache.SERVER_RETURN, apache.HTTP_MOVED_TEMPORARILY

                # Initialize the i18n for the login dialog. This might be overridden
                # later after user login
                load_language(html.var("lang", config.get_language()))

                # This either displays the login page or validates the information submitted
                # to the login form. After successful login a http redirect to the originally
                # requested page is performed.
                login.page_login(plain_error)
                release_all_locks()
                return apache.OK

        # Call userdb page hooks which are executed on a regular base to e.g. syncronize
        # information withough explicit user triggered actions
        userdb.hook_page()

        # Set all permissions, read site config, and similar stuff
        config.login(html.user)
        html.load_help_visible()

        # Initialize the multiste i18n. This will be replaced by
        # language settings stored in the user profile after the user
        # has been initialized
        load_language(html.var("lang", config.get_language()))

        # All plugins might have to be reloaded due to a language change
        load_all_plugins()

        # Reload default permissions (maybe reload due to language change)
        default_permissions.load()

        # User allowed to login at all?
        if not config.may("general.use"):
            reason = _("You are not authorized to use Check_MK Multisite. Sorry. "
                       "You are logged in as <b>%s</b>.") % config.user_id
            if len(config.user_role_ids):
                reason += _("Your roles are <b>%s</b>. " % ", ".join(config.user_role_ids))
            else:
                reason += _("<b>You do not have any roles.</b> ")
            reason += _("If you think this is an error, "
                        "please ask your administrator to check the permissions configuration.")

            if config.auth_type == 'cookie':
                reason += _('<p>You have been logged out. Please reload the page to re-authenticate.</p>')
                login.del_auth_cookie()

            raise MKAuthException(reason)

        # Finally run the requested page handler.
        handler()

    except MKUserError, e:
        if plain_error:
            html.write(_("User error") + ": %s\n" % e)
        elif not fail_silently:
            html.header("Invalid User Input")
            html.show_error(unicode(e))
            html.footer()

    except MKAuthException, e:
        if plain_error:
            html.write(_("Authentication error") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Permission denied"))
            html.show_error(unicode(e))
            html.footer()

    except MKUnauthenticatedException, e:
        if plain_error:
            html.write(_("Missing authentication credentials") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Not authenticated"))
            html.show_error(unicode(e))
            html.footer()
        response_code = apache.HTTP_UNAUTHORIZED

    except MKConfigError, e:
        if plain_error:
            html.write(_("Configuration error") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Configuration Error"))
            html.show_error(unicode(e))
            html.footer()
        apache.log_error(_("Configuration error: %s") % (e,), apache.APLOG_ERR)

    except MKGeneralException, e:
        if plain_error:
            html.write(_("General error") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Error"))
            html.show_error(unicode(e))
            html.footer()
        # apache.log_error(_("Error: %s") % (e,), apache.APLOG_ERR)

    except livestatus.MKLivestatusNotFoundError, e:
        if plain_error:
            html.write(_("Livestatus-data not found") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Data not found"))
            html.show_error(_("The following query produced no output:\n<pre>\n%s</pre>\n") %
                            e.query)
            html.footer()
        response_code = apache.HTTP_NOT_FOUND

    except livestatus.MKLivestatusException, e:
        if plain_error:
            html.write(_("Livestatus problem") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Livestatus problem"))
            html.show_error(_("Livestatus problem: %s") % e)
            html.footer()
        else:
            response_code = apache.HTTP_BAD_GATEWAY

    except (apache.SERVER_RETURN,
            (apache.SERVER_RETURN, apache.HTTP_UNAUTHORIZED),
            (apache.SERVER_RETURN, apache.HTTP_MOVED_TEMPORARILY)):
        # Deliberate redirects/auth responses: clean up and let mod_python
        # handle the SERVER_RETURN itself.
        release_all_locks()
        html.live = None
        raise

    except Exception, e:
        html.unplug()
        apache.log_error("%s %s %s" % (req.uri, _('Internal error') + ':', e), apache.APLOG_ERR) # log in all cases
        if plain_error:
            html.write(_("Internal error") + ": %s\n" % e)
        elif not fail_silently:
            html.header(_("Internal error"))
            if config.debug:
                html.show_error("%s: %s<pre>%s</pre>" %
                                (_('Internal error') + ':', e, format_exception()))
            else:
                url = html.makeuri([("debug", "1")])
                html.show_error("%s: %s (<a href=\"%s\">%s</a>)" % (_('Internal error') + ':', e, url, _('Retry with debug mode')))
            html.footer()
        response_code = apache.OK

    release_all_locks()
    html.live = None # disconnects from livestatus
    return response_code
def page_not_found():
    """Fallback page handler for URLs with no registered handler."""
    # Webservice callers get plain text instead of a rendered HTML page.
    if html.has_var("_plain_error"):
        html.write(_("Page not found"))
        return
    html.header(_("Page not found"))
    html.show_error(_("This page was not found. Sorry."))
    html.footer()
| gpl-2.0 |
Hasimir/brython | www/src/Lib/site-packages/simpleaio/coroutines.py | 5 | 1700 | import functools
from .futures import *
# Code-object flag set by the compiler when a function body contains ``yield``.
CO_GENERATOR = 0x20

def _gen():
    yield 10

# Concrete generator type, obtained from a throwaway generator instance.
_GEN_TYPE = type(_gen())

def isgenerator_function(object):
    """Return True if *object* was compiled as a generator function."""
    code = getattr(object, '__code__', None)
    return code is not None and bool(code.co_flags & CO_GENERATOR)

def iscoroutinefunction(func):
    """Return True if func is a decorated coroutine function."""
    return getattr(func, '_is_coroutine', False)

def isgenerator(object):
    """Return True if *object* is a generator instance."""
    return isinstance(object, _GEN_TYPE)

def iscoroutine(obj):
    """Return True if obj is a coroutine object (generator or marked func)."""
    return isgenerator(obj) or iscoroutinefunction(obj)
def coroutine(func):
    """Decorator to mark coroutines.

    Generator functions are returned unchanged (apart from the marker
    attributes); plain callables are wrapped in a generator that awaits
    Future/generator results via ``yield from``.
    """
    if isgenerator_function(func):
        coro = func
    else:
        @functools.wraps(func)
        def coro(*args, **kw):
            res = func(*args, **kw)
            # Fixed: the module only does ``from .futures import *``, so the
            # name ``futures`` is never bound -- ``futures.Future`` raised
            # NameError here. Use the star-imported ``Future`` directly.
            if isinstance(res, Future) or isgenerator(res):
                res = yield from res
            # NOTE(review): setting gi_frame on an arbitrary result looks
            # suspect -- it fails for results without a __dict__ (e.g. ints).
            # Kept as-is to preserve existing behaviour; verify intent.
            res.gi_frame = None
            return res
    coro.gi_frame = None
    coro._is_coroutine = True  # For iscoroutinefunction().
    return coro
def _format_coroutine(coro):
try:
assert iscoroutine(coro)
coro_name = getattr(coro, '__qualname__', coro.__name__)
filename = coro.__code__.co_filename
if coro.gi_frame is not None:
lineno = coro.gi_frame.f_lineno
coro_repr = ('%s() running at %s:%s'
% (coro_name, filename, lineno))
else:
lineno = coro.__code__.co_firstlineno
coro_repr = ('%s() done, defined at %s:%s'
% (coro_name, filename, lineno))
except:
coro_repr = "Coroutine: %s" % coro_name
return coro_repr
| bsd-3-clause |
cmelange/ansible | lib/ansible/modules/cloud/vmware/vmware_maintenancemode.py | 27 | 6009 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2015, VMware, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
# NOTE: option renamed in the docs from ``vsan_mode`` to ``vsan`` so the
# documentation matches the actual argument_spec (``vsan``) in main() and
# the EXAMPLES block below.
DOCUMENTATION = '''
---
module: vmware_maintenancemode
short_description: Place a host into maintenance mode
description:
    - Place an ESXI host into maintenance mode
    - Support for VSAN compliant maintenance mode when selected
author: "Jay Jahns <jjahns@vmware.com>"
version_added: "2.1"
notes:
    - Tested on vSphere 5.5 and 6.0
requirements:
    - "python >= 2.6"
    - PyVmomi
options:
    esxi_hostname:
        description:
            - Name of the host as defined in vCenter
        required: True
    vsan:
        description:
            - Specify which VSAN compliant mode to enter
        choices:
            - 'ensureObjectAccessibility'
            - 'evacuateAllData'
            - 'noAction'
        required: False
    evacuate:
        description:
            - If True, evacuate all powered off VMs
        choices:
            - True
            - False
        default: False
        required: False
    timeout:
        description:
            - Specify a timeout for the operation
        required: False
        default: 0
    state:
        description:
            - Enter or exit maintenance mode
        choices:
            - present
            - absent
        default: present
        required: False
extends_documentation_fragment: vmware.documentation
'''
EXAMPLES = '''
- name: Enter VSAN-Compliant Maintenance Mode
local_action:
module: vmware_maintenancemode
hostname: vc_host
username: vc_user
password: vc_pass
esxi_hostname: esxi.host.example
vsan: ensureObjectAccessibility
evacuate: yes
timeout: 3600
state: present
'''
# Fixed: the ``status`` entry used ``return:`` instead of the valid RETURN
# block key ``returned:`` (inconsistent with the other two entries).
RETURN = '''
hostsystem:
    description: Name of vim reference
    returned: always
    type: string
    sample: "'vim.HostSystem:host-236'"
hostname:
    description: Name of host in vCenter
    returned: always
    type: string
    sample: "esxi.local.domain"
status:
    description: Action taken
    returned: always
    type: string
    sample: "ENTER"
'''
try:
from pyVmomi import vim
HAS_PYVMOMI = True
except ImportError:
HAS_PYVMOMI = False
def EnterMaintenanceMode(module, host):
    """Put *host* into (optionally VSAN-compliant) maintenance mode.

    module: AnsibleModule instance (params: esxi_hostname, vsan, timeout,
            evacuate). host: pyVmomi HostSystem object.
    Returns the result dict for module.exit_json(); calls exit_json/
    fail_json itself on no-op or error (both terminate the module run).
    """
    # Idempotency: nothing to do if the host is already in maintenance mode.
    if host.runtime.inMaintenanceMode:
        module.exit_json(
            changed=False,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='NO_ACTION',
            msg='Host already in maintenance mode')

    spec = vim.host.MaintenanceSpec()

    if module.params['vsan']:
        # Choose how VSAN data is decommissioned while the host is down.
        spec.vsanMode = vim.vsan.host.DecommissionMode()
        spec.vsanMode.objectAction = module.params['vsan']

    try:
        task = host.EnterMaintenanceMode_Task(
            module.params['timeout'],
            module.params['evacuate'],
            spec)

        # result is unused; success is presumably a boolean from the
        # vmware module_utils wait_for_task helper -- TODO confirm.
        success, result = wait_for_task(task)

        return dict(changed=success,
                    hostsystem=str(host),
                    hostname=module.params['esxi_hostname'],
                    status='ENTER',
                    msg='Host entered maintenance mode')
    except TaskError:
        module.fail_json(
            msg='Host failed to enter maintenance mode')
def ExitMaintenanceMode(module, host):
    """Take *host* out of maintenance mode.

    Mirror image of EnterMaintenanceMode: returns the result dict for
    module.exit_json(); calls exit_json/fail_json itself on no-op or error.
    """
    # Idempotency: nothing to do if the host is not in maintenance mode.
    if not host.runtime.inMaintenanceMode:
        module.exit_json(
            changed=False,
            hostsystem=str(host),
            hostname=module.params['esxi_hostname'],
            status='NO_ACTION',
            msg='Host not in maintenance mode')

    try:
        task = host.ExitMaintenanceMode_Task(
            module.params['timeout'])

        # result is unused; see note in EnterMaintenanceMode.
        success, result = wait_for_task(task)

        return dict(changed=success,
                    hostsystem=str(host),
                    hostname=module.params['esxi_hostname'],
                    status='EXIT',
                    msg='Host exited maintenance mode')
    except TaskError:
        module.fail_json(
            msg='Host failed to exit maintenance mode')
def main():
    """Module entry point: parse arguments, connect and dispatch on state."""
    spec = vmware_argument_spec()
    spec.update(dict(
        esxi_hostname=dict(required=True),
        vsan=dict(required=False, choices=['ensureObjectAccessibility',
                                           'evacuateAllData',
                                           'noAction']),
        evacuate=dict(required=False, type='bool', default=False),
        timeout=dict(required=False, default=0, type='int'),
        state=dict(required=False,
                   default='present',
                   choices=['present', 'absent'])))

    module = AnsibleModule(argument_spec=spec)

    if not HAS_PYVMOMI:
        module.fail_json(msg='pyvmomi is required for this module')

    content = connect_to_api(module)
    host = find_hostsystem_by_name(content, module.params['esxi_hostname'])

    if not host:
        module.fail_json(
            msg='Host not found in vCenter')

    # present -> enter maintenance mode, absent -> leave it.
    if module.params['state'] == 'present':
        result = EnterMaintenanceMode(module, host)

    elif module.params['state'] == 'absent':
        result = ExitMaintenanceMode(module, host)

    module.exit_json(**result)
from ansible.module_utils.basic import *
from ansible.module_utils.vmware import *
if __name__ == '__main__':
main()
| gpl-3.0 |
mohanprasath/Course-Work | data_analysis/uh_data_analysis_with_python/hy-data-analysis-with-python-spring-2020/part02-e04_word_frequencies/tmc/result.py | 91 | 1502 | from unittest.runner import TextTestResult
from .points import _parse_points, _name_test
import atexit
import json
import traceback
# Module-global accumulator of per-test result dicts; dumped to JSON by the
# atexit hook below.
results = []

class TMCResult(TextTestResult):
    """TextTestResult that records every test outcome into ``results``.

    Removed the previous ``__init__`` and ``startTest`` overrides: both
    only called super() with the same arguments and were dead weight.
    """

    def addSuccess(self, test):
        super(TMCResult, self).addSuccess(test)
        self.addResult(test, 'passed')

    def addFailure(self, test, err):
        super(TMCResult, self).addFailure(test, err)
        self.addResult(test, 'failed', err)

    def addError(self, test, err):
        super(TMCResult, self).addError(test, err)
        self.addResult(test, 'errored', err)

    def addResult(self, test, status, err=None):
        """Append one result record for *test*.

        status: 'passed' | 'failed' | 'errored'.
        err: optional (exc_type, exc_value, traceback) triple as passed to
             addFailure/addError.
        """
        points = _parse_points(test)
        message = ""
        backtrace = []
        if err is not None:
            message = str(err[1])
            backtrace = traceback.format_tb(err[2])
        details = {
            'name': _name_test(test),
            'status': status,
            'message': message,
            'passed': status == 'passed',
            'points': points,
            'backtrace': backtrace
        }
        results.append(details)
# TODO: Do not do this if not using TMCTestRunner
@atexit.register
def write_output():
    # Dump all recorded test outcomes for the TMC grader to pick up.
    # ensure_ascii=False keeps non-ASCII test names readable in the file.
    with open('.tmc_test_results.json', 'w') as f:
        json.dump(results, f, ensure_ascii=False)
| gpl-3.0 |
unicefuganda/rapidsms-bednets | bednets/view_helpers.py | 1 | 3046 | import datetime
import xlwt
from time import strftime
from mtrack.utils import write_xls
from django.http import HttpResponse
from django.db import connection
def generate_excel_response(data, headings):
    """Build a one-sheet .xls workbook from *data* and return it as a
    Django attachment response (filename carries a date-time prefix)."""
    book = xlwt.Workbook(encoding="utf8")
    write_xls(sheet_name="BedNets Report", headings=headings, data=data, book=book, cell_red_if_value="")
    response = HttpResponse(mimetype="application/vnd.ms-excel")
    # e.g. 20240131-235959_bednet_report.xls
    fname_prefix = datetime.date.today().strftime('%Y%m%d') + "-" + strftime('%H%M%S')
    response["Content-Disposition"] = 'attachment; filename=%s_bednet_report.xls' % fname_prefix
    book.save(response)
    return response
def generate_multiple_excel_sheets_response(sent_data, received_data, dist_data, sent_headings, recv_headings):
    """Build a three-sheet (Sent/Received/Distributed) .xls workbook and
    return it as a Django attachment response.

    Note: cell_red_if_value=True here vs "" in generate_excel_response --
    presumably intentional highlighting difference; confirm against
    mtrack.utils.write_xls semantics.
    """
    book = xlwt.Workbook(encoding="utf8")
    write_xls(sheet_name="Sent Report", headings=sent_headings, data=sent_data, book=book, cell_red_if_value=True)
    write_xls(sheet_name="Received Report", headings=recv_headings, data=received_data, book=book, cell_red_if_value=True)
    write_xls(sheet_name="Distributed Report", headings=recv_headings, data=dist_data, book=book, cell_red_if_value=True)
    response = HttpResponse(mimetype="application/vnd.ms-excel")
    fname_prefix = datetime.date.today().strftime('%Y%m%d') + "-" + strftime('%H%M%S')
    response["Content-Disposition"] = 'attachment; filename=%s_bednet_report.xls' % fname_prefix
    book.save(response)
    return response
def replace_zero_with_empty_string(data):
    """Blank out zero-valued cells of each 7-column report row for display.

    Column rules: col 0 kept as-is; cols 1 and 6 -> " " when zero; cols 2/3
    -> "" when zero; col 4 also blanked when col 2 is ""; col 5 also blanked
    when col 2 or col 4 is "". When the row has received stock at the
    sub-county (col 1 > 0), empty cells beyond col 1 become " ".
    Mutates *data* in place (rows become lists) and returns it.
    """
    for row_index, row in enumerate(data):
        cleaned = []
        for col, cell in enumerate(row):
            if col == 0:
                cleaned.append(cell)
            elif col in (1, 6):
                cleaned.append(" " if cell == 0 else cell)
            elif col in (2, 3):
                cleaned.append("" if cell == 0 else cell)
            elif col == 4:
                cleaned.append("" if cell == 0 or row[2] == "" else cell)
            elif col == 5:
                cleaned.append("" if cell == 0 or row[2] == "" or row[4] == "" else cell)
        has_recv_at_sc = row[1] > 0
        cleaned = [" " if pos > 1 and has_recv_at_sc and cell == "" else cell
                   for pos, cell in enumerate(cleaned)]
        data[row_index] = cleaned
    return data
def execute_sql(sql):
    """Run *sql* on the default Django connection and return the non-empty
    result rows as a list."""
    cursor = connection.cursor()
    cursor.execute(sql)
    # Drop falsy rows, matching the previous filter(None, ...) behaviour.
    return [row for row in cursor.fetchall() if row]
def get_consolidated_data():
    """One row per sub-county report: sent/received/distributed/in-stock
    counts, with zero cells blanked for display (see
    replace_zero_with_empty_string for the exact per-column rules)."""
    data = execute_sql("select sub_county,quantity_at_subcounty,quantity_sent_to_dp,distribution_point,quantity_received_at_dp,quantity_distributed_at_dp,in_stock from bednets_bednetsreport")
    data = replace_zero_with_empty_string(data)
    return data
def get_data_dump(keyword):
    """Fetch raw dump-report rows for *keyword*.

    Security fix: the previous implementation concatenated *keyword*
    directly into the SQL string (an injection vector); the query is now
    parameterized through the DB driver.
    """
    cursor = connection.cursor()
    cursor.execute(
        "select name,telephone,district,invalid_submission,invalid_reporter,"
        "number_of_bednets,at_location,from_location "
        "from bednets_dumpreport where keyword=%s", [keyword])
    data = cursor.fetchall()
    # Drop falsy rows, matching execute_sql()'s filter(None, ...) behaviour.
    return filter(None, data)
| bsd-3-clause |
alexhersh/calico | calico/etcddriver/test/test_hwm.py | 2 | 6347 | # -*- coding: utf-8 -*-
# Copyright 2015 Metaswitch Networks
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
test_hwm
~~~~~~~~
Tests for high water mark tracking function.
"""
import logging
from unittest import TestCase
from mock import Mock, call, patch
from calico.etcddriver import hwm
from calico.etcddriver.hwm import HighWaterTracker
_log = logging.getLogger(__name__)
class TestHighWaterTracker(TestCase):
    """End-to-end test of HighWaterTracker's snapshot/event-stream merge,
    deletion tracking and stale-key cleanup."""

    def setUp(self):
        self.hwm = HighWaterTracker()

    def test_mainline(self):
        # Test merging of updates between a snapshot with etcd_index 10 and
        # updates coming in afterwards with indexes 11, 12, ...
        # We use prefix "/a/$" because $ is not allowed in the trie so it
        # implicitly tests encoding/decoding is being properly applied.
        old_hwm = self.hwm.update_hwm("/a/$/c", 9)  # Pre-snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/b/c/d", 9)  # Pre-snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/j/c/d", 9)  # Pre-snapshot
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 3)

        # While merging a snapshot we track deletions.
        self.hwm.start_tracking_deletions()

        # Send in some keys from the snapshot.
        old_hwm = self.hwm.update_hwm("/a/$/c", 10)  # From snapshot
        self.assertEqual(old_hwm, 9)
        old_hwm = self.hwm.update_hwm("/a/$/d", 10)  # From snapshot
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/d/e/f", 10)  # From snapshot
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 5)

        # This key is first seen in the event stream, so the snapshot version
        # should be ignored.
        old_hwm = self.hwm.update_hwm("/a/h/i", 11)  # From events
        self.assertEqual(old_hwm, None)
        old_hwm = self.hwm.update_hwm("/a/h/i", 10)  # From snapshot
        self.assertEqual(old_hwm, 11)
        old_hwm = self.hwm.update_hwm("/a/h/i", 12)  # From events
        self.assertEqual(old_hwm, 11)  # Still 11, snapshot ignored.
        self.assertEqual(len(self.hwm), 6)

        # Then a whole subtree gets deleted by the events.
        deleted_keys = self.hwm.store_deletion("/a/$", 13)
        self.assertEqual(set(deleted_keys), set(["/a/$/c", "/a/$/d"]))
        self.assertEqual(len(self.hwm), 4)

        # But afterwards, we see a snapshot key within the subtree, it should
        # be ignored.
        old_hwm = self.hwm.update_hwm("/a/$/e", 10)
        self.assertEqual(old_hwm, 13)  # Returns the etcd_index of the delete.

        # Then a new update from the event stream, recreates the directory.
        old_hwm = self.hwm.update_hwm("/a/$/f", 14)
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 5)

        # And subsequent updates are processed ignoring the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 15)
        self.assertEqual(old_hwm, 14)

        # However, snapshot updates from within the deleted subtree are still
        # ignored.
        old_hwm = self.hwm.update_hwm("/a/$/e", 10)
        self.assertEqual(old_hwm, 13)  # Returns the etcd_index of the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 10)
        self.assertEqual(old_hwm, 13)  # Returns the etcd_index of the delete.
        old_hwm = self.hwm.update_hwm("/a/$/g", 10)
        self.assertEqual(old_hwm, 13)  # Returns the etcd_index of the delete.
        self.assertEqual(len(self.hwm), 5)

        # But ones outside the subtree ar not.
        old_hwm = self.hwm.update_hwm("/f/g", 10)
        self.assertEqual(old_hwm, None)

        # And subsequent updates are processed ignoring the delete.
        old_hwm = self.hwm.update_hwm("/a/$/f", 16)
        self.assertEqual(old_hwm, 15)

        # End of snapshot: we stop tracking deletions, which should free up the
        # resources.
        self.hwm.stop_tracking_deletions()
        self.assertEqual(self.hwm._deletion_hwms, None)

        # Then, subseqent updates should be handled normally.
        old_hwm = self.hwm.update_hwm("/a/$/f", 17)
        self.assertEqual(old_hwm, 16)  # From previous event
        old_hwm = self.hwm.update_hwm("/g/b/f", 18)
        self.assertEqual(old_hwm, None)  # Seen for the first time.
        old_hwm = self.hwm.update_hwm("/d/e/f", 19)
        self.assertEqual(old_hwm, 10)  # From the snapshot.
        self.assertEqual(len(self.hwm), 7)

        # We should be able to find all the keys that weren't seen during
        # the snapshot.
        old_keys = self.hwm.remove_old_keys(10)
        self.assertEqual(set(old_keys), set(["/b/c/d", "/j/c/d"]))
        self.assertEqual(len(self.hwm), 5)

        # They should now be gone from the index.
        old_hwm = self.hwm.update_hwm("/b/c/d", 20)
        self.assertEqual(old_hwm, None)
        self.assertEqual(len(self.hwm), 6)
class TestKeyEncoding(TestCase):
    """Round-trip tests for hwm.encode_key / hwm.decode_key."""

    def test_encode_key(self):
        self.assert_enc_dec("/calico/v1/foo/bar", "/calico/v1/foo/bar/")
        self.assert_enc_dec("/:_-./foo", "/:_-./foo/")
        self.assert_enc_dec("/:_-.~/foo", "/:_-.%7E/foo/")
        self.assert_enc_dec("/%/foo", "/%25/foo/")
        self.assert_enc_dec(u"/\u01b1/foo", "/%C6%B1/foo/")
        # A trailing slash must not be doubled up.
        self.assertEqual(hwm.encode_key("/foo/"), "/foo/")

    def assert_enc_dec(self, key, expected_encoding):
        # Helper: assert encode(key) == expected and decode round-trips.
        encoded = hwm.encode_key(key)
        self.assertEqual(
            encoded,
            expected_encoding,
            msg="Expected %r to encode as %r but got %r" %
                (key, expected_encoding, encoded))
        decoded = hwm.decode_key(encoded)
        self.assertEqual(
            decoded,
            key,
            msg="Expected %r to decode as %r but got %r" %
                (encoded, key, decoded))
| apache-2.0 |
lukeiwanski/tensorflow | tensorflow/contrib/slim/python/slim/data/dataset.py | 163 | 2444 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of a Dataset.
A Dataset is a collection of several components: (1) a list of data sources
(2) a Reader class that can read those sources and returns possibly encoded
samples of data (3) a decoder that decodes each sample of data provided by the
reader (4) the total number of samples and (5) an optional dictionary mapping
the list of items returns to a description of those items.
Data can be loaded from a dataset specification using a dataset_data_provider:
dataset = CreateMyDataset(...)
provider = dataset_data_provider.DatasetDataProvider(
dataset, shuffle=False)
image, label = provider.get(['image', 'label'])
See slim.data.dataset_data_provider for additional examples.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
class Dataset(object):
    """Represents a Dataset specification."""

    def __init__(self, data_sources, reader, decoder, num_samples,
                 items_to_descriptions, **kwargs):
        """Initializes the dataset.

        Args:
          data_sources: A list of files that make up the dataset.
          reader: The reader class, a subclass of BaseReader such as
            TextLineReader or TFRecordReader.
          decoder: An instance of a data_decoder.
          num_samples: The number of samples in the dataset.
          items_to_descriptions: A map from the items that the dataset
            provides to the descriptions of those items.
          **kwargs: Any remaining dataset-specific fields.
        """
        # Merge the named components into the extra fields; named arguments
        # win when a kwarg uses the same key. Everything becomes an
        # attribute of the instance.
        spec = dict(kwargs)
        spec.update(
            data_sources=data_sources,
            reader=reader,
            decoder=decoder,
            num_samples=num_samples,
            items_to_descriptions=items_to_descriptions)
        self.__dict__.update(spec)
| apache-2.0 |
CredoReference/edx-platform | common/lib/xmodule/xmodule/partitions/partitions.py | 21 | 8474 | """Defines ``Group`` and ``UserPartition`` models for partitioning"""
from collections import namedtuple
from stevedore.extension import ExtensionManager
# We use ``id`` in this file as the IDs of our Groups and UserPartitions,
# which Pylint disapproves of.
# pylint: disable=redefined-builtin
# UserPartition IDs must be unique. The Cohort and Random UserPartitions (when they are
# created via Studio) choose an unused ID in the range of 100 (historical) to MAX_INT. Therefore the
# dynamic UserPartitionIDs must be under 100, and they have to be hard-coded to ensure
# they are always the same whenever the dynamic partition is added (since the UserPartition
# ID is stored in the xblock group_access dict).
ENROLLMENT_TRACK_PARTITION_ID = 50
MINIMUM_STATIC_PARTITION_ID = 100
class UserPartitionError(Exception):
    """Base exception for all user-partition related errors."""


class NoSuchUserPartitionError(UserPartitionError):
    """Raised when a UserPartition lookup by ID fails."""


class NoSuchUserPartitionGroupError(UserPartitionError):
    """Raised when a Group lookup by ID within a UserPartition fails."""
class Group(namedtuple("Group", "id name")):
    """
    An id and name for a group of students. The id should be unique
    within the UserPartition this group appears in.
    """
    # Serialization format version, stored alongside the data in courses so
    # old serialized groups can be detected when deserializing.
    VERSION = 1

    def __new__(cls, id, name):
        # Normalize the id to an int so Group("3", n) == Group(3, n).
        return super(Group, cls).__new__(cls, int(id), name)

    def to_json(self):
        """
        'Serialize' to a json-serializable representation.

        Returns:
            a dictionary with keys for the properties of the group.
        """
        return {
            "id": self.id,
            "name": self.name,
            "version": Group.VERSION,
        }

    @staticmethod
    def from_json(value):
        """
        Deserialize a Group from a json-like representation.

        Args:
            value: a dictionary with keys for the properties of the group.

        Raises TypeError if the value doesn't have the right keys.
        """
        if isinstance(value, Group):
            return value

        # Report the first required key that is absent, in declaration order.
        missing = next(
            (key for key in ("id", "name", "version") if key not in value),
            None)
        if missing is not None:
            raise TypeError("Group dict {0} missing value key '{1}'".format(
                value, missing))

        if value["version"] != Group.VERSION:
            raise TypeError("Group dict {0} has unexpected version".format(
                value))

        return Group(value["id"], value["name"])
# The Stevedore extension point namespace for user partition scheme plugins.
USER_PARTITION_SCHEME_NAMESPACE = 'openedx.user_partition_scheme'
class UserPartition(namedtuple("UserPartition", "id name description groups scheme parameters active")):
    """A named way to partition users into groups, primarily intended for
    running experiments. It is expected that each user will be in at most one
    group in a partition.

    A Partition has an id, name, scheme, description, parameters, and a list
    of groups. The id is intended to be unique within the context where these
    are used. (e.g., for partitions of users within a course, the ids should
    be unique per-course). The scheme is used to assign users into groups.
    The parameters field is used to save extra parameters e.g., location of
    the block in case of VerificationPartitionScheme.

    Partitions can be marked as inactive by setting the "active" flag to False.
    Any group access rule referencing inactive partitions will be ignored
    when performing access checks.
    """
    # Current serialization version; from_json tolerates newer versions
    # for forward compatibility (see from_json below).
    VERSION = 3

    # The collection of user partition scheme extensions.
    scheme_extensions = None

    # The default scheme to be used when upgrading version 1 partitions.
    VERSION_1_SCHEME = "random"

    def __new__(cls, id, name, description, groups, scheme=None, parameters=None, active=True, scheme_id=VERSION_1_SCHEME):  # pylint: disable=line-too-long
        # Resolve the scheme plugin from its id when no scheme object was
        # passed in (e.g. when constructing from deserialized data).
        if not scheme:
            scheme = UserPartition.get_scheme(scheme_id)
        if parameters is None:
            parameters = {}
        return super(UserPartition, cls).__new__(cls, int(id), name, description, groups, scheme, parameters, active)

    @staticmethod
    def get_scheme(name):
        """
        Returns the user partition scheme with the given name.

        Raises UserPartitionError if no scheme plugin is registered
        under that name.
        """
        # Note: we're creating the extension manager lazily to ensure that the Python path
        # has been correctly set up. Trying to create this statically will fail, unfortunately.
        if not UserPartition.scheme_extensions:
            UserPartition.scheme_extensions = ExtensionManager(namespace=USER_PARTITION_SCHEME_NAMESPACE)
        try:
            scheme = UserPartition.scheme_extensions[name].plugin
        except KeyError:
            raise UserPartitionError("Unrecognized scheme '{0}'".format(name))
        # Stash the registered name on the plugin so callers (e.g. to_json)
        # can recover the scheme's name from the scheme object alone.
        scheme.name = name
        return scheme

    def to_json(self):
        """
        'Serialize' to a json-serializable representation.

        Returns:
            a dictionary with keys for the properties of the partition.
        """
        return {
            "id": self.id,
            "name": self.name,
            "scheme": self.scheme.name,
            "description": self.description,
            "parameters": self.parameters,
            "groups": [g.to_json() for g in self.groups],
            "active": bool(self.active),
            "version": UserPartition.VERSION
        }

    @staticmethod
    def from_json(value):
        """
        Deserialize a UserPartition from a json-like representation.

        Args:
            value: a dictionary with keys for the properties of the partition.

        Raises TypeError if the value doesn't have the right keys.
        """
        # Passing an already-built UserPartition through is a no-op.
        if isinstance(value, UserPartition):
            return value
        for key in ("id", "name", "description", "version", "groups"):
            if key not in value:
                raise TypeError("UserPartition dict {0} missing value key '{1}'".format(value, key))
        if value["version"] == 1:
            # If no scheme was provided, set it to the default ('random')
            scheme_id = UserPartition.VERSION_1_SCHEME
        # Version changes should be backwards compatible in case the code
        # gets rolled back. If we see a version number greater than the current
        # version, we should try to read it rather than raising an exception.
        elif value["version"] >= 2:
            if "scheme" not in value:
                raise TypeError("UserPartition dict {0} missing value key 'scheme'".format(value))
            scheme_id = value["scheme"]
        else:
            raise TypeError("UserPartition dict {0} has unexpected version".format(value))
        parameters = value.get("parameters", {})
        active = value.get("active", True)
        groups = [Group.from_json(g) for g in value["groups"]]
        scheme = UserPartition.get_scheme(scheme_id)
        if not scheme:
            raise TypeError("UserPartition dict {0} has unrecognized scheme {1}".format(value, scheme_id))
        # A scheme plugin may provide a factory that builds a specialized
        # partition object; otherwise fall back to the plain namedtuple.
        if hasattr(scheme, "create_user_partition"):
            return scheme.create_user_partition(
                value["id"],
                value["name"],
                value["description"],
                groups,
                parameters,
                active,
            )
        else:
            return UserPartition(
                value["id"],
                value["name"],
                value["description"],
                groups,
                scheme,
                parameters,
                active,
            )

    def get_group(self, group_id):
        """
        Returns the group with the specified id.

        Arguments:
            group_id (int): ID of the partition group.

        Raises:
            NoSuchUserPartitionGroupError: The specified group could not be found.
        """
        for group in self.groups:
            if group.id == group_id:
                return group
        raise NoSuchUserPartitionGroupError(
            "Could not find a Group with ID [{group_id}] in UserPartition [{partition_id}].".format(
                group_id=group_id, partition_id=self.id
            )
        )
| agpl-3.0 |
40223142/2015cad0623 | static/Brython3.1.1-20150328-091302/Lib/_abcoll.py | 688 | 5155 | # Copyright 2007 Google, Inc. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.
"""Abstract Base Classes (ABCs) for collections, according to PEP 3119.
DON'T USE THIS MODULE DIRECTLY! The classes here should be imported
via collections; they are defined here only to alleviate certain
bootstrapping issues. Unit tests are in test_collections.
"""
from abc import ABCMeta, abstractmethod
import sys
__all__ = ["Hashable", "Iterable", "Iterator",
"Sized", "Container", "Callable",
"Set", "MutableSet",
"Mapping", "MutableMapping",
"MappingView", "KeysView", "ItemsView", "ValuesView",
"Sequence", "MutableSequence",
"ByteString",
]
"""
### collection related types which are not exposed through builtin ###
## iterators ##
#fixme brython
#bytes_iterator = type(iter(b''))
bytes_iterator = type(iter(''))
#fixme brython
#bytearray_iterator = type(iter(bytearray()))
#callable_iterator = ???
dict_keyiterator = type(iter({}.keys()))
dict_valueiterator = type(iter({}.values()))
dict_itemiterator = type(iter({}.items()))
list_iterator = type(iter([]))
list_reverseiterator = type(iter(reversed([])))
range_iterator = type(iter(range(0)))
set_iterator = type(iter(set()))
str_iterator = type(iter(""))
tuple_iterator = type(iter(()))
zip_iterator = type(iter(zip()))
## views ##
dict_keys = type({}.keys())
dict_values = type({}.values())
dict_items = type({}.items())
## misc ##
dict_proxy = type(type.__dict__)
"""
def abstractmethod(self):
    # Brython shim: shadow abc.abstractmethod (imported above) with an
    # identity decorator, since the full ABCMeta machinery is unavailable
    # in this environment. The decorated function is returned unchanged.
    return self
### ONE-TRICK PONIES ###
#class Iterable(metaclass=ABCMeta):
class Iterable:
    """Protocol class for objects that support iteration via ``__iter__``.

    The ABCMeta machinery is stripped out for Brython, so the abstract
    marker below is purely decorative here.
    """
    @abstractmethod
    def __iter__(self):
        # Generator-shaped placeholder: iterates zero items.
        while False:
            yield None

    @classmethod
    def __subclasshook__(cls, C):
        # Duck-typed issubclass support: any class whose MRO defines
        # __iter__ qualifies as Iterable.
        if cls is Iterable:
            for base in C.__mro__:
                if "__iter__" in base.__dict__:
                    return True
        return NotImplemented
#class Sized(metaclass=ABCMeta):
class Sized:
    """Protocol class for objects measurable with ``len()`` via ``__len__``."""
    @abstractmethod
    def __len__(self):
        # Placeholder implementation for the abstract hook.
        return 0

    @classmethod
    def __subclasshook__(cls, C):
        # Any class whose MRO defines __len__ qualifies as Sized.
        if cls is Sized:
            for base in C.__mro__:
                if "__len__" in base.__dict__:
                    return True
        return NotImplemented
#class Container(metaclass=ABCMeta):
class Container:
    """Protocol class for objects supporting ``in`` via ``__contains__``."""
    @abstractmethod
    def __contains__(self, x):
        # Placeholder implementation for the abstract hook.
        return False

    @classmethod
    def __subclasshook__(cls, C):
        # Any class whose MRO defines __contains__ qualifies as Container.
        if cls is Container:
            for base in C.__mro__:
                if "__contains__" in base.__dict__:
                    return True
        return NotImplemented
### MAPPINGS ###
class Mapping(Sized, Iterable, Container):
    """Read-only mapping protocol: concrete helpers built on ``__getitem__``,
    ``__len__`` and ``__iter__``.
    """
    @abstractmethod
    def __getitem__(self, key):
        raise KeyError

    def get(self, key, default=None):
        """Return ``self[key]``, or *default* when the key is absent."""
        try:
            return self[key]
        except KeyError:
            return default

    def __contains__(self, key):
        # Membership is defined by lookup success, not by value truthiness.
        try:
            self[key]
        except KeyError:
            return False
        return True

    def keys(self):
        """Return a new view of the mapping's keys."""
        return KeysView(self)

    def items(self):
        """Return a new view of the mapping's (key, value) pairs."""
        return ItemsView(self)

    def values(self):
        """Return a new view of the mapping's values."""
        return ValuesView(self)

    def __eq__(self, other):
        # Only mappings compare equal to mappings; defer otherwise.
        if not isinstance(other, Mapping):
            return NotImplemented
        return dict(self.items()) == dict(other.items())

    def __ne__(self, other):
        return not (self == other)
class MutableMapping(Mapping):
    # Mutable mapping protocol: concrete pop/popitem/clear/update/setdefault
    # built on top of the abstract __setitem__/__delitem__/__getitem__.

    @abstractmethod
    def __setitem__(self, key, value):
        raise KeyError

    @abstractmethod
    def __delitem__(self, key):
        raise KeyError

    # Name-mangled sentinel distinguishing "no default supplied" from an
    # explicit default of None in pop().
    __marker = object()

    def pop(self, key, default=__marker):
        """Remove *key* and return its value; return *default* (if given)
        when the key is missing, otherwise re-raise KeyError."""
        try:
            value = self[key]
        except KeyError:
            if default is self.__marker:
                raise
            return default
        else:
            del self[key]
            return value

    def popitem(self):
        """Remove and return an arbitrary (key, value) pair; KeyError if empty."""
        try:
            key = next(iter(self))
        except StopIteration:
            # Empty mapping: translate exhausted iterator into KeyError.
            raise KeyError
        value = self[key]
        del self[key]
        return key, value

    def clear(self):
        """Remove every item, one popitem() at a time, until empty."""
        try:
            while True:
                self.popitem()
        except KeyError:
            pass

    def update(*args, **kwds):
        # Deliberately declared with *args (no named `self`) so callers may
        # pass a keyword argument literally named "self".
        if len(args) > 2:
            raise TypeError("update() takes at most 2 positional "
                            "arguments ({} given)".format(len(args)))
        elif not args:
            raise TypeError("update() takes at least 1 argument (0 given)")
        self = args[0]
        other = args[1] if len(args) >= 2 else ()
        # Three source shapes: Mapping, keys()-providing object, or an
        # iterable of (key, value) pairs.
        if isinstance(other, Mapping):
            for key in other:
                self[key] = other[key]
        elif hasattr(other, "keys"):
            for key in other.keys():
                self[key] = other[key]
        else:
            for key, value in other:
                self[key] = value
        # Keyword arguments are applied last and therefore win.
        for key, value in kwds.items():
            self[key] = value

    def setdefault(self, key, default=None):
        """Return self[key], inserting *default* first when key is missing."""
        try:
            return self[key]
        except KeyError:
            self[key] = default
        return default
#MutableMapping.register(dict)
| gpl-3.0 |
vbelakov/h2o | py/testdir_multi_jvm/notest_import_covtype_parse_2jvm_fvec.py | 9 | 1398 | import unittest, sys, random, time
sys.path.extend(['.','..','../..','py'])
import h2o, h2o_browse as h2b, h2o_import as h2i
class Basic(unittest.TestCase):
    # Python 2 H2O integration test: builds a small two-JVM cloud and
    # repeatedly imports/parses the covtype dataset.

    def tearDown(self):
        # After each test, fail if the H2O sandbox logs contain errors.
        h2o.check_sandbox_for_errors()

    @classmethod
    def setUpClass(cls):
        pass
        print "Will build clouds with incrementing heap sizes and import folder/parse"

    @classmethod
    def tearDownClass(cls):
        # Shut the cloud down even when individual tests already did so.
        h2o.tear_down_cloud()

    def test_import_covtype_parse_2jvm_fvec(self):
        csvFilename = "covtype.data"
        importFolderPath = "standard"
        trialMax = 2
        for tryHeap in [1]:
            print "\n", tryHeap,"GB heap, 2 jvms, import folder, then loop parsing 'covtype.data' to unique keys"
            h2o.init(2, java_heap_GB=tryHeap)
            for trial in range(trialMax):
                # import each time, because h2o deletes source file after parse
                csvPathname = importFolderPath + "/" + csvFilename
                # Unique destination key per trial so parses don't collide.
                hex_key = csvFilename + "_" + str(trial) + ".hex"
                parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, hex_key=hex_key, timeoutSecs=20)
            # sticky ports?
            h2o.tear_down_cloud()
            time.sleep(3)
        # print "Waiting 60 secs for TIME_WAIT sockets to go away"
        # time.sleep(60)
if __name__ == '__main__':
    # Delegate to h2o's unittest entry point when run as a script.
    h2o.unit_main()
| apache-2.0 |
stinebuu/nest-simulator | pynest/nest/tests/test_weights_as_lists.py | 10 | 5009 | # -*- coding: utf-8 -*-
#
# test_weights_as_lists.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
"""
Weights given as lists with the different connection rules
"""
import unittest
import nest
@nest.ll_api.check_stack
class WeightsAsListTestCase(unittest.TestCase):
    """Test weights given as lists with the different connection rules.

    Bug fix: ``list.sort()`` sorts in place and returns ``None``, so the
    previous ``assertEqual(weights.sort(), ref_weights.sort())`` pattern
    compared ``None == None`` and could never fail. Those assertions now
    compare ``sorted(...)`` copies, which are order-independent AND
    actually validate the weight values.
    """

    def setUp(self):
        # Every test starts from a pristine kernel.
        nest.ResetKernel()

    def test_OneToOneWeight(self):
        """Weight given as list, when connection rule is one_to_one"""
        src = nest.Create('iaf_psc_alpha', 3)
        tgt = nest.Create('iaf_psc_delta', 3)

        # weight has to be a list with dimension (n_sources x 1) when one_to_one is used
        ref_weights = [1.2, -3.5, 0.4]

        conn_dict = {'rule': 'one_to_one'}
        syn_dict = {'weight': ref_weights}
        nest.Connect(src, tgt, conn_dict, syn_dict)

        conns = nest.GetConnections()
        weights = conns.weight

        # one_to_one preserves connection order, so compare directly.
        self.assertEqual(weights, ref_weights)

    def test_AllToAllWeight(self):
        """Weight given as list of lists, when connection rule is all_to_all"""
        src = nest.Create('iaf_psc_alpha', 3)
        tgt = nest.Create('iaf_psc_delta', 2)

        # weight has to be a list of lists with dimension (n_target x n_sources) when all_to_all is used
        ref_weights = [[1.2, -3.5, 2.5], [0.4, -0.2, 0.7]]

        conn_dict = {'rule': 'all_to_all'}
        syn_dict = {'weight': ref_weights}
        nest.Connect(src, tgt, conn_dict, syn_dict)

        conns = nest.GetConnections()
        weights = conns.weight

        # Need to flatten ref_weights in order to compare with the weights given by the SynapseCollection.
        ref_weights = [w for sub_weights in ref_weights for w in sub_weights]

        # Connection order is not guaranteed: compare sorted copies.
        self.assertEqual(sorted(weights), sorted(ref_weights))

    def test_FixedIndegreeWeight(self):
        """Weight given as list of list, when connection rule is fixed_indegree"""
        src = nest.Create('iaf_psc_alpha', 5)
        tgt = nest.Create('iaf_psc_delta', 3)

        # weight has to be a list of lists with dimension (n_target x indegree) when fixed_indegree is used
        ref_weights = [[1.2, -3.5], [0.4, -0.2], [0.6, 2.2]]

        conn_dict = {'rule': 'fixed_indegree', 'indegree': 2}
        syn_dict = {'weight': ref_weights}
        nest.Connect(src, tgt, conn_dict, syn_dict)

        conns = nest.GetConnections()
        weights = conns.weight

        # Need to flatten ref_weights in order to compare with the weights given by the SynapseCollection.
        ref_weights = [w for sub_weights in ref_weights for w in sub_weights]

        self.assertEqual(sorted(weights), sorted(ref_weights))

    def test_FixedOutdegreeWeight(self):
        """Weight given as list of lists, when connection rule is fixed_outdegree"""
        src = nest.Create('iaf_psc_alpha', 2)
        tgt = nest.Create('iaf_psc_delta', 5)

        # weight has to be a list of lists with dimension (n_source x outegree) when fixed_outdegree is used
        ref_weights = [[1.2, -3.5, 0.4], [-0.2, 0.6, 2.2]]

        conn_dict = {'rule': 'fixed_outdegree', 'outdegree': 3}
        syn_dict = {'weight': ref_weights}
        nest.Connect(src, tgt, conn_dict, syn_dict)

        conns = nest.GetConnections()
        weights = conns.weight

        # Need to flatten ref_weights in order to compare with the weights given by the SynapseCollection.
        ref_weights = [w for sub_weights in ref_weights for w in sub_weights]

        self.assertEqual(sorted(weights), sorted(ref_weights))

    def test_FixedTotalNumberWeight(self):
        """Weight given as list, when connection rule is fixed_total_number"""
        src = nest.Create('iaf_psc_alpha', 3)
        tgt = nest.Create('iaf_psc_delta', 4)
        conn_dict = {'rule': 'fixed_total_number', 'N': 4}

        # weight has to be a list with dimension (n_conns x 1) when fixed_total_number is used
        ref_weights = [1.2, -3.5, 0.4, -0.2]

        syn_dict = {'weight': ref_weights}
        nest.Connect(src, tgt, conn_dict, syn_dict)

        conns = nest.GetConnections()
        weights = conns.weight

        self.assertEqual(sorted(weights), sorted(ref_weights))
def suite():
    """Build the test suite containing all test* methods of this module."""
    # Return directly instead of shadowing the function name with a local.
    return unittest.makeSuite(WeightsAsListTestCase, 'test')
def run():
    """Execute the module's test suite with a verbose text runner."""
    unittest.TextTestRunner(verbosity=2).run(suite())
if __name__ == "__main__":
    # Allow running this test module directly as a script.
    run()
| gpl-2.0 |
EdisonAlgorithms/HackerRank | practice/ai/machine-learning/stockprediction/stockprediction.py | 3 | 1779 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: Zeyuan Shang
# @Date: 2016-01-13 14:50:43
# @Last Modified by: Zeyuan Shang
# @Last Modified time: 2016-01-13 15:12:39
# Usefull reading:
# http://stackoverflow.com/questions/1989992/predict-stock-market-values
import numpy as np
import math
def read_data():
    # Parse the HackerRank problem input from stdin (Python 2: raw_input/xrange).
    # First line: m (cash available), k (number of stocks), d (days remaining).
    m, k, d = map(float, raw_input().strip().split())
    k, d = int(k), int(d)
    names, owned, prices = [], [], []
    # One line per stock: name, shares owned, then its recent price history.
    for data in xrange(k):
        line = raw_input().strip().split()
        names.append(line[0])
        owned.append(int(line[1]))
        prices.append([float(i) for i in line[2:]])
    return m, k, d, names, owned, prices
def stochastic_oscillator(prices, period):
    """Return the stochastic oscillator (%K, in 0..100) of the last
    *period* entries of *prices*, or 0. when the window is flat."""
    window = prices[-period:]
    lowest, highest = window.min(), window.max()
    if lowest == highest:
        # Flat window: avoid division by zero.
        return 0.
    return abs(100. * (prices[-1] - lowest) / (highest - lowest))
def print_transactions(m, k, d, names, owned, prices):
    # Build BUY/SELL order strings: sell overbought holdings (oscillator
    # >= 80) and greedily buy oversold stocks (oscillator <= 20), most
    # volatile stocks first.
    output = []
    prices = np.array(prices)
    # Per-stock price volatility; higher deviation = considered first.
    deviations = prices.std(1)
    to_buy = []
    for i in reversed(np.argsort(deviations)):
        sa = stochastic_oscillator(prices[i], 3)
        if sa >= 80. and owned[i]:
            output.append('{} {} {}'.format(names[i], 'SELL', owned[i]))
        elif sa <= 20. and m:
            to_buy.append((i, sa, prices[i][-1]))
    # Spend remaining cash on the buy candidates in volatility order,
    # rounding each price up so we never overspend.
    for i, sa, price in to_buy:
        num = int(m / int(math.ceil(price)))
        if num:
            output.append('{} {} {}'.format(names[i], 'BUY', num))
            m -= num * int(math.ceil(price))
    return output
def main():
    # Read the problem input, decide the trades, and print them in the
    # required format: count first, then one transaction per line.
    # (Python 2 print statements.)
    m, k, d, names, owned, prices = read_data()
    output = print_transactions(m, k, d, names, owned, prices)
    print len(output)
    for line in output:
        print line
if __name__ == "__main__":
main() | mit |
twneale/hercules | hercules/dict.py | 1 | 5018 | import functools
from hercules.loop_interface import IteratorWrapperBase
class KeyClobberError(KeyError):
    """Raised when someone tries to overwrite an existing key."""


class NoClobberDict(dict):
    '''An otherwise ordinary dict that complains if you
    try to overwrite any existing keys.

    ``update`` tolerates re-assigning a key to its *current* value;
    ``__setitem__`` always refuses an existing key.
    '''
    # Expose the exception on the class for callers' convenience.
    KeyClobberError = KeyClobberError

    def __setitem__(self, key, val):
        if key in self:
            msg = "Can't overwrite key %r in %r"
            raise KeyClobberError(msg % (key, self))
        else:
            dict.__setitem__(self, key, val)

    def update(self, otherdict=None, **kwargs):
        # Bug fix: the original computed `dupes` only for `otherdict`, so a
        # kwargs-only call raised NameError, and kwargs were never actually
        # checked for clobbering. Check both sources uniformly instead.
        for source in (otherdict or {}, kwargs):
            dupes = set(source) & set(self)
            for dupe in dupes:
                if self[dupe] != source[dupe]:
                    msg = "Can't overwrite keys %r in %r"
                    raise KeyClobberError(msg % (dupes, self))
        dict.update(self, otherdict or {}, **kwargs)
# -----------------------------------------------------------------------------
# Dict filter class.
# -----------------------------------------------------------------------------
class NonExistentHandler(Exception):
    '''Raise if someone tries a dunder query that isn't supported.

    Bug fix: this class previously subclassed ``object``; raising a
    non-Exception instance is a TypeError in Python 3, so it must derive
    from Exception to be raisable (see DictFilterMixin.filter).
    '''
class DictFilterMixin(object):
    '''Mixin for sequence-of-dicts containers adding a ``filter`` method:

        listy = [dict(a=1), dict(a=2), dict(a=3)]
        for dicty in DictFilter(listy).filter(a=1):
            print(dicty)
    '''
    def filter(self, **kwargs):
        '''Yield each dict matching all the given criteria, at most once.

        Plain keywords match by equality; ``key__op`` keywords dispatch to
        a ``handle__<op>`` method. Assumes all the dict's items are hashable.
        '''
        # So we don't return anything more than once.
        yielded = set()

        dunder = '__'
        filter_items = set()
        for k, v in kwargs.items():
            if dunder in k:
                k, op = k.split(dunder)
                handler_name = 'handle__%s' % op
                try:
                    handler = getattr(self, handler_name)
                except AttributeError:
                    # Bug fix: the old message formatted `handler`, which is
                    # unbound when getattr fails (UnboundLocalError). Use the
                    # attribute name we actually looked up.
                    msg = '%s has no %r method to handle operator %r.'
                    raise NonExistentHandler(msg % (self, handler_name, op))
                for dicty in self:
                    if handler(k, v, dicty):
                        dicty_id = id(dicty)
                        if dicty_id not in yielded:
                            yield dicty
                            yielded.add(dicty_id)
            else:
                filter_items.add((k, v))

        # Bug fix: run the equality pass only when plain criteria were given
        # (or no criteria at all, which matches everything). Previously an
        # empty `filter_items` was a subset of every dict, so dunder-only
        # queries re-yielded every dict a second time. Also honor `yielded`
        # here so mixed queries never duplicate.
        if filter_items or not kwargs:
            for dicty in self:
                dicty_items = set(dicty.items())
                if filter_items.issubset(dicty_items):
                    dicty_id = id(dicty)
                    if dicty_id not in yielded:
                        yield dicty
                        yielded.add(dicty_id)

    def handle__in(self, key, value, dicty):
        '''``key__in=value``: True when dicty[key] is a member of value.'''
        dicty_val = dicty[key]
        return dicty_val in value

    def handle__ne(self, key, value, dicty):
        '''``key__ne=value``: True when dicty[key] differs from value.'''
        dicty_val = dicty[key]
        return dicty_val != value
class IteratorDictFilter(IteratorWrapperBase, DictFilterMixin):
    '''A dict filter that wraps an iterator.
    '''
    # No body needed: iteration behavior comes from IteratorWrapperBase
    # and the filtering API from DictFilterMixin.
    pass
def iterdict_filter(f):
    '''Decorator wrapping *f*'s return value in an IteratorDictFilter so the
    resulting iterator supports the DictFilterMixin ``filter`` API.
    '''
    @functools.wraps(f)
    def wrapped(*args, **kwargs):
        return IteratorDictFilter(f(*args, **kwargs))
    return wrapped
class DictSetDefault:
    '''Context manager version of dict.setdefault: yields the current value
    (or the supplied default) and writes it back on exit:

        with DictSetDefault(somedict, key, []) as attr:
            attr.append('something')
        print(somedict[key])
    '''
    def __init__(self, obj, key, default_val):
        self.obj = obj
        self.key = key
        self.default_val = default_val

    def __enter__(self):
        # Remember the value so __exit__ can store it back.
        self.val = self.obj.get(self.key, self.default_val)
        return self.val

    def __exit__(self, exc_type, exc_value, traceback):
        self.obj[self.key] = self.val
class DictSetTemporary:
    # Context manager that applies temporary mutations to a dict and rolls
    # them back on exit: overwritten values are restored from `backup`,
    # and keys that did not exist before are removed via `remove`.

    def __init__(self, dicty):
        self.dicty = dicty
        # Original values of keys we overwrote (restored on exit).
        self.backup = {}
        # Keys that were absent before we set them (deleted on exit).
        self.remove = set()

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        '''Restore the mutated items to the previous state.
        '''
        dicty = self.dicty
        # Restore first, then drop keys we introduced; a key present in
        # both ends up removed, matching its pre-enter absence.
        for key, value in self.backup.items():
            dicty[key] = value
        for key in self.remove:
            dicty.pop(key)

    def __setitem__(self, key, value):
        # Record how to undo this write before performing it.
        if key in self.dicty:
            self.backup[key] = self.dicty.pop(key)
        else:
            self.remove.add(key)
        self.dicty[key] = value

    def __getitem__(self, key):
        return self.dicty[key]

    def __delitem__(self, key):
        # Deletion is undone by restoring the popped value from backup.
        self.backup[key] = self.dicty.pop(key)

    def update(self, dicty=None, **kwargs):
        # Same bookkeeping as __setitem__, applied to both sources
        # (positional mapping first, then keyword arguments).
        for dicty in (dicty or {}, kwargs):
            for key, value in dicty.items():
                if key in self.dicty:
                    self.backup[key] = self.dicty.pop(key)
                else:
                    self.remove.add(key)
                self.dicty[key] = value

    def get(self, key, default=None):
        return self.dicty.get(key, default)
| bsd-3-clause |
BrotherPhil/django | tests/utils_tests/test_safestring.py | 278 | 3677 | from __future__ import unicode_literals
from django.template import Context, Template
from django.test import SimpleTestCase
from django.utils import html, six, text
from django.utils.encoding import force_bytes, force_text
from django.utils.functional import lazy
from django.utils.safestring import (
EscapeData, SafeData, mark_for_escaping, mark_safe,
)
lazystr = lazy(force_text, six.text_type)
lazybytes = lazy(force_bytes, bytes)
class customescape(six.text_type):
    """Text subclass whose __html__ produces deliberately wrong markup, so
    the tests can tell unambiguously when Django used it instead of the
    normal escaping path."""
    def __html__(self):
        # implement specific and obviously wrong escaping
        # in order to be able to tell for sure when it runs
        return self.replace('<', '<<').replace('>', '>>')
class SafeStringTest(SimpleTestCase):
    """Exercises mark_safe / mark_for_escaping semantics via template rendering."""

    def assertRenderEqual(self, tpl, expected, **context):
        # Helper: render `tpl` against `context` and compare the output.
        context = Context(context)
        tpl = Template(tpl)
        self.assertEqual(tpl.render(context), expected)

    def test_mark_safe(self):
        s = mark_safe('a&b')

        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
        self.assertRenderEqual('{{ s|force_escape }}', 'a&b', s=s)

    def test_mark_safe_object_implementing_dunder_html(self):
        # Objects providing __html__ are passed through unchanged.
        e = customescape('<a&b>')
        s = mark_safe(e)
        self.assertIs(s, e)

        self.assertRenderEqual('{{ s }}', '<<a&b>>', s=s)
        self.assertRenderEqual('{{ s|force_escape }}', '<a&b>', s=s)

    def test_mark_safe_lazy(self):
        s = lazystr('a&b')
        b = lazybytes(b'a&b')

        self.assertIsInstance(mark_safe(s), SafeData)
        self.assertIsInstance(mark_safe(b), SafeData)
        self.assertRenderEqual('{{ s }}', 'a&b', s=mark_safe(s))

    def test_mark_safe_object_implementing_dunder_str(self):
        class Obj(object):
            def __str__(self):
                return '<obj>'

        s = mark_safe(Obj())

        self.assertRenderEqual('{{ s }}', '<obj>', s=s)

    def test_mark_safe_result_implements_dunder_html(self):
        self.assertEqual(mark_safe('a&b').__html__(), 'a&b')

    def test_mark_safe_lazy_result_implements_dunder_html(self):
        self.assertEqual(mark_safe(lazystr('a&b')).__html__(), 'a&b')

    def test_mark_for_escaping(self):
        s = mark_for_escaping('a&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
        # Marking an already-marked value must be idempotent.
        self.assertRenderEqual('{{ s }}', 'a&b', s=mark_for_escaping(s))

    def test_mark_for_escaping_object_implementing_dunder_html(self):
        e = customescape('<a&b>')
        s = mark_for_escaping(e)
        self.assertIs(s, e)

        self.assertRenderEqual('{{ s }}', '<<a&b>>', s=s)
        self.assertRenderEqual('{{ s|force_escape }}', '<a&b>', s=s)

    def test_mark_for_escaping_lazy(self):
        s = lazystr('a&b')
        b = lazybytes(b'a&b')

        self.assertIsInstance(mark_for_escaping(s), EscapeData)
        self.assertIsInstance(mark_for_escaping(b), EscapeData)
        self.assertRenderEqual('{% autoescape off %}{{ s }}{% endautoescape %}', 'a&b', s=mark_for_escaping(s))

    def test_mark_for_escaping_object_implementing_dunder_str(self):
        class Obj(object):
            def __str__(self):
                return '<obj>'

        s = mark_for_escaping(Obj())

        self.assertRenderEqual('{{ s }}', '<obj>', s=s)

    def test_add_lazy_safe_text_and_safe_text(self):
        # Concatenating lazy-escaped text with safe text stays safe.
        s = html.escape(lazystr('a'))
        s += mark_safe('&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)

        s = html.escapejs(lazystr('a'))
        s += mark_safe('&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)

        s = text.slugify(lazystr('a'))
        s += mark_safe('&b')
        self.assertRenderEqual('{{ s }}', 'a&b', s=s)
| bsd-3-clause |
AlphaStaxLLC/docker-compose-ui | scripts/requires_auth.py | 5 | 1293 | # inspired by http://flask.pocoo.org/snippets/8/
from functools import wraps
from flask import request, Response
import os
def authentication_enabled():
    """Return True when both basic-auth environment variables are configured.

    Bug fix: ``dict.has_key`` was removed in Python 3; the ``in`` operator
    works identically on Python 2 and 3.
    """
    return 'COMPOSE_USERNAME' in os.environ and 'COMPOSE_PASSWORD' in os.environ
def disable_authentication():
    """Remove the credential environment variables, turning basic auth off.

    Raises KeyError when a variable is already absent (same as the
    original ``del`` statements).
    """
    for var in ('COMPOSE_USERNAME', 'COMPOSE_PASSWORD'):
        del os.environ[var]
def set_authentication(username, password):
    """Store the basic-auth credentials in the process environment."""
    os.environ.update(COMPOSE_USERNAME=username, COMPOSE_PASSWORD=password)
def check_auth(username, password):
    """This function is called to check if a username /
    password combination is valid.
    """
    expected = (os.getenv('COMPOSE_USERNAME'), os.getenv('COMPOSE_PASSWORD'))
    return (username, password) == expected
def authenticate():
    """Sends a 401 response that enables basic auth"""
    # The WWW-Authenticate challenge makes browsers show the basic-auth
    # login dialog for the "docker-compose-ui" realm.
    return Response(
    'Could not verify your access level for that URL.\n'
    'You have to login with proper credentials', 401,
    {'WWW-Authenticate': 'Basic realm="docker-compose-ui"'})
def requires_auth(f):
    """Flask view decorator enforcing HTTP basic auth.

    When no credentials are configured in the environment the view runs
    unprotected; otherwise the request must carry a matching
    Authorization header, or a 401 challenge is returned.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        auth = request.authorization
        if not authentication_enabled() or (auth and check_auth(auth.username, auth.password)):
            return f(*args, **kwargs)
        return authenticate()
    return decorated
| mit |
ageron/tensorflow | tensorflow/compiler/tests/lrn_ops_test.py | 13 | 4787 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for Local Response Normalization ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import numpy as np
from tensorflow.compiler.tests import xla_test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import nn
from tensorflow.python.platform import googletest
CPU_DEVICE = "/job:localhost/replica:0/task:0/cpu:0"
# Local response normalization tests. The forward tests are copied from
# tensorflow/python/kernel_tests/lrn_op_test.py
class LRNTest(xla_test.XLATestCase):
  """Compares the XLA LRN / LRNGrad kernels against a reference."""

  def _LRN(self, input_image, lrn_depth_radius=5, bias=1.0, alpha=1.0,
           beta=0.5):
    """Compute expected result."""
    # Naive NumPy reference: for every (b, r, c, d), divide by
    # (bias + alpha * sum of squares over the depth window) ** beta.
    output = copy.deepcopy(input_image)
    batch_size = input_image.shape[0]
    rows = input_image.shape[1]
    cols = input_image.shape[2]
    depth = input_image.shape[3]
    for b in range(batch_size):
      for r in range(rows):
        for c in range(cols):
          for d in range(depth):
            begin = max(0, d - lrn_depth_radius)
            end = min(depth, d + lrn_depth_radius + 1)
            patch = input_image[b, r, c, begin:end]
            output[b, r, c, d] /= (
                np.power(bias + alpha * np.sum(patch * patch), beta))
    return output

  def _RunAndVerify(self, dtype):
    """Run LRN on random inputs under the test scope and compare with _LRN."""
    with self.cached_session():
      # random shape
      shape = np.random.randint(1, 16, size=4)
      # Make depth at least 2 to make it meaningful
      shape[3] += 1
      p = array_ops.placeholder(dtype, shape=shape)
      # random depth_radius, bias, alpha, beta
      lrn_depth_radius = np.random.randint(1, shape[3])
      bias = 1.0 + np.random.rand()
      alpha = 2.0 * np.random.rand()
      beta = 2.0 * np.random.rand()
      with self.test_scope():
        lrn_t = nn.local_response_normalization(
            p,
            name="lrn",
            depth_radius=lrn_depth_radius,
            bias=bias,
            alpha=alpha,
            beta=beta)
      params = {p: np.random.rand(*shape).astype("f")}
      result = lrn_t.eval(feed_dict=params)
    expected = self._LRN(
        params[p],
        lrn_depth_radius=lrn_depth_radius,
        bias=bias,
        alpha=alpha,
        beta=beta)
    # Max absolute error over the whole output tensor.
    err = np.amax(np.abs(result - expected))
    print("LRN error for bias ", bias, "alpha ", alpha, " beta ", beta, " is ",
          err)
    # Looser tolerance for lower-precision dtypes.
    if dtype == dtypes.float32:
      self.assertTrue(err < 1e-4)
    else:
      self.assertTrue(err < 1e-2)
    self.assertShapeEqual(expected, lrn_t)

  def testCompute(self):
    # Repeat with fresh random shapes/parameters for extra coverage.
    for _ in range(2):
      self._RunAndVerify(dtypes.float32)

  def testLrnGrad(self):
    # Test for LRNGrad that compares against the CPU implementation.
    shape = [1, 2, 3, 4]
    total_size = np.prod(shape)
    in_image_vals = np.arange(1, total_size + 1, dtype=np.float32)
    out_image_vals = np.arange(1, total_size + 1, dtype=np.float32)
    out_grads_vals = np.arange(1, total_size + 1, dtype=np.float32)
    depth_radius = np.random.randint(1, shape[3])
    bias = 1.0 + np.random.rand()
    alpha = 1.0 * np.random.rand()
    beta = 1.0 * np.random.rand()
    with self.cached_session():
      in_image = constant_op.constant(in_image_vals, shape=shape)
      out_image = constant_op.constant(out_image_vals, shape=shape)
      out_grads = constant_op.constant(out_grads_vals, shape=shape)
      # Reference gradient computed on the CPU device; actual on the
      # XLA test device.
      with ops.device(CPU_DEVICE):
        expected = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
                                       depth_radius, bias, alpha, beta)
      with self.test_scope():
        actual = gen_nn_ops.lrn_grad(out_grads, in_image, out_image,
                                     depth_radius, bias, alpha, beta)
      expected_val = self.evaluate(expected)
      actual_val = self.evaluate(actual)
      self.assertAllClose(actual_val, expected_val, rtol=1e-3)
if __name__ == "__main__":
  # Run the tests via TensorFlow's googletest wrapper when executed directly.
  googletest.main()
| apache-2.0 |
interDist/pasportaservo | hosting/forms/places.py | 1 | 11530 | from collections import namedtuple
from datetime import date
from django import forms
from django.contrib.auth import get_user_model
from django.utils.text import format_lazy
from django.utils.translation import ugettext_lazy as _
from core.models import SiteConfiguration
from maps.widgets import MapboxGlWidget
from ..models import Place, Profile
from ..utils import geocode
from ..validators import TooNearPastValidator
User = get_user_model()
class PlaceForm(forms.ModelForm):
    """Model form for editing a Place, with conditional validation:
    some fields become mandatory depending on whether the owner hosts
    guests, meets visitors, or wants to appear in the printed book.
    """

    class Meta:
        model = Place
        fields = [
            'country',
            'state_province',
            'postcode',
            'city',
            'address',
            'closest_city',
            'max_guest', 'max_night', 'contact_before',
            'description', 'short_description',
            'available',
            'tour_guide', 'have_a_drink',
            'sporadic_presence',
            'in_book',
            'conditions',
        ]
        widgets = {
            'short_description': forms.Textarea(attrs={'rows': 3}),
        }

    class _validation_meta:
        # Which fields become mandatory for each usage of the place.
        meeting_required_fields = ['city', ]
        hosting_required_fields = ['address', 'city', 'closest_city', ]
        book_required_fields = [
            'address', 'city', 'closest_city', 'available',
        ]

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['address'].widget.attrs['rows'] = 2
        self.fields['conditions'].help_text = ""
        self.fields['conditions'].widget.attrs['data-placeholder'] = _("Choose your conditions...")

    def clean(self):
        """Cross-field validation: age restrictions and conditionally
        required fields.  Raises form-level ValidationErrors and attaches
        field-level errors as appropriate.
        """
        cleaned_data = super().clean()
        config = SiteConfiguration.get_solo()
        # NOTE(review): direct indexing assumes 'available', 'tour_guide' and
        # 'have_a_drink' always survive field-level cleaning (plausible for
        # non-required BooleanFields) -- confirm no KeyError path exists.
        for_hosting = cleaned_data['available']
        for_meeting = cleaned_data['tour_guide'] or cleaned_data['have_a_drink']

        if any([for_hosting, for_meeting]):
            # Verifies that user is of correct age if they want to host or meet visitors.
            profile = self.profile if hasattr(self, 'profile') else self.instance.owner
            try:
                allowed_age = config.host_min_age if for_hosting else config.meet_min_age
                # A missing birth date falls back to today, which always fails
                # the "too near past" check for a positive minimum age.
                TooNearPastValidator(allowed_age)(profile.birth_date or date.today())
            except forms.ValidationError:
                # Mark the offending checkbox(es) with an empty field error so
                # they are highlighted, then raise one readable form error.
                if for_hosting:
                    self.add_error('available', "")
                    message = _("The minimum age to be allowed hosting is {age:d}.")
                else:
                    if cleaned_data['tour_guide']:
                        self.add_error('tour_guide', "")
                    if cleaned_data['have_a_drink']:
                        self.add_error('have_a_drink', "")
                    message = _("The minimum age to be allowed meeting with visitors is {age:d}.")
                raise forms.ValidationError(format_lazy(message, age=allowed_age))

        # Some fields are required if user wants to host or to meet visitors,
        # or wants their data to be printed in the book.
        Req = namedtuple('Requirements', 'on, required_fields, form_error, field_error')
        requirements = [
            Req(for_hosting, self._validation_meta.hosting_required_fields,
                None,
                forms.ValidationError(_("This field is required if you accept guests."),
                                      code='host_condition')),
            Req(for_meeting, self._validation_meta.meeting_required_fields,
                None,
                forms.ValidationError(_("This field is required if you meet visitors."),
                                      code='host_condition')),
            Req(cleaned_data['in_book'], self._validation_meta.book_required_fields,
                _("You want to be in the printed edition of Pasporta Servo. "
                  "In order to have a quality product, some fields are required. "
                  "If you think there is a problem, please contact us."),
                forms.ValidationError(_("This field is required to be printed in the book."),
                                      code='book_condition')),
        ]
        message = []
        for cond in requirements:
            all_filled = all([cleaned_data.get(field, False) for field in cond.required_fields])
            if cond.on and not all_filled:
                for field in cond.required_fields:
                    # Avoid duplicating an identical error already attached
                    # (same field, same error code) by an earlier requirement.
                    if not cleaned_data.get(field, False) and not self.has_error(field, cond.field_error.code):
                        self.add_error(field, cond.field_error)
                if cond.form_error:
                    # django.forms.ValidationError is iterable over its
                    # messages, so += extends the plain list with them.
                    message += forms.ValidationError(cond.form_error)
        if message:
            raise forms.ValidationError(message)
        return cleaned_data

    def format_address(self, with_street=True):
        """Build a single-line address string for geocoding.

        with_street=False drops the free-form street block (useful when the
        geocoder chokes on whatever users typed there).
        """
        address = {
            'street': self.cleaned_data.get('address').replace('\r\n', ',') if with_street else '',
            'zip': self.cleaned_data.get('postcode').replace(' ', ''),
            'city': self.cleaned_data.get('city'),
            'state': self.cleaned_data.get('state_province'),
        }
        return '{street}, {zip} {city}, {state}'.format(**address).lstrip(', ')

    def save(self, commit=True):
        """Save the place, re-geocoding its location when the residence
        fields changed or no location was stored yet.
        """
        place = super().save(commit=False)
        residence_change = ['country', 'state_province', 'city', 'postcode']
        if (hasattr(self, 'instance') and
                any(field in self.changed_data and field in self.cleaned_data for field in residence_change)):
            # When the user moves to a different country, state, or city their
            # previously saved location (geopoint) is not up-to-date anymore.
            place.location = None
        if place.location is None or place.location.empty:
            # Only recalculate the location if it was not already geocoded before.
            location = geocode(self.format_address(), country=self.cleaned_data['country'], private=True)
            if not location.point and 'address' in self.changed_data:
                # Try again without the address block when location cannot be determined.
                # This is because users often put stuff into the address block, which the
                # poor geocoder has trouble deciphering.
                location = geocode(
                    self.format_address(with_street=False),
                    country=self.cleaned_data['country'], private=True)
            if location.point and location.confidence > 1:
                # https://geocoder.opencagedata.com/api#confidence
                place.location = location.point
                place.location_confidence = location.confidence or 0
        if commit:
            place.save()
        # Exposed so views can report how confident the geocoding was.
        self.confidence = place.location_confidence
        return place
    save.alters_data = True
class PlaceCreateForm(PlaceForm):
    """PlaceForm variant for creating a brand-new place.

    The owning Profile is supplied by the view as the 'profile' keyword
    argument and attached to the place when it is saved.
    """

    def __init__(self, *args, **kwargs):
        # Consume the mandatory owner profile before the parent form
        # inspects the remaining kwargs.
        self.profile = kwargs.pop('profile')
        super().__init__(*args, **kwargs)

    def save(self, commit=True):
        new_place = super().save(commit=False)
        new_place.owner = self.profile
        if commit:
            new_place.save()
        return new_place
    save.alters_data = True
class PlaceLocationForm(forms.ModelForm):
    """Form for pinning a place's location directly on a map widget."""

    class Meta:
        model = Place
        fields = ['location']
        widgets = {
            'location': MapboxGlWidget(),
        }

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['location'].widget.attrs['data-selectable-zoom'] = 11.5

    def save(self, commit=True):
        place = super().save(commit=False)
        # A manually pinned point is fully trusted; clearing the point
        # leaves us with no confidence at all.
        place.location_confidence = 100 if self.cleaned_data.get('location') else 0
        if commit:
            place.save(update_fields=['location', 'location_confidence'])
        return place
class PlaceBlockForm(forms.ModelForm):
    """Form for marking a place unavailable between two dates.

    Only one of the two dates is edited per submission; the hidden 'dirty'
    field names which one, and clean() discards everything else.
    """

    class Meta:
        model = Place
        fields = ['blocked_from', 'blocked_until']

    # Hidden discriminator: its *value* is the name of the date field that
    # was actually edited ('blocked_from' or 'blocked_until').
    dirty = forms.ChoiceField(
        choices=(('blocked_from', 1), ('blocked_until', 2)),
        widget=forms.HiddenInput, label="")

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Shared datepicker / AJAX attributes for both date widgets.
        widget_settings = {
            'data-date-start-date': '-0d',
            'data-date-force-parse': 'false',
            'data-date-autoclose': 'true',
            'placeholder': 'jjjj-mm-tt',
            'data-on-ajax-setup': 'blockPlaceSetup',
            'data-on-ajax-success': 'blockPlaceSuccess',
        }
        widget_classes = ' form-control input-sm ajax-on-change'
        for (field_name, field_label) in (('blocked_from', _("commencing on")),
                                          ('blocked_until', _("concluding on"))):
            field = self.fields[field_name]
            field.label = field_label
            attrs = field.widget.attrs
            attrs.update(widget_settings)
            attrs['class'] = attrs.get('class', '') + widget_classes
            value = self[field_name].value()
            attrs['data-value'] = field.widget.format_value(value) if value is not None else ''

    def clean(self):
        """
        Checks if starting date is earlier than the ending date.
        """
        cleaned_data = super().clean()
        # Keep only the one field named by 'dirty'; the other date is taken
        # from self.instance below.
        cleaned_data = dict((k, v) for k, v in cleaned_data.items()
                            if k == cleaned_data.get('dirty', ''))
        today = date.today()
        # 'or today' makes a missing date pass the past-date check trivially.
        if (cleaned_data.get('blocked_from') or today) < today:
            self.add_error('blocked_from', _("Preferably select a date in the future."))
        if (cleaned_data.get('blocked_until') or today) < today:
            self.add_error('blocked_until', _("Preferably select a date in the future."))
        # Cross-check the edited date against the stored opposite bound.
        if cleaned_data.get('blocked_until') and self.instance.blocked_from:
            if cleaned_data['blocked_until'] <= self.instance.blocked_from:
                raise forms.ValidationError(_("Unavailability should finish after it starts."))
        if cleaned_data.get('blocked_from') and self.instance.blocked_until:
            if cleaned_data['blocked_from'] >= self.instance.blocked_until:
                raise forms.ValidationError(_("Unavailability should start before it finishes."))
        return cleaned_data
class UserAuthorizeForm(forms.Form):
    """Form for granting (or revoking) a user's access by username."""

    user = forms.CharField(
        label=_("Authorize user"),
        max_length=254)
    remove = forms.BooleanField(
        required=False, initial=False,
        widget=forms.widgets.HiddenInput)

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.fields['user'].widget.attrs['placeholder'] = _("username")
        self.fields['user'].widget.attrs['inputmode'] = 'verbatim'

    def clean(self):
        cleaned_data = super().clean()
        if 'user' not in cleaned_data:
            # Field-level validation already failed; nothing more to check.
            return
        user_qualifier = cleaned_data['user']
        if not cleaned_data.get('remove', False):
            # Only validate existence when *granting* access.  The attribute
            # access is deliberate: it is done purely for its side effect of
            # raising Profile.DoesNotExist when no profile is set up.
            try:
                User.objects.get(username=user_qualifier).profile
            except User.DoesNotExist:
                raise forms.ValidationError(_("User does not exist"))
            except Profile.DoesNotExist:
                raise forms.ValidationError(_("User has not set up a profile"))
        return cleaned_data
class UserAuthorizedOnceForm(UserAuthorizeForm):
    """Same contract as UserAuthorizeForm, but the username is carried in a
    hidden input and 'remove' defaults to True.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        user_field = self.fields['user']
        user_field.widget = forms.widgets.HiddenInput()
        self.fields['remove'].initial = True
| agpl-3.0 |
factorlibre/OCB | openerp/addons/base/module/wizard/base_export_language.py | 269 | 3648 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2004-2012 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import base64
import contextlib
import cStringIO
from openerp import tools
from openerp.osv import fields,osv
from openerp.tools.translate import _
from openerp.tools.misc import get_iso_codes
NEW_LANG_KEY = '__new__'
class base_language_export(osv.osv_memory):
    """Transient wizard that exports translations for selected modules as a
    CSV, PO/POT, or TGZ file (Python 2 / legacy OpenERP API).
    """
    _name = "base.language.export"

    def _get_languages(self, cr, uid, context):
        # All translatable languages, plus a pseudo-entry for exporting an
        # empty translation template.
        lang_obj = self.pool.get('res.lang')
        ids = lang_obj.search(cr, uid, [('translatable', '=', True)])
        langs = lang_obj.browse(cr, uid, ids)
        return [(NEW_LANG_KEY, _('New Language (Empty translation template)'))] + [(lang.code, lang.name) for lang in langs]

    _columns = {
        'name': fields.char('File Name', readonly=True),
        'lang': fields.selection(_get_languages, 'Language', required=True),
        'format': fields.selection([('csv','CSV File'),
                                    ('po','PO File'),
                                    ('tgz', 'TGZ Archive')], 'File Format', required=True),
        'modules': fields.many2many('ir.module.module', 'rel_modules_langexport', 'wiz_id', 'module_id', 'Modules To Export', domain=[('state','=','installed')]),
        'data': fields.binary('File', readonly=True),
        'state': fields.selection([('choose', 'choose'), # choose language
                                   ('get', 'get')]) # get the file
    }
    _defaults = {
        'state': 'choose',
        'lang': NEW_LANG_KEY,
        'format': 'csv',
    }

    def act_getfile(self, cr, uid, ids, context=None):
        """Generate the export file, store it base64-encoded on the wizard
        record, and re-open the wizard in its download ('get') state.
        """
        this = self.browse(cr, uid, ids, context=context)[0]
        # The template pseudo-language is exported as "no language" (False).
        lang = this.lang if this.lang != NEW_LANG_KEY else False
        mods = sorted(map(lambda m: m.name, this.modules)) or ['all']
        with contextlib.closing(cStringIO.StringIO()) as buf:
            tools.trans_export(lang, mods, buf, this.format, cr)
            out = base64.encodestring(buf.getvalue())
        # Pick a download file name: language code, single module name, or 'new'.
        filename = 'new'
        if lang:
            filename = get_iso_codes(lang)
        elif len(mods) == 1:
            filename = mods[0]
        extension = this.format
        if not lang and extension == 'po':
            # A PO file without a language is a template, conventionally .pot.
            extension = 'pot'
        name = "%s.%s" % (filename, extension)
        this.write({ 'state': 'get', 'data': out, 'name': name })
        return {
            'type': 'ir.actions.act_window',
            'res_model': 'base.language.export',
            'view_mode': 'form',
            'view_type': 'form',
            'res_id': this.id,
            'views': [(False, 'form')],
            'target': 'new',
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
openhatch/oh-mainline | mysite/profile/models.py | 6 | 27395 | # This file is part of OpenHatch.
# Copyright (C) 2010 Parker Phinney
# Copyright (C) 2009, 2010 OpenHatch, Inc.
# Copyright (C) 2010 Mark Freeman
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import cgi
import collections
import datetime
import hashlib
import logging
import os.path
import random
import re
import shutil
import uuid
import urllib
from django.conf import settings
from django.contrib.auth import SESSION_KEY, BACKEND_SESSION_KEY, load_backend
from django.contrib.auth.models import User
from django.core.files.base import ContentFile
from django.core.urlresolvers import reverse
from django.db import models
from django.db.models import Q
from django.utils import http
from mysite.search.models import Project, get_image_data_scaled
import mysite.customs.models
import mysite.profile.view_helpers
import mysite.base.models
DEFAULT_LOCATION = 'Inaccessible Island'
DEFAULT_LATITUDE = -37.3049962
DEFAULT_LONGITUDE = -12.6790445
logger = logging.getLogger(__name__)
def url2printably_short(url, CUTOFF=50):
    """Shorten a URL for display.

    Pieces (split on '/') are kept verbatim while they fit within CUTOFF
    characters; the first over-long piece is truncated with an ellipsis and
    everything after it is dropped.
    """
    kept_pieces = []
    for piece in url.split("/"):
        if len(piece) <= CUTOFF:
            kept_pieces.append(piece)
            continue
        # Truncate this piece and stop: nothing after it is shown.
        kept_pieces.append(piece[:CUTOFF - 3] + '...')
        break
    return '/'.join(kept_pieces)
def generate_person_photo_path(instance, filename, suffix=""):
    """Return a random, collision-resistant storage name for a profile photo.

    The model instance and the uploaded file's original name are ignored;
    the name is a fresh UUID4 hex string plus an optional suffix.
    """
    return "%s%s" % (uuid.uuid4().hex, suffix)
class RepositoryCommitter(models.Model):
    """Ok, so we need to keep track of repository committers, e.g.
    paulproteus@fspot
    That's because when a user says, 'oy, this data you guys imported isn't
    mine', what she or he is typically saying is something like
    'Don't give me any more data from Ohloh pertaining to this dude named
    mickey.mouse@strange.ly checking code into F-Spot.'"""
    # The project the disowned commits belong to.  Referenced by
    # Person.blacklisted_repository_committers.
    project = models.ForeignKey(Project)
class Person(models.Model):
    """ A human bean.

    One-to-one companion of django.contrib.auth.models.User, holding the
    OpenHatch profile data: photos, location, tags, portfolio, and contact
    preferences.
    """
    homepage_url = models.URLField(default="", blank=True)
    user = models.ForeignKey(User, unique=True)
    gotten_name_from_ohloh = models.BooleanField(default=False)
    # Epoch-ish sentinel meaning "never polled".
    last_polled = models.DateTimeField(default=datetime.datetime(1970, 1, 1))
    show_email = models.BooleanField(default=False)
    bio = models.TextField(blank=True)
    contact_blurb = models.TextField(blank=True)
    expand_next_steps = models.BooleanField(default=True)
    # The photo plus three derived thumbnails; all are stored under random
    # UUID-based names (see generate_person_photo_path).
    photo = models.ImageField(
        upload_to=lambda a, b: 'static/photos/profile-photos/' + generate_person_photo_path(a, b),
        default=''
    )
    photo_thumbnail = models.ImageField(
        upload_to=lambda a, b: 'static/photos/profile-photos/' + generate_person_photo_path(
            a, b, suffix="-thumbnail"),
        default='',
        null=True
    )
    photo_thumbnail_30px_wide = models.ImageField(
        upload_to=lambda a, b: 'static/photos/profile-photos/' + generate_person_photo_path(
            a, b, suffix="-thumbnail-30px-wide"),
        default='',
        null=True
    )
    photo_thumbnail_20px_wide = models.ImageField(
        upload_to=lambda a, b: 'static/photos/profile-photos/' + generate_person_photo_path(
            a, b, suffix="-thumbnail-20px-wide"),
        default='',
        null=True
    )
    blacklisted_repository_committers = models.ManyToManyField(RepositoryCommitter)
    dont_guess_my_location = models.BooleanField(default=False)
    location_confirmed = models.BooleanField(default=False)
    location_display_name = models.CharField(
        max_length=255,
        blank=True,
        default=DEFAULT_LOCATION,
        verbose_name='Location'
    )
    # Defaults match DEFAULT_LATITUDE / DEFAULT_LONGITUDE above.
    latitude = models.FloatField(null=False, default=-37.3049962)
    longitude = models.FloatField(null=False, default=-12.6790445)
    email_me_re_projects = models.BooleanField(
        default=True,
        verbose_name='Email me periodically about activity in my projects'
    )
    irc_nick = models.CharField(max_length=30, blank=True, null=True)

    @staticmethod
    def create_dummy(first_name="", email=None, **kwargs):
        """ Creates a dummy user """
        # Generate a random string to use as a username. Keep it short
        # so that it doesn't overflow the username field!
        username = uuid.uuid4().hex[:16]
        if email is None:
            email = "%s@example.com" % username
        user = User(username=username, first_name=first_name, email=email)
        data = {'user': user}
        # If the caller of create_dummy passes in a user, then we won't use the
        # dummy user defined above
        data.update(kwargs)
        # Save the user after the update, so we don't save a new user if one
        # was never needed
        user = data['user']
        user.save()
        person = user.get_profile()
        for key, value in data.items():
            setattr(person, key, value)
        person.save()
        return person

    def location_is_public(self):
        """ If you change this method, change the method (Person.inaccessible_islanders) too """
        return self.location_confirmed and self.location_display_name

    @staticmethod
    def inaccessible_islanders():
        """ If you change this method, change the method (location_is_public) too """
        return Person.objects.filter(Q(location_confirmed=False) | Q(location_display_name=''))

    def get_public_location_or_default(self):
        # The "default" is the fictional Inaccessible Island placeholder.
        if self.location_is_public():
            return self.location_display_name
        else:
            return DEFAULT_LOCATION

    def get_public_latitude_or_default(self):
        if self.location_is_public():
            return self.latitude
        else:
            return DEFAULT_LATITUDE

    def get_public_longitude_or_default(self):
        if self.location_is_public():
            return self.longitude
        else:
            return DEFAULT_LONGITUDE

    def __unicode__(self):
        return "username: %s, name: %s %s" % (
            self.user.username,
            self.user.first_name,
            self.user.last_name
        )

    def get_photo_url_or_default(self):
        # IOError: missing file on disk; ValueError: no file associated.
        try:
            return self.photo.url
        except (IOError, ValueError):
            return '/static/images/profile-photos/penguin.png'

    @staticmethod
    def get_from_session_key(session_key):
        """Based almost entirely on http://www.djangosnippets.org/snippets/1276/ Thanks jdunck!"""
        session_engine = __import__(settings.SESSION_ENGINE, {}, {}, [''])
        session_wrapper = session_engine.SessionStore(session_key)
        user_id = session_wrapper.get(SESSION_KEY)
        auth_backend = load_backend(session_wrapper.get(BACKEND_SESSION_KEY))
        if user_id and auth_backend:
            return Person.objects.get(user=auth_backend.get_user(user_id))
        else:
            return None

    def get_photo_thumbnail_url_or_default(self):
        try:
            return self.photo_thumbnail.url
        except (IOError, ValueError):
            return '/static/images/profile-photos/penguin-40px.png'

    def get_photo_thumbnail_width(self):
        try:
            return self.photo_thumbnail.width
        except (IOError, ValueError):
            # Dimensions of the fallback penguin image.
            return 40

    def get_photo_thumbnail_height(self):
        try:
            return self.photo_thumbnail.height
        except (IOError, ValueError):
            return 51

    def get_photo_thumbnail_30px_wide_url_or_default(self):
        try:
            return self.photo_thumbnail_30px_wide.url
        except (IOError, ValueError):
            return '/static/images/profile-photos/penguin-30px.png'

    def get_photo_thumbnail_20px_wide_url_or_default(self):
        try:
            return self.photo_thumbnail_20px_wide.url
        except (IOError, ValueError):
            return '/static/images/profile-photos/penguin-20px.png'

    def get_photo_thumbnail_width_20px(self):
        try:
            return self.photo_thumbnail_20px_wide.width
        except (IOError, ValueError):
            return 20

    def get_photo_thumbnail_height_20px(self):
        try:
            return self.photo_thumbnail_20px_wide.height
        except (IOError, ValueError):
            return 20

    def get_published_portfolio_entries(self):
        """Published, non-deleted portfolio entries for this person."""
        return PortfolioEntry.published_ones.filter(person=self)

    def get_nonarchived_published_portfolio_entries(self):
        return PortfolioEntry.published_ones.filter(person=self, is_archived=False)

    def get_maintainer_portfolio_entries(self):
        """
        Return the PortfolioEntries that this person wants to receive from
        maintainer updates.
        """
        return PortfolioEntry.published_ones.filter(
            person=self,
            receive_maintainer_updates=True
        )

    def get_list_of_all_published_projects(self):
        # This method looks familiar but testing -- jl
        return self.get_published_portfolio_entries()

    def get_list_of_all_project_names(self):
        # if you change this method, be sure to increment the version number in
        # the cache key above
        return list(
            self.get_published_portfolio_entries().values_list(
                'project__name',
                flat=True
            ).distinct()
        )

    def get_display_names_of_nonarchived_projects(self):
        return list(
            self.get_nonarchived_published_portfolio_entries().values_list(
                'project__display_name',
                flat=True
            ).distinct()
        )

    @staticmethod
    def only_terms_with_results(terms):
        # Remove terms whose hit counts are zero.
        # NOTE(review): this uses mysite.search.view_helpers, which is not
        # imported at the top of this file; presumably it is loaded by the
        # time this runs -- confirm, or add the import.
        terms_with_results = []
        for term in terms:
            query = mysite.search.view_helpers.Query(terms=[term])
            hit_count = query.get_or_create_cached_hit_count()
            if hit_count != 0:
                terms_with_results.append(term)
        return terms_with_results

    def get_recommended_search_terms(self):
        # Recommendations can be globally disabled via settings.RECOMMEND_BUGS.
        if settings.RECOMMEND_BUGS:
            return self._get_recommended_search_terms()
        return []

    def _get_recommended_search_terms(self):
        """Collect candidate search terms from citations, projects and tags,
        deduplicate them case-insensitively, and keep only those that
        actually produce search hits."""
        terms = []
        # Add terms based on languages in citations
        citations = self.get_published_citations_flat()
        for c in citations:
            terms.extend(c.get_languages_as_list())
        # Add terms based on projects in citations
        terms.extend(
            [pfe.project.name
             for pfe in self.get_published_portfolio_entries()
             if pfe.project.name and pfe.project.name.strip()]
        )
        # Add terms based on tags
        terms.extend([tag.text for tag in self.get_tags_for_recommendations()])
        # Remove duplicates
        terms = sorted(set(terms), key=lambda s: s.lower())
        return Person.only_terms_with_results(terms)

    # FIXME: Add support for recommended projects.
    # FIXME: Add support for recommended project tags.

    def get_published_citations_flat(self):
        # Flatten the per-entry citation querysets into one list.
        return sum(
            [list(pfe.get_published_citations())
             for pfe in self.get_published_portfolio_entries()],
            []
        )

    def get_tag_texts_for_map(self):
        """ Return a list of Tags linked to this Person. """
        # Case-insensitive deduplication done in SQL where possible, with a
        # final Python pass to keep the first spelling of each lowername.
        my_tag_texts = Tag.objects.filter(
            link_person_tag__person=self).extra(select={'lowername': 'LOWER(text)'})
        without_irrelevant_tags = my_tag_texts.exclude(
            tag_type__name__in=['understands_not', 'studying'])
        just_distinct_lowername = without_irrelevant_tags.values(
            'lowername').distinct().order_by('lowername')
        text_and_lower = just_distinct_lowername.values_list('lowername', 'text')
        lower_set_so_far = set()
        ret = []
        for (lower, text) in text_and_lower:
            if lower in lower_set_so_far:
                continue
            ret.append(text)
            lower_set_so_far.add(lower)
        return ret

    def get_tags_as_dict(self):
        """Map tag-type name -> set of lowercased tag texts for this person."""
        ret = collections.defaultdict(set)
        for link in self.link_person_tag_set.all():
            ret[link.tag.tag_type.name].add(link.tag.text.lower())
        return ret

    def get_tag_descriptions_for_keyword(self, keyword):
        """Human-readable tag-type descriptions under which *keyword* appears."""
        keyword = keyword.lower()
        d = self.get_tags_as_dict()
        return sorted(
            [TagType.short_name2long_name[short]
             for short in [key for key in d if (keyword in d[key])]]
        )

    def get_tags_for_recommendations(self):
        """Return a list of Tags linked to this Person. For use with bug recommendations."""
        exclude_me = TagType.objects.filter(name='understands_not')
        return [link.tag
                for link in self.link_person_tag_set.all()
                if link.tag.tag_type not in exclude_me]

    def get_full_name(self):
        name = self.user.first_name
        if self.user.first_name and self.user.last_name:
            name += " "
        name += self.user.last_name
        return name

    def get_full_name_with_nbsps(self):
        # NOTE(review): given the method name, the re.sub replacement is
        # presumably a non-breaking space (or '&nbsp;'); confirm the literal
        # survived text encoding.
        full_name = self.get_full_name()
        full_name_escaped = cgi.escape(full_name)
        full_name_escaped_with_nbsps = re.sub("\s+", " ", full_name_escaped)
        return full_name_escaped_with_nbsps

    def get_full_name_or_username(self):
        return self.get_full_name() or self.user.username

    def get_full_name_and_username(self):
        # Renders as 'Full Name (username)' when any name part is set,
        # otherwise just 'username'.
        full_name_start = (
            "%s (" % self.get_full_name() if self.user.first_name or self.user.last_name else ""
        )
        full_name_end = (
            ")" if self.user.first_name or self.user.last_name else ""
        )
        return "%s%s%s" % (full_name_start, self.user.username, full_name_end)

    def generate_thumbnail_from_photo(self):
        """Regenerate the 40/30/20px-wide thumbnails from the stored photo."""
        if self.photo:
            width = 40
            self.photo.file.seek(0)
            scaled_down = get_image_data_scaled(self.photo.file.read(), width)
            self.photo_thumbnail.save('', ContentFile(scaled_down))
            width = 30
            self.photo.file.seek(0)
            scaled_down = get_image_data_scaled(self.photo.file.read(), width)
            self.photo_thumbnail_30px_wide.save('', ContentFile(scaled_down))
            width = 20
            self.photo.file.seek(0)
            scaled_down = get_image_data_scaled(self.photo.file.read(), width)
            self.photo_thumbnail_20px_wide.save('', ContentFile(scaled_down))

    def get_collaborators_for_landing_page(self, n=9):
        """Pick up to n other contributors across this person's projects,
        interleaved round-robin so no single project dominates."""
        projects = set([e.project for e in self.get_published_portfolio_entries()])
        infinity = 10000
        collaborator_lists = []
        for project in projects:
            people = project.get_n_other_contributors_than(n=infinity, person=self)
            people = random.sample(people, min(n, len(people)))
            collaborator_lists.append(people)
        round_robin = mysite.profile.view_helpers.roundrobin(*collaborator_lists)
        collaborators = set()
        while len(collaborators) < n:
            try:
                # Python 2 generator protocol.
                collaborators.add(round_robin.next())
            except StopIteration:
                break
        collaborators = list(collaborators)
        # don't forget, this has a side effect and returns None
        random.shuffle(collaborators)
        return collaborators

    @property
    def profile_url(self):
        # NOTE(review): mysite.profile.views is not imported in this module's
        # header; presumably imported elsewhere before first use -- confirm.
        return reverse(mysite.profile.views.display_person_web,
                       kwargs={'user_to_display__username': self.user.username})

    @staticmethod
    def get_by_username(username):
        return Person.objects.get(user__username=username)

    def should_be_nudged_about_location(self):
        return not self.location_confirmed and not self.dont_guess_my_location

    def get_coolness_factor(self, unhashed_tiebreaker):
        """This function's output is used as the sort order in (at least) the periodic emails.
        You can be more cool if you:
        * Have projects
        * Have a picture
        * Have user tags set
        * Are a wannahelper of something
        and finally we break ties by get_full_name_or_username(), just so that
        we have a predictable sort.
        ."""
        # SHA-1 of the tiebreaker gives a stable but arbitrary final ordering.
        hashed_tiebreaker = hashlib.sha1(unhashed_tiebreaker).hexdigest()
        factor = (bool(self.get_list_of_all_project_names()),
                  bool(self.get_tags_as_dict()),
                  bool(self.photo),
                  bool(self.projects_i_wanna_help),
                  hashed_tiebreaker)
        return factor

    def generate_new_unsubscribe_token(self):
        """Create, persist and return a fresh UnsubscribeToken for this person."""
        token = UnsubscribeToken(string=uuid.uuid4().hex, owner=self)
        token.save()
        return token
def create_profile_when_user_created(instance, created, raw, *args, **kwargs):
    """
    Post-save hook for Users. raw is populated from kwargs.
    See Django docs on Signals:
    https://docs.djangoproject.com/en/dev/ref/signals/#post-save
    """
    # Skip fixture loading (raw) and plain updates; only brand-new users
    # need a profile created for them.
    if raw or not created:
        return
    Person.objects.get_or_create(user=instance)


models.signals.post_save.connect(create_profile_when_user_created, User)
def update_the_project_cached_contributor_count(sender, instance, **kwargs):
    """post_save hook for PortfolioEntry: refresh the cached contributor
    count on the entry's project (connected near the bottom of this file)."""
    instance.project.update_cached_contributor_count_and_save()
class TagType(models.Model):
    """A category of tag, e.g. 'understands' or 'can_mentor'."""
    # Maps the short machine name to the phrase shown in the UI.
    short_name2long_name = {'understands': 'understands',
                            'can_mentor': 'can mentor in',
                            'can_pitch_in': 'can pitch in with',
                            'understands_not': 'will never understand',
                            'studying': 'currently studying'}
    name = models.CharField(max_length=100)

    def __unicode__(self):
        return self.name
class Tag(models.Model):
    """A piece of text categorized by a TagType, linked to people and
    projects via the Link_Person_Tag / Link_Project_Tag tables."""
    text = models.CharField(null=False, max_length=255)
    tag_type = models.ForeignKey(TagType)

    @property
    def name(self):
        """Alias for .text, for callers that expect a .name attribute."""
        return self.text

    def save(self, *args, **kwargs):
        """Persist the tag, refusing to store one with empty text.

        Raises:
            ValueError: if self.text is empty or otherwise falsy.
        """
        # Previously this raised a bare ValueError with no message, which
        # made the failure hard to diagnose; keep the exception type (so
        # existing callers' except clauses still match) but say why.
        if not self.text:
            raise ValueError("Cannot save a Tag with empty text.")
        return super(Tag, self).save(*args, **kwargs)

    def __unicode__(self):
        return "%s: %s" % (self.tag_type.name, self.text)
class Link_Project_Tag(models.Model):
    """Many-to-many relation between Projects and Tags."""
    tag = models.ForeignKey(Tag)
    project = models.ForeignKey(Project)
    # Free-form note of where this association came from (e.g. an importer).
    source = models.CharField(max_length=200)
class Link_Person_Tag(models.Model):
    """Many-to-many relation between Person and Tags."""
    tag = models.ForeignKey(Tag)
    person = models.ForeignKey(Person)
    # Free-form note of where this association came from (e.g. an importer).
    source = models.CharField(max_length=200)
class PublishedPortfolioEntries(models.Manager):
    """Manager restricting PortfolioEntry to published, non-deleted rows."""
    def get_query_set(self):
        return super(PublishedPortfolioEntries, self).get_query_set().filter(is_deleted=False,
                                                                             is_published=True)
class PortfolioEntry(models.Model):
    """One person's involvement with one project, shown on their profile."""
    # Managers
    objects = models.Manager()
    published_ones = PublishedPortfolioEntries()
    # FIXME: Constrain this so (person, project) pair uniquely finds a PortfolioEntry
    person = models.ForeignKey(Person)
    project = models.ForeignKey(Project)
    project_description = models.TextField(blank=True)
    experience_description = models.TextField(blank=True)
    # Note: the default is the *callable* utcnow, evaluated per row.
    date_created = models.DateTimeField(default=datetime.datetime.utcnow)
    is_published = models.BooleanField(default=False)
    is_deleted = models.BooleanField(default=False)
    is_archived = models.BooleanField(default=False)
    sort_order = models.IntegerField(default=0)
    use_my_description = models.BooleanField(default=True, verbose_name='')
    receive_maintainer_updates = models.BooleanField(default=True)

    def get_published_citations(self):
        """Published citations for this entry, minus trashed/duplicate ones."""
        return Citation.untrashed.filter(portfolio_entry=self, is_published=True)

    @staticmethod
    def create_dummy(**kwargs):
        # Test helper: the random description makes each dummy distinct.
        data = {'project_description': "DESCRIPTION-----------------------------" + uuid.uuid4().hex}
        data.update(kwargs)
        ret = PortfolioEntry(**data)
        ret.save()
        return ret

    @staticmethod
    def create_dummy_with_project(**kwargs):
        # Test helper: like create_dummy, but also fabricates a Project.
        data = {'project': Project.create_dummy()}
        data.update(kwargs)
        return PortfolioEntry.create_dummy(**data)

    class Meta:
        ordering = ('-sort_order', '-id')
class UntrashedCitationManager(models.Manager):
    """Manager hiding citations that are duplicates, deleted, or attached
    to a deleted portfolio entry."""
    def get_query_set(self):
        return super(UntrashedCitationManager, self).get_query_set().filter(
            # Was the citation superseded by a previously imported
            # equivalent?
            ignored_due_to_duplicate=False,
            # Was the citation deleted?
            is_deleted=False,
            # Was its portfolio entry deleted?
            portfolio_entry__is_deleted=False)
class Citation(models.Model):
    """A single piece of evidence (a commit, a repo, ...) supporting a
    PortfolioEntry."""
    portfolio_entry = models.ForeignKey(PortfolioEntry)  # [0]
    url = models.URLField(null=True, verbose_name="URL")
    contributor_role = models.CharField(max_length=200, null=True)
    # Comma-separated language names; see get_languages_as_list().
    languages = models.CharField(max_length=200, null=True)
    first_commit_time = models.DateTimeField(null=True)
    date_created = models.DateTimeField(default=datetime.datetime.utcnow)
    is_published = models.BooleanField(default=False)  # unpublished == Unread
    is_deleted = models.BooleanField(default=False)
    ignored_due_to_duplicate = models.BooleanField(default=False)
    old_summary = models.TextField(null=True, default=None)
    objects = models.Manager()
    untrashed = UntrashedCitationManager()

    @property
    def summary(self):
        """Short display text: a shortened URL, else a languages blurb.
        Returns None when neither url nor languages is set."""
        # FIXME: Pluralize correctly.
        # FIXME: Use "since year_started"
        if self.url is not None:
            return url2printably_short(self.url, CUTOFF=38)
        elif self.languages is not None:
            return "Coded in %s." % (self.languages,)

    def get_languages_as_list(self):
        """Split the comma-separated languages field; [] when unset."""
        if self.languages is None:
            return []
        return [lang.strip() for lang in self.languages.split(",") if lang.strip()]

    def get_url_or_guess(self):
        # Returns None implicitly when no URL is stored.
        if self.url:
            return self.url

    def save_and_check_for_duplicates(self):
        """Save, first flagging this citation as a duplicate if a sibling
        citation on the same portfolio entry has the same summary."""
        # FIXME: Cache summaries in the DB so this query is faster.
        duplicates = [citation for citation in
                      Citation.objects.filter(portfolio_entry=self.portfolio_entry)
                      if (citation.pk != self.pk) and (citation.summary == self.summary)]
        if duplicates:
            self.ignored_due_to_duplicate = True
        return self.save()

    # [0]: FIXME: Let's learn how to use Django's ManyToManyField etc.

    def __unicode__(self):
        if self.pk is not None:
            pk = self.pk
        else:
            pk = 'unassigned'
        return "pk=%s, summary=%s" % (pk, self.summary)
class Forwarder(models.Model):
    """A temporary email alias (address@FORWARDER_DOMAIN) that relays mail
    to a user's real address, so profiles can show a contact address
    without exposing the real one."""
    user = models.ForeignKey(User)
    address = models.TextField()
    expires_on = models.DateTimeField(default=datetime.datetime(1970, 1, 1))
    stops_being_listed_on = models.DateTimeField(
        default=datetime.datetime(1970, 1, 1))
    # note about the above: for 3 days, 2 forwarders for the same user work.
    # at worst, you visit someone's profile and find a forwarder that works for 3 more days
    # at best, you visit someone's profile and find a forwarder that works for 5 more days

    def generate_table_line(self):
        """One 'alias real-address' line for the postfix virtual table."""
        line = '%s %s' % (self.get_email_address(), self.user.email)
        return line

    def get_email_address(self):
        return self.address + "@" + settings.FORWARDER_DOMAIN

    @staticmethod
    def garbage_collect():
        """Delete expired forwarders and mint fresh ones for users whose
        contact blurb requests one ('$fwd').  Returns True when anything
        changed, so callers know to regenerate the postfix table."""
        made_any_changes = False
        # First, delete any Forwarders that ought to be destroyed.
        now = datetime.datetime.utcnow()
        expirable = Forwarder.objects.filter(expires_on__lt=now)
        if expirable:
            made_any_changes = True
            expirable.delete()
        # Second, for users who have '$fwd' in their blurb, if they have no
        # corresponding Forwarder object that we can list on the site, give
        # them one.
        user_ids_that_need_forwarders = Person.objects.filter(
            contact_blurb__contains='$fwd').values_list('user_id', flat=True)
        user_ids_with_up_to_date_forwarders = Forwarder.objects.filter(
            user__id__in=user_ids_that_need_forwarders,
            stops_being_listed_on__gt=now).values_list('user__id', flat=True)
        user_ids_needing_regeneration = (set(user_ids_that_need_forwarders).difference(
            set(user_ids_with_up_to_date_forwarders)))
        users_needing_regeneration = [User.objects.get(pk=pk) for pk in user_ids_needing_regeneration]
        for user in users_needing_regeneration:
            # NOTE(review): mysite.base.view_helpers is not imported in this
            # module's header (only mysite.base.models is); presumably loaded
            # elsewhere before this runs -- confirm.
            mysite.base.view_helpers.generate_forwarder(user)
            made_any_changes = True
        return made_any_changes

    @staticmethod
    def generate_list_of_lines_for_postfix_table():
        """All table lines for forwarders whose owner has a real email set."""
        lines = []
        for live_forwarder in Forwarder.objects.all():
            if live_forwarder.user.email:
                line = live_forwarder.generate_table_line()
                lines.append(line)
        return lines
class UnsubscribeToken(mysite.search.models.OpenHatchModel):
    """A one-per-email unsubscribe token; only honored for 90 days after creation."""
    string = models.CharField(null=False, blank=False, unique=True, max_length=255)
    owner = models.ForeignKey(Person)

    @staticmethod
    def whose_token_string_is_this(string):
        """Return the Person owning this token string, or None if unknown or expired."""
        oldest_valid = datetime.datetime.utcnow() - datetime.timedelta(days=90)
        try:
            token = UnsubscribeToken.objects.get(
                string=string, created_date__gte=oldest_valid)
        except UnsubscribeToken.DoesNotExist:
            return None
        return token.owner
def make_forwarder_actually_work(sender, instance, **kwargs):
    """post_save hook: regenerate the Postfix alias table whenever a Forwarder changes."""
    # Deferred import -- presumably avoids an import cycle at module load; verify.
    from mysite.profile.tasks import RegeneratePostfixAliasesForForwarder
    regenerate = RegeneratePostfixAliasesForForwarder()
    regenerate.run()
# Keep the per-project contributor count cache fresh, and rewrite the Postfix
# aliases whenever a Forwarder row is saved.
models.signals.post_save.connect(update_the_project_cached_contributor_count,
                                 sender=PortfolioEntry)
models.signals.post_save.connect(make_forwarder_actually_work, sender=Forwarder)

# The following signals are here so that we clear the cached list
# of people for the map whenever Person, PortfolioEntry, or LinkPersonTag
# change.


def flush_map_json_cache(*args, **kwargs):
    """Remove the on-disk '+cacheable' directory so map JSON is rebuilt lazily."""
    path = os.path.join(settings.WEB_ROOT, '+cacheable')
    # ignore_errors: the cache directory may legitimately not exist yet.
    shutil.rmtree(path, ignore_errors=True)

models.signals.post_save.connect(flush_map_json_cache, sender=PortfolioEntry)
models.signals.post_save.connect(flush_map_json_cache, sender=Person)
models.signals.post_save.connect(flush_map_json_cache, sender=Link_Person_Tag)
| agpl-3.0 |
krasch/smart-assistants | examples/visualize_habits.py | 1 | 1689 | # -*- coding: UTF-8 -*-
"""
Plot visualization of user habits, i.e. show which actions typically follow some given user action.
Note: the figure for "Frontdoor=Closed" slightly deviates from Figure 1 in the paper and Figure 5.1 in the
dissertation (see paper_experiments.py for bibliographical information). The number of observed actions was reported
correctly in the paper/dissertation, however there was an issue with ordering which actions occur most commonly,
which resulted in "Open cups cupboard" being erroneously included in the figure. Despite this issue, the main point
of the figure still stands: the user has some observable habits after closing the frontdoor.
"""
import sys
sys.path.append("..")
import pandas
from recsys.classifiers.temporal import TemporalEvidencesClassifier
from recsys.classifiers.binning import initialize_bins
from recsys.dataset import load_dataset
from evaluation import plot
import config
#configuration
data = load_dataset("../datasets/houseA.csv", "../datasets/houseA.config")
#fit classifier to dataset
cls = TemporalEvidencesClassifier(data.features, data.target_names, bins=initialize_bins(0, 300, 10))
cls = cls.fit(data.data, data.target)
#create visualizations of habits around each user action
plot_conf = plot.plot_config(config.plot_directory, sub_dirs=[data.name, "habits"], img_type=config.img_type)
for source in cls.sources.values():
observations = pandas.DataFrame(source.temporal_counts)
observations.columns = data.target_names
observations.index = cls.bins
plot.plot_observations(source.name(), observations, plot_conf)
print "Results can be found in the \"%s\" directory" % config.plot_directory
| mit |
teamfx/openjfx-9-dev-rt | modules/javafx.web/src/main/native/Source/JavaScriptCore/Scripts/builtins/builtins_generate_combined_header.py | 3 | 6346 | #!/usr/bin/env python
#
# Copyright (c) 2014, 2015 Apple Inc. All rights reserved.
# Copyright (c) 2014 University of Washington. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY APPLE INC. AND ITS CONTRIBUTORS ``AS IS''
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL APPLE INC. OR ITS CONTRIBUTORS
# BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
# THE POSSIBILITY OF SUCH DAMAGE.
import logging
import re
import string
from string import Template
from builtins_generator import BuiltinsGenerator
from builtins_templates import BuiltinsGeneratorTemplates as Templates
log = logging.getLogger('global')
class BuiltinsCombinedHeaderGenerator(BuiltinsGenerator):
    """Emits a single combined <Namespace>Builtins.h declaring, for every
    builtin function, its source/length/construct-ability externs plus the
    X-macro tables used to enumerate them."""

    def __init__(self, model):
        BuiltinsGenerator.__init__(self, model)

    def output_filename(self):
        # e.g. "JSCBuiltins.h" when the framework's namespace setting is "JSC".
        return "%sBuiltins.h" % self.model().framework.setting('namespace')

    def generate_output(self):
        """Assemble the full header: license, guards, forward declarations,
        one section per builtin object, then the global macro tables."""
        args = {
            'namespace': self.model().framework.setting('namespace'),
            'headerGuard': self.output_filename().replace('.', '_'),
            'macroPrefix': self.model().framework.setting('macro_prefix'),
        }

        sections = []
        sections.append(self.generate_license())
        sections.append(Template(Templates.DoNotEditWarning).substitute(args))
        sections.append(Template(Templates.HeaderIncludeGuardTop).substitute(args))
        sections.append(self.generate_forward_declarations())
        sections.append(Template(Templates.NamespaceTop).substitute(args))
        for object in self.model().objects:
            sections.append(self.generate_section_for_object(object))
        sections.append(self.generate_section_for_code_table_macro())
        sections.append(self.generate_section_for_code_name_macro())
        sections.append(Template(Templates.CombinedHeaderStaticMacros).substitute(args))
        sections.append(Template(Templates.NamespaceBottom).substitute(args))
        sections.append(Template(Templates.HeaderIncludeGuardBottom).substitute(args))
        # Sections are separated by a blank line in the emitted header.
        return "\n\n".join(sections)

    def generate_forward_declarations(self):
        return """namespace JSC {
class FunctionExecutable;
class VM;
enum class ConstructAbility : unsigned;
}"""

    def generate_section_for_object(self, object):
        # One section per builtin object: extern declarations, a blank line,
        # then the object's BUILTIN_DATA X-macro.
        lines = []
        lines.append('/* %s */' % object.object_name)
        lines.extend(self.generate_externs_for_object(object))
        lines.append("")
        lines.extend(self.generate_macros_for_object(object))
        return '\n'.join(lines)

    def generate_externs_for_object(self, object):
        # Returns a list of lines (not a joined string); the caller extends with it.
        lines = []

        for function in object.functions:
            function_args = {
                'codeName': BuiltinsGenerator.mangledNameForFunction(function) + 'Code',
            }

            lines.append("""extern const char* s_%(codeName)s;
extern const int s_%(codeName)sLength;
extern const JSC::ConstructAbility s_%(codeName)sConstructAbility;""" % function_args)

        return lines

    def generate_macros_for_object(self, object):
        # FOREACH_<OBJECT>_BUILTIN_DATA(macro) invokes
        # macro(name, mangledName, parameterCount) once per function.
        # Returns a list of lines (the caller extends with it).
        args = {
            'macroPrefix': self.model().framework.setting('macro_prefix'),
            'objectMacro': object.object_name.replace('.', '').upper(),
        }

        lines = []
        lines.append("#define %(macroPrefix)s_FOREACH_%(objectMacro)s_BUILTIN_DATA(macro) \\" % args)
        for function in object.functions:
            function_args = {
                'funcName': function.function_name,
                'mangledName': BuiltinsGenerator.mangledNameForFunction(function),
                'paramCount': len(function.parameters),
            }

            lines.append(" macro(%(funcName)s, %(mangledName)s, %(paramCount)d) \\" % function_args)
        return lines

    def generate_section_for_code_table_macro(self):
        # FOREACH_BUILTIN_CODE(macro): macro(codeName, functionName, codeLength)
        # for every builtin across all objects.
        args = {
            'macroPrefix': self.model().framework.setting('macro_prefix'),
        }

        lines = []
        lines.append("#define %(macroPrefix)s_FOREACH_BUILTIN_CODE(macro) \\" % args)
        for function in self.model().all_functions():
            function_args = {
                'funcName': function.function_name,
                'codeName': BuiltinsGenerator.mangledNameForFunction(function) + 'Code',
            }

            lines.append(" macro(%(codeName)s, %(funcName)s, s_%(codeName)sLength) \\" % function_args)
        return '\n'.join(lines)

    def generate_section_for_code_name_macro(self):
        # FOREACH_BUILTIN_FUNCTION_NAME(macro): one invocation per distinct
        # function name, sorted for deterministic output.
        args = {
            'macroPrefix': self.model().framework.setting('macro_prefix'),
        }

        # Duplicate internal names would make generated private identifiers
        # collide; warn loudly (but still generate).
        internal_function_names = [function.function_name for function in self.model().all_internal_functions()]
        if len(internal_function_names) != len(set(internal_function_names)):
            log.error("There are several internal functions with the same name. Private identifiers may clash.")

        lines = []
        lines.append("#define %(macroPrefix)s_FOREACH_BUILTIN_FUNCTION_NAME(macro) \\" % args)
        unique_names = list(set([function.function_name for function in self.model().all_functions()]))
        unique_names.sort()
        for function_name in unique_names:
            function_args = {
                'funcName': function_name,
            }

            lines.append(" macro(%(funcName)s) \\" % function_args)
        return '\n'.join(lines)
| gpl-2.0 |
arborh/tensorflow | tensorflow/python/distribute/multi_process_runner_no_init_test.py | 7 | 1561 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for `multi_process_runner` for non-initialization."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.distribute import multi_process_runner
from tensorflow.python.distribute import multi_worker_test_base
from tensorflow.python.eager import test
class MultiProcessRunnerNoInitTest(test.TestCase):
    """Verifies the error raised when `multi_process_runner.run` is used
    without the required initialization (i.e. without its `test_main`)."""

    def test_not_calling_correct_main(self):

        def simple_func():
            return 'foobar'

        # run() must refuse to work because test_main() was never invoked.
        with self.assertRaisesRegexp(RuntimeError,
                                     '`multi_process_runner` is not initialized.'):
            multi_process_runner.run(
                simple_func,
                multi_worker_test_base.create_cluster_spec(num_workers=1))
if __name__ == '__main__':
    # Intentionally not using `multi_process_runner.test_main()`, so that the
    # "not initialized" RuntimeError under test actually occurs.
    test.main()
| apache-2.0 |
macosforge/ccs-calendarserver | twistedcaldav/test/test_extensions.py | 1 | 9681 | # -*- coding: utf-8 -*-
##
# Copyright (c) 2009-2017 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
from twext.python.filepath import CachingFilePath as FilePath
from txweb2.http_headers import MimeType
from txweb2.static import MetaDataMixin
from twisted.internet.defer import inlineCallbacks, Deferred, succeed
from twisted.web.microdom import parseString
from twistedcaldav.extensions import DAVFile, DAVResourceWithChildrenMixin, extractCalendarServerPrincipalSearchData, validateTokens
from twistedcaldav.test.util import TestCase
from txdav.xml.element import WebDAVElement, ResourceType
from txdav.xml.parser import WebDAVDocument
from xml.etree.cElementTree import XML
class UnicodeProperty(WebDAVElement):
    """
    An element with a unicode name.
    """
    name = u'unicode'  # unicode literal, in contrast to StrProperty below
    allowed_children = {}
class StrProperty(WebDAVElement):
    """
    An element with a byte-string (str) name.
    """
    name = 'str'
    allowed_children = {}
class SimpleFakeRequest(object):
    """
    A minimal stand-in for the web2 'Request' API -- just enough to render a
    single L{DAVFile}.

    @ivar path: the path portion of the URL being rendered.
    """

    def __init__(self, path):
        self.path = path

    def urlForResource(self, resource):
        """
        @return: the fixed 'path' this fake was constructed with; the
        C{resource} argument is ignored since only one thing can be rendered.
        """
        return self.path
def browserHTML2ETree(htmlString):
    """
    Loosely parse an HTML string as XML and return an ElementTree object.

    Certain responses advertise HTML (tag soup) rather than strict XML, but
    ElementTree structures are far more convenient for making assertions in
    tests.  Twisted's lenient microdom parser sanitizes the input first; a
    stricter implementation could use HTML5Lib's etree bindings, but the
    built-in parser is a good approximation without an extra dependency.

    @param htmlString: a L{str}, encoded in UTF-8, containing
        browser-friendly HTML tag soup.

    @return: an object implementing the standard library ElementTree interface.
    """
    dom = parseString(htmlString, beExtremelyLenient=True)
    return XML(dom.toxml())
# A filename containing characters outside Basic Latin / Latin-1, used by the
# listing tests to exercise UTF-8 handling.
nonASCIIFilename = u"アニメ.txt"
class DirectoryListingTest(TestCase):
    """
    Test cases for HTML directory listing.
    """

    @inlineCallbacks
    def doDirectoryTest(self, addedNames, modify=lambda x: None,
                        expectedNames=None):
        """
        Do a test of a L{DAVFile} pointed at a directory, verifying that files
        existing with the given names will be faithfully 'played back' via HTML
        rendering.
        """
        if expectedNames is None:
            expectedNames = addedNames
        fp = FilePath(self.mktemp())
        fp.createDirectory()
        # NOTE(review): this touches expectedNames rather than addedNames --
        # for tests where the two differ, the extra expected entries also get
        # backing files.  Confirm this is intentional.
        for sampleName in expectedNames:
            fp.child(sampleName).touch()
        df = DAVFile(fp)
        # Give the caller a chance to tweak the resource (extra children,
        # properties, ...) before rendering.
        modify(df)
        responseText = (yield df.render(SimpleFakeRequest('/'))).stream.read()
        responseXML = browserHTML2ETree(responseText)
        # Collect the link texts from the rendered index page.
        names = set([element.text.encode("utf-8")
                     for element in responseXML.findall(".//a")])
        self.assertEquals(set(expectedNames), names)

    def test_simpleList(self):
        """
        Rendering a L{DAVFile} that is backed by a directory will produce an
        HTML document including links to its contents.
        """
        return self.doDirectoryTest([u'gamma.txt', u'beta.html', u'alpha.xml'])

    def test_emptyList(self):
        """
        Listing a directory with no files in it will produce an index with no
        links.
        """
        return self.doDirectoryTest([])

    def test_nonASCIIList(self):
        """
        Listing a directory with a file in it that includes characters that
        fall outside of the 'Basic Latin' and 'Latin-1 Supplement' unicode
        blocks should result in those characters being rendered as links in the
        index.
        """
        return self.doDirectoryTest([nonASCIIFilename.encode("utf-8")])

    @inlineCallbacks
    def test_nonASCIIListMixedChildren(self):
        """
        Listing a directory that contains unicode content-type metadata and
        non-ASCII characters in a filename should result in a listing that
        contains the names of both entities.
        """
        unicodeChildName = "test"
        def addUnicodeChild(davFile):
            # Fake a live (non-filesystem) child with just enough metadata to
            # be rendered in the listing.
            m = MetaDataMixin()
            m.contentType = lambda: MimeType.fromString('text/plain')
            m.resourceType = lambda: ResourceType()
            m.isCollection = lambda: False
            davFile.putChild(unicodeChildName, m)
        yield self.doDirectoryTest([nonASCIIFilename], addUnicodeChild,
                                   [nonASCIIFilename.encode("utf-8"), unicodeChildName])

    @inlineCallbacks
    def test_nonASCIIListMixedProperties(self):
        """
        Listing a directory that contains unicode DAV properties and non-ASCII
        characters in a filename should result in a listing that contains the
        names of both entities.
        """
        def addUnicodeChild(davFile):
            davFile.writeProperty(UnicodeProperty(), None)
            davFile.writeProperty(StrProperty(), None)
        yield self.doDirectoryTest([nonASCIIFilename], addUnicodeChild,
                                   [nonASCIIFilename.encode("utf-8")])

    def test_quotedCharacters(self):
        """
        Filenames might contain < or > characters, which need to be quoted in
        HTML.
        """
        return self.doDirectoryTest([u'<a>.txt', u'<script>.html',
                                     u'<style>.xml'])
class ChildTraversalTests(TestCase):
    def test_makeChildDeferred(self):
        """
        If L{DAVResourceWithChildrenMixin.makeChild} returns a L{Deferred},
        L{DAVResourceWithChildrenMixin.locateChild} will return a L{Deferred}.
        """
        class FakeChild(object):
            # Minimal stand-in for a child resource; only its name is checked.
            def __init__(self, name):
                self.name = name
        class SmellsLikeDAVResource(object):
            # Absorbs DAVResource-style constructor keywords.
            def __init__(self, **kw):
                pass
        class ResourceWithCheese(DAVResourceWithChildrenMixin,
                                 SmellsLikeDAVResource):
            def makeChild(self, name):
                # Asynchronous child construction: already-fired Deferred.
                return succeed(FakeChild(name))
        d = ResourceWithCheese().locateChild(None, ['cheese', 'burger'])
        self.assertIsInstance(d, Deferred)
        x = []
        d.addCallback(x.append)
        # The Deferred should have fired synchronously.
        self.assertEquals(len(x), 1)
        [result] = x
        self.assertEquals(len(result), 2)
        # locateChild yields (resource, remaining path segments).
        self.assertEquals(result[0].name, 'cheese')
        self.assertEquals(result[1], ['burger'])
class CalendarServerPrincipalSearchTests(TestCase):
    def test_extractCalendarServerPrincipalSearchData(self):
        """
        Exercise the parser for calendarserver-principal-search documents
        """
        # One token, an explicit search context, no apply-to, no limit.
        data = """<B:calendarserver-principal-search xmlns:A="DAV:" xmlns:B="http://calendarserver.org/ns/" context="attendee">
<B:search-token>morgen</B:search-token>
<A:prop>
<A:principal-URL/>
<A:displayname/>
</A:prop>
</B:calendarserver-principal-search>
"""
        doc = WebDAVDocument.fromString(data)
        tokens, context, applyTo, clientLimit, _ignore_propElement = extractCalendarServerPrincipalSearchData(doc.root_element)
        self.assertEquals(tokens, ["morgen"])
        self.assertEquals(context, "attendee")
        self.assertFalse(applyTo)
        self.assertEquals(clientLimit, None)

        # Two tokens, a client-supplied result limit, and the
        # apply-to-principal-collection-set marker.
        data = """<B:calendarserver-principal-search xmlns:A="DAV:" xmlns:B="http://calendarserver.org/ns/">
<B:search-token>morgen</B:search-token>
<B:search-token>sagen</B:search-token>
<B:limit>
<B:nresults>42</B:nresults>
</B:limit>
<A:prop>
<A:principal-URL/>
<A:displayname/>
</A:prop>
<A:apply-to-principal-collection-set/>
</B:calendarserver-principal-search>
"""
        doc = WebDAVDocument.fromString(data)
        tokens, context, applyTo, clientLimit, _ignore_propElement = extractCalendarServerPrincipalSearchData(doc.root_element)
        self.assertEquals(tokens, ["morgen", "sagen"])
        self.assertEquals(context, None)
        self.assertTrue(applyTo)
        self.assertEquals(clientLimit, 42)

    def test_validateTokens(self):
        """
        Ensure validateTokens only returns True if there is at least one token
        longer than one character
        """
        self.assertTrue(validateTokens(["abc"]))
        self.assertTrue(validateTokens(["ab", "c"]))
        self.assertTrue(validateTokens(["ab"]))
        self.assertFalse(validateTokens(["a"]))
        self.assertFalse(validateTokens(["a", "b", "c"]))
        self.assertFalse(validateTokens([""]))
        self.assertFalse(validateTokens([]))
| apache-2.0 |
cjhak/b2share | invenio/utils/xmlhelpers.py | 18 | 1097 | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2014 CERN.
#
# Invenio is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License as
# published by the Free Software Foundation; either version 2 of the
# License, or (at your option) any later version.
#
# Invenio is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Invenio; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Custom XML handling."""
def etree_to_dict(tree):
    """Translate etree into dictionary.

    Maps the (namespace-stripped) tag of ``tree`` to the recursively
    converted list of its children, or -- for a leaf element -- to its text.

    :param tree: etree element object
    :type tree: <http://lxml.de/api/lxml.etree-module.html>
    :returns: ``{tag: [child dicts, ...]}`` for inner nodes,
        ``{tag: text}`` for leaves
    """
    # split('}')[-1] strips an optional '{namespace}' prefix; unlike the
    # previous split('}')[1] it also works for tags without any namespace.
    tag = tree.tag.split('}')[-1]
    # Use a list comprehension instead of map() so the "empty -> fall back to
    # text" behaviour survives Python 3, where map() returns a lazy object
    # that is always truthy.
    children = [etree_to_dict(child) for child in tree.iterchildren()]
    return {tag: children or tree.text}
| gpl-2.0 |
twosigma/beaker-notebook | beakerx/beakerx_magics/java_magic.py | 1 | 1242 | # Copyright 2017 TWO SIGMA OPEN SOURCE, LLC #
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from IPython import get_ipython
from IPython.core.magic import (magics_class, cell_magic)
from beakerx_magics import KernelRunnerMagic
from ipykernel.zmqshell import ZMQInteractiveShell
@magics_class
class JavaMagics(KernelRunnerMagic):
    """IPython cell magic (%%java) that runs the cell body in the BeakerX Java kernel."""

    def __init__(self, shell):
        super(JavaMagics, self).__init__(shell)

    @cell_magic
    def java(self, line, cell):
        # `line` (text after %%java) is unused; only the cell body is executed.
        return super(JavaMagics, self).kernel("java", cell)
def load_ipython_extension(ipython):
    """`%load_ext` entry point: register the magics, but only on a ZMQ (notebook) shell."""
    if isinstance(ipython, ZMQInteractiveShell):
        ipython.register_magics(JavaMagics)
if __name__ == '__main__':
    # Script mode: register directly on the current IPython instance
    # (no ZMQ-shell check, unlike load_ipython_extension above).
    ip = get_ipython()
    ip.register_magics(JavaMagics)
| apache-2.0 |
justathoughtor2/atomicApe | cygwin/lib/python2.7/types.py | 99 | 2094 | """Define names for all type symbols known in the standard interpreter.
Types that are part of optional modules (e.g. array) are not listed.
"""
import sys

# Iterators in Python aren't a matter of type but of protocol. A large
# and changing number of builtin types implement *some* flavor of
# iterator. Don't check the type! Use hasattr to check for both
# "__iter__" and "next" attributes instead.

NoneType = type(None)
TypeType = type
ObjectType = object

IntType = int
LongType = long
FloatType = float
BooleanType = bool
try:
    ComplexType = complex
except NameError:
    pass  # complex is absent when Python is built without complex support

StringType = str

# StringTypes is already outdated. Instead of writing "type(x) in
# types.StringTypes", you should use "isinstance(x, basestring)". But
# we keep around for compatibility with Python 2.2.
try:
    UnicodeType = unicode
    StringTypes = (StringType, UnicodeType)
except NameError:
    StringTypes = (StringType,)

BufferType = buffer

TupleType = tuple
ListType = list
DictType = DictionaryType = dict

# Throwaway definitions below exist purely to capture the matching type objects.
def _f(): pass
FunctionType = type(_f)
LambdaType = type(lambda: None)  # Same as FunctionType
CodeType = type(_f.func_code)

def _g():
    yield 1
GeneratorType = type(_g())

class _C:
    def _m(self): pass
ClassType = type(_C)
UnboundMethodType = type(_C._m)  # Same as MethodType
_x = _C()
InstanceType = type(_x)
MethodType = type(_x._m)

BuiltinFunctionType = type(len)
BuiltinMethodType = type([].append)  # Same as BuiltinFunctionType

ModuleType = type(sys)
FileType = file
XRangeType = xrange

# Raise and immediately catch an exception to obtain live traceback and
# frame objects whose types can be captured.
try:
    raise TypeError
except TypeError:
    tb = sys.exc_info()[2]
    TracebackType = type(tb)
    FrameType = type(tb.tb_frame)
    del tb

SliceType = slice
EllipsisType = type(Ellipsis)

DictProxyType = type(TypeType.__dict__)
NotImplementedType = type(NotImplemented)

# For Jython, the following two types are identical
GetSetDescriptorType = type(FunctionType.func_code)
MemberDescriptorType = type(FunctionType.func_globals)

del sys, _f, _g, _C, _x  # Not for export

__all__ = list(n for n in globals() if n[:1] != '_')
| gpl-3.0 |
jfsantos/chainer | tests/functions_tests/test_softmax_cross_entropy.py | 3 | 2482 | import math
import unittest
import numpy
import six
import chainer
from chainer import cuda
from chainer import functions
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import condition
# Initialize the CUDA runtime once at import time when GPU support is present.
if cuda.available:
    cuda.init()
class TestSoftmaxCrossEntropy(unittest.TestCase):
    """Checks softmax_cross_entropy forward/backward on a small random batch
    (4 samples, 3 classes), on CPU and GPU, with and without cuDNN."""

    def setUp(self):
        self.x = numpy.random.uniform(-1, 1, (4, 3)).astype(numpy.float32)
        self.t = numpy.random.randint(0, 3, (4,)).astype(numpy.int32)

    def check_forward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(x, t, use_cudnn)
        # The loss is a float32 scalar.
        self.assertEqual(loss.data.shape, ())
        self.assertEqual(loss.data.dtype, numpy.float32)
        loss_value = float(cuda.to_cpu(loss.data))

        # Compute expected value: mean over samples of -log softmax(x)[t].
        y = numpy.exp(self.x)
        loss_expect = 0
        for i in six.moves.range(y.shape[0]):
            loss_expect -= math.log(y[i, self.t[i]] / y[i].sum())
        loss_expect /= y.shape[0]

        self.assertAlmostEqual(loss_expect, loss_value, places=5)

    @condition.retry(3)
    def test_forward_cpu(self):
        self.check_forward(self.x, self.t)

    @attr.cudnn
    @condition.retry(3)
    def test_forward_gpu(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_forward_gpu_no_cudnn(self):
        self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)

    def check_backward(self, x_data, t_data, use_cudnn=True):
        x = chainer.Variable(x_data)
        t = chainer.Variable(t_data)
        loss = functions.softmax_cross_entropy(x, t, use_cudnn)
        loss.backward()
        # The integer label input receives no gradient.
        self.assertEqual(None, t.grad)

        # Compare the analytic gradient with a numerical one.
        func = loss.creator
        f = lambda: func.forward((x.data, t.data))
        gx, = gradient_check.numerical_grad(f, (x.data,), (1,), eps=0.02)

        gradient_check.assert_allclose(gx, x.grad)

    @condition.retry(3)
    def test_backward_cpu(self):
        self.check_backward(self.x, self.t)

    @attr.cudnn
    @condition.retry(3)
    def test_backward_gpu(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t))

    @attr.gpu
    @condition.retry(3)
    def test_backward_gpu_no_cudnn(self):
        self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.t), False)
# Discover and run this module's tests under Chainer's testing harness.
testing.run_module(__name__, __file__)
| mit |
andrewor14/iolap | python/pyspark/ml/param/__init__.py | 2 | 8455 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from abc import ABCMeta
import copy
from pyspark.ml.util import Identifiable
__all__ = ['Param', 'Params']
class Param(object):
    """
    A param with self-contained documentation.

    A Param is identified by its parent's UID together with its name;
    two instances compare equal exactly when both of those match.
    """

    def __init__(self, parent, name, doc):
        if not isinstance(parent, Identifiable):
            raise TypeError("Parent must be an Identifiable but got type %s." % type(parent))
        self.parent = parent.uid
        self.name = str(name)
        self.doc = str(doc)

    def __str__(self):
        return str(self.parent) + "__" + self.name

    def __repr__(self):
        return "Param(parent=%r, name=%r, doc=%r)" % (self.parent, self.name, self.doc)

    def __hash__(self):
        # Consistent with __eq__: equal params share the "parent__name" string.
        return hash(str(self))

    def __eq__(self, other):
        if isinstance(other, Param):
            return self.parent == other.parent and self.name == other.name
        else:
            return False

    def __ne__(self, other):
        # Python 2 does not derive `!=` from __eq__; without this, two equal
        # Params would still be reported as unequal by the `!=` operator.
        return not self.__eq__(other)
class Params(Identifiable):
    """
    Components that take parameters. This also provides an internal
    param map to store parameter values attached to the instance.
    """

    __metaclass__ = ABCMeta

    def __init__(self):
        super(Params, self).__init__()
        #: internal param map for user-supplied values param map
        self._paramMap = {}

        #: internal param map for default values
        self._defaultParamMap = {}

        #: value returned by :py:func:`params`
        self._params = None

    @property
    def params(self):
        """
        Returns all params ordered by name. The default implementation
        uses :py:func:`dir` to get all attributes of type
        :py:class:`Param`.
        """
        if self._params is None:
            # Computed lazily and cached; "params" itself is excluded from
            # dir() to avoid recursing into this property.
            self._params = list(filter(lambda attr: isinstance(attr, Param),
                                       [getattr(self, x) for x in dir(self) if x != "params"]))
        return self._params

    def explainParam(self, param):
        """
        Explains a single param and returns its name, doc, and optional
        default value and user-supplied value in a string.
        """
        param = self._resolveParam(param)
        values = []
        if self.isDefined(param):
            if param in self._defaultParamMap:
                values.append("default: %s" % self._defaultParamMap[param])
            if param in self._paramMap:
                values.append("current: %s" % self._paramMap[param])
        else:
            values.append("undefined")
        valueStr = "(" + ", ".join(values) + ")"
        return "%s: %s %s" % (param.name, param.doc, valueStr)

    def explainParams(self):
        """
        Returns the documentation of all params with their optionally
        default values and user-supplied values.
        """
        return "\n".join([self.explainParam(param) for param in self.params])

    def getParam(self, paramName):
        """
        Gets a param by its name.
        """
        param = getattr(self, paramName)
        if isinstance(param, Param):
            return param
        else:
            raise ValueError("Cannot find param with name %s." % paramName)

    def isSet(self, param):
        """
        Checks whether a param is explicitly set by user.
        """
        param = self._resolveParam(param)
        return param in self._paramMap

    def hasDefault(self, param):
        """
        Checks whether a param has a default value.
        """
        param = self._resolveParam(param)
        return param in self._defaultParamMap

    def isDefined(self, param):
        """
        Checks whether a param is explicitly set by user or has
        a default value.
        """
        return self.isSet(param) or self.hasDefault(param)

    def hasParam(self, paramName):
        """
        Tests whether this instance contains a param with a given
        (string) name.
        """
        param = self._resolveParam(paramName)
        return param in self.params

    def getOrDefault(self, param):
        """
        Gets the value of a param in the user-supplied param map or its
        default value. Raises an error if neither is set.
        """
        param = self._resolveParam(param)
        # User-supplied value wins over the default; a KeyError propagates
        # when neither map contains the param.
        if param in self._paramMap:
            return self._paramMap[param]
        else:
            return self._defaultParamMap[param]

    def extractParamMap(self, extra=None):
        """
        Extracts the embedded default param values and user-supplied
        values, and then merges them with extra values from input into
        a flat param map, where the latter value is used if there exist
        conflicts, i.e., with ordering: default param values <
        user-supplied values < extra.

        :param extra: extra param values
        :return: merged param map
        """
        if extra is None:
            extra = dict()
        # Later update() calls override earlier entries, giving the
        # documented precedence: defaults < user-supplied < extra.
        paramMap = self._defaultParamMap.copy()
        paramMap.update(self._paramMap)
        paramMap.update(extra)
        return paramMap

    def copy(self, extra=None):
        """
        Creates a copy of this instance with the same uid and some
        extra params. The default implementation creates a
        shallow copy using :py:func:`copy.copy`, and then copies the
        embedded and extra parameters over and returns the copy.
        Subclasses should override this method if the default approach
        is not sufficient.

        :param extra: Extra parameters to copy to the new instance
        :return: Copy of this instance
        """
        if extra is None:
            extra = dict()
        that = copy.copy(self)
        that._paramMap = self.extractParamMap(extra)
        return that

    def _shouldOwn(self, param):
        """
        Validates that the input param belongs to this Params instance.
        """
        if not (self.uid == param.parent and self.hasParam(param.name)):
            raise ValueError("Param %r does not belong to %r." % (param, self))

    def _resolveParam(self, param):
        """
        Resolves a param and validates the ownership.

        :param param: param name or the param instance, which must
                      belong to this Params instance
        :return: resolved param instance
        """
        if isinstance(param, Param):
            self._shouldOwn(param)
            return param
        elif isinstance(param, str):
            return self.getParam(param)
        else:
            raise ValueError("Cannot resolve %r as a param." % param)

    @staticmethod
    def _dummy():
        """
        Returns a dummy Params instance used as a placeholder to
        generate docs.
        """
        dummy = Params()
        dummy.uid = "undefined"
        return dummy

    def _set(self, **kwargs):
        """
        Sets user-supplied params.
        """
        for param, value in kwargs.items():
            self._paramMap[getattr(self, param)] = value
        return self

    def _setDefault(self, **kwargs):
        """
        Sets default params.
        """
        for param, value in kwargs.items():
            self._defaultParamMap[getattr(self, param)] = value
        return self

    def _copyValues(self, to, extra=None):
        """
        Copies param values from this instance to another instance for
        params shared by them.

        :param to: the target instance
        :param extra: extra params to be copied
        :return: the target instance with param values copied
        """
        if extra is None:
            extra = dict()
        paramMap = self.extractParamMap(extra)
        # Only copy params that the target also declares (matched by name).
        for p in self.params:
            if p in paramMap and to.hasParam(p.name):
                to._set(**{p.name: paramMap[p]})
        return to
| apache-2.0 |
chheplo/jaikuengine | explore/views.py | 30 | 2353 | # Copyright 2009 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from django import http
from django import template
from django.conf import settings
from django.template import loader
from common import api, util
from common.display import prep_entry_list, prep_stream_dict
ENTRIES_PER_PAGE = 20
def explore_recent(request, format="html"):
    """Render the public 'explore' stream of recent entries.

    NOTE(review): the template context below is built from ``locals()``,
    so every local variable name in this function is part of the
    template contract -- do not rename locals without checking the
    templates under explore/templates/.

    :param request: Django request; ``request.user`` is forwarded to the
        api layer for visibility filtering.
    :param format: one of 'html', 'json', 'atom', 'rss'; any other value
        falls through and the view returns None.
    """
    per_page = ENTRIES_PER_PAGE
    offset, prev = util.page_offset(request)
    # Fetch one extra item so page_entries can tell whether a "more"
    # link is needed.
    inbox = api.inbox_get_explore(request.user, limit=(per_page + 1),
                                  offset=offset)
    # START inbox generation chaos
    # TODO(termie): refacccttttooorrrrr
    entries = api.entry_get_entries(request.user, inbox)
    # Some inbox refs may resolve to no visible entry; shrink the page
    # size by the number that were dropped so paging stays consistent.
    per_page = per_page - (len(inbox) - len(entries))
    entries, more = util.page_entries(request, entries, per_page)
    stream_keys = [e.stream for e in entries]
    streams = api.stream_get_streams(request.user, stream_keys)
    actor_nicks = [e.owner for e in entries] + [e.actor for e in entries]
    actors = api.actor_get_actors(request.user, actor_nicks)
    # here comes lots of munging data into shape
    streams = prep_stream_dict(streams, actors)
    entries = prep_entry_list(entries, streams, actors)
    # END inbox generation chaos
    # Template flags: highlight the 'explore' nav tab and pick the green
    # sidebar variant (consumed via locals() in the context).
    area = 'explore'
    sidebar_green_top = True
    c = template.RequestContext(request, locals())
    if format == 'html':
        t = loader.get_template('explore/templates/recent.html')
        return http.HttpResponse(t.render(c));
    elif format == 'json':
        t = loader.get_template('explore/templates/recent.json')
        return util.HttpJsonResponse(t.render(c), request)
    elif format == 'atom':
        t = loader.get_template('explore/templates/recent.atom')
        return util.HttpAtomResponse(t.render(c), request)
    elif format == 'rss':
        t = loader.get_template('explore/templates/recent.rss')
        return util.HttpRssResponse(t.render(c), request)
| apache-2.0 |
ybellavance/python-for-android | python-modules/twisted/twisted/python/dist.py | 60 | 12329 | """
Distutils convenience functionality.
Don't use this outside of Twisted.
Maintainer: Christopher Armstrong
"""
import sys, os
from distutils.command import build_scripts, install_data, build_ext, build_py
from distutils.errors import CompileError
from distutils import core
from distutils.core import Extension
twisted_subprojects = ["conch", "lore", "mail", "names",
"news", "pair", "runner", "web", "web2",
"words", "vfs"]
class ConditionalExtension(Extension):
    """
    An extension module that is only compiled when a condition holds.

    @param condition: A one-argument callable returning True or False to
        indicate whether the extension should be built.  It receives the
        L{build_ext_twisted} instance, which has useful methods for
        checking things about the platform.  Defaults to a callable that
        always returns True.
    """
    def __init__(self, *args, **kwargs):
        try:
            condition = kwargs.pop("condition")
        except KeyError:
            condition = lambda builder: True
        self.condition = condition
        Extension.__init__(self, *args, **kwargs)
def setup(**kw):
    """
    An alternative to distutils' setup() which is specially designed
    for Twisted subprojects.

    Pass twisted_subproject=projname if you want package and data
    files to automatically be found for you.

    @param conditionalExtensions: Extensions to optionally build.
    @type conditionalExtensions: C{list} of L{ConditionalExtension}
    """
    normalized = get_setup_args(**kw)
    return core.setup(**normalized)
def get_setup_args(**kw):
    """
    Normalize keyword arguments for distutils' setup().

    Expands the Twisted-specific keys ('twisted_subproject', 'plugins',
    'conditionalExtensions') into standard distutils keys ('packages',
    'version', 'py_modules', 'data_files', 'ext_modules', 'cmdclass')
    and returns the resulting mapping, mutating it in place.
    """
    if 'twisted_subproject' in kw:
        # Sub-project paths are resolved relative to the toplevel
        # checkout, so refuse to run from anywhere else.
        if 'twisted' not in os.listdir('.'):
            raise RuntimeError("Sorry, you need to run setup.py from the "
                               "toplevel source directory.")
        projname = kw['twisted_subproject']
        projdir = os.path.join('twisted', projname)
        kw['packages'] = getPackages(projdir, parent='twisted')
        kw['version'] = getVersion(projname)
        # Ship the sub-project's dropin plugin module, if present.
        plugin = "twisted/plugins/twisted_" + projname + ".py"
        if os.path.exists(plugin):
            kw.setdefault('py_modules', []).append(
                plugin.replace("/", ".")[:-3])
        kw['data_files'] = getDataFiles(projdir, parent='twisted')
        del kw['twisted_subproject']
    else:
        # Bare plugin names are rewritten as twisted.plugins modules.
        if 'plugins' in kw:
            py_modules = []
            for plg in kw['plugins']:
                py_modules.append("twisted.plugins." + plg)
            kw.setdefault('py_modules', []).extend(py_modules)
            del kw['plugins']
    if 'cmdclass' not in kw:
        kw['cmdclass'] = {
            'install_data': install_data_twisted,
            'build_scripts': build_scripts_twisted}
        # Python 2.2's build_py couldn't mix packages and py_modules.
        if sys.version_info[:3] < (2, 3, 0):
            kw['cmdclass']['build_py'] = build_py_twisted
    if "conditionalExtensions" in kw:
        extensions = kw["conditionalExtensions"]
        del kw["conditionalExtensions"]
        if 'ext_modules' not in kw:
            # This is a workaround for distutils behavior; ext_modules isn't
            # actually used by our custom builder. distutils deep-down checks
            # to see if there are any ext_modules defined before invoking
            # the build_ext command. We need to trigger build_ext regardless
            # because it is the thing that does the conditional checks to see
            # if it should build any extensions. The reason we have to delay
            # the conditional checks until then is that the compiler objects
            # are not yet set up when this code is executed.
            kw["ext_modules"] = extensions
        # Capture the extension list on a per-call subclass so build_ext
        # can evaluate each extension's condition at build time.
        class my_build_ext(build_ext_twisted):
            conditionalExtensions = extensions
        kw.setdefault('cmdclass', {})['build_ext'] = my_build_ext
    return kw
def getVersion(proj, base="twisted"):
    """
    Extract the version number for a given project.

    Executes C{<base>[/<proj>]/_version.py} in a scratch namespace and
    reads its C{version} object.

    @param proj: the name of the project. Examples are "core",
        "conch", "words", "mail".

    @rtype: str
    @returns: The version number of the project, as a string like
        "2.0.0".
    """
    # "core" keeps its _version.py at the top of the 'twisted' package;
    # every other project has its own subdirectory.
    if proj == 'core':
        vfile = os.path.join(base, '_version.py')
    else:
        vfile = os.path.join(base, proj, '_version.py')
    namespace = {'__name__': 'Nothing to see here'}
    execfile(vfile, namespace)
    return namespace['version'].base()
# Names that are excluded from globbing results: version-control
# metadata directories and bookkeeping files that must never be
# shipped as data files.
EXCLUDE_NAMES = ["{arch}", "CVS", ".cvsignore", "_darcs",
                 "RCS", "SCCS", ".svn"]
# Glob patterns for generated/backup artifacts (compiled modules,
# shared objects, editor backups); Python sources are also excluded
# here because they are handled as packages, not data files.
EXCLUDE_PATTERNS = ["*.py[cdo]", "*.s[ol]", ".#*", "*~", "*.py"]
import fnmatch
def _filterNames(names):
    """
    Given a list of file names, return those names that should be copied
    as data files: everything except version-control bookkeeping names
    and generated/backup artifacts.
    """
    kept = [name for name in names if name not in EXCLUDE_NAMES]
    # This is needed when building a distro from a working copy (likely
    # a checkout) rather than a pristine export.
    for pattern in EXCLUDE_PATTERNS:
        kept = [name for name in kept
                if not fnmatch.fnmatch(name, pattern)
                and not name.endswith('.py')]
    return kept
def relativeTo(base, relativee):
    """
    Gets 'relativee' relative to 'base', re-rooted onto 'base' exactly
    as the caller spelled it (so a relative 'base' yields a relative
    result).

    i.e.,

    >>> relativeTo('/home', '/home/radix/Projects')
    '/home/radix/Projects'

    The 'relativee' must be 'base' itself or a descendant of it.

    @param base: the path the result should be expressed under
    @param relativee: a path that must lie under C{base}
    @return: C{base} joined with the portion of C{relativee} below it
    @raise ValueError: if C{relativee} is not under C{base}
    """
    basepath = os.path.abspath(base)
    relativee = os.path.abspath(relativee)
    # Compare against the base path *with* a trailing separator so a
    # sibling sharing a name prefix (e.g. '/homestead' vs '/home') is
    # not mistaken for a child.  A bare startswith() on the path text
    # accepted such false prefixes.
    prefix = basepath if basepath.endswith(os.sep) else basepath + os.sep
    if relativee == basepath or relativee.startswith(prefix):
        relative = relativee[len(basepath):]
        if relative.startswith(os.sep):
            relative = relative[1:]
        return os.path.join(base, relative)
    raise ValueError("%s is not a subpath of %s" % (relativee, basepath))
def getDataFiles(dname, ignore=None, parent=None):
    """
    Get all the data files that should be included in this distutils Project.

    'dname' should be the path to the package that you're distributing.

    'ignore' is a list of sub-packages to ignore.  This facilitates
    disparate package hierarchies.  That's a fancy way of saying that
    the 'twisted' package doesn't want to include the 'twisted.conch'
    package, so it will pass ['conch'] as the value.

    'parent' is necessary if you're distributing a subpackage like
    twisted.conch.  'dname' should point to 'twisted/conch' and 'parent'
    should point to 'twisted'.  This ensures that your data_files are
    generated correctly, only using relative paths for the first element
    of the tuple ('twisted/conch/*').  The default 'parent' is the
    current working directory.
    """
    parent = parent or "."
    ignore = ignore or []
    result = []
    for directory, subdirectories, filenames in os.walk(dname):
        # Prune unwanted subtrees in place so os.walk never descends
        # into them.
        for unwanted in EXCLUDE_NAMES:
            if unwanted in subdirectories:
                subdirectories.remove(unwanted)
        for unwanted in ignore:
            if unwanted in subdirectories:
                subdirectories.remove(unwanted)
        kept = _filterNames(filenames)
        if kept:
            relative_dir = relativeTo(parent, directory)
            relative_files = [relativeTo(parent,
                                         os.path.join(directory, name))
                              for name in kept]
            result.append((relative_dir, relative_files))
    return result
def getPackages(dname, pkgname=None, results=None, ignore=None, parent=None):
    """
    Get all packages which are under dname.  This is necessary for
    Python 2.2's distutils.  Pretty similar arguments to getDataFiles,
    including 'parent'.

    A directory counts as a package only if it contains __init__.py;
    recursion stops at non-package directories and at names in 'ignore'.
    """
    parent = parent or ""
    prefix = [parent] if parent else []
    bname = os.path.basename(dname)
    ignore = ignore or []
    if bname in ignore:
        return []
    if results is None:
        results = []
    if pkgname is None:
        pkgname = []
    entries = os.listdir(dname)
    children = [os.path.join(dname, entry) for entry in entries]
    if '__init__.py' in entries:
        results.append(prefix + pkgname + [bname])
        for child in filter(os.path.isdir, children):
            getPackages(child, pkgname=pkgname + [bname],
                        results=results, ignore=ignore,
                        parent=parent)
    return ['.'.join(segments) for segments in results]
def getScripts(projname, basedir=''):
    """
    Returns a list of scripts for a Twisted subproject; this works in
    any of an SVN checkout, a project-specific tarball.
    """
    scriptdir = os.path.join(basedir, 'bin', projname)
    if not os.path.isdir(scriptdir):
        # Probably a project-specific tarball, in which case only this
        # project's bins are included directly under 'bin'.
        scriptdir = os.path.join(basedir, 'bin')
        if not os.path.isdir(scriptdir):
            return []
    entries = os.listdir(scriptdir)
    if '.svn' in entries:
        entries.remove('.svn')
    return filter(os.path.isfile,
                  [os.path.join(scriptdir, entry) for entry in entries])
## Helpers and distutil tweaks
class build_py_twisted(build_py.build_py):
    """
    Changes behavior in Python 2.2 to support simultaneous specification of
    `packages' and `py_modules'.
    """
    def run(self):
        # Build both lists when given, instead of treating them as
        # mutually exclusive like the stock 2.2 command did.
        if self.py_modules:
            self.build_modules()
        if self.packages:
            self.build_packages()
        self.byte_compile(self.get_outputs(include_bytecode=0))
class build_scripts_twisted(build_scripts.build_scripts):
    """Renames scripts so they end with '.py' on Windows."""
    # NOTE(review): this module uses Python 2-only 'except E, e:' syntax
    # below; it cannot run unmodified on Python 3.
    def run(self):
        build_scripts.build_scripts.run(self)
        if not os.name == "nt":
            return
        for f in os.listdir(self.build_dir):
            fpath=os.path.join(self.build_dir, f)
            if not fpath.endswith(".py"):
                try:
                    # Clear the way for the rename below; a missing
                    # target is expected and harmless.
                    os.unlink(fpath + ".py")
                except EnvironmentError, e:
                    if e.args[1]=='No such file or directory':
                        pass
                os.rename(fpath, fpath + ".py")
class install_data_twisted(install_data.install_data):
    """I make sure data files are installed in the package directory."""
    def finalize_options(self):
        # Point install_dir at the library install path so data files
        # land alongside the package modules rather than under
        # sys.prefix (distutils' default).
        self.set_undefined_options('install',
            ('install_lib', 'install_dir')
        )
        install_data.install_data.finalize_options(self)
class build_ext_twisted(build_ext.build_ext):
    """
    Allow subclasses to easily detect and customize Extensions to
    build at install-time.

    Subclasses (created per-call in get_setup_args) provide a
    C{conditionalExtensions} attribute listing L{ConditionalExtension}
    instances; each extension's condition callable is evaluated here,
    at build time, when the compiler object is available.
    """
    def prepare_extensions(self):
        """
        Prepare the C{self.extensions} attribute (used by
        L{build_ext.build_ext}) by checking which extensions in
        L{conditionalExtensions} should be built.  In addition, if we are
        building on NT, define the WIN32 macro to 1.
        """
        # always define WIN32 under Windows
        if os.name == 'nt':
            self.define_macros = [("WIN32", 1)]
        else:
            self.define_macros = []
        # Keep only the extensions whose condition approves this builder.
        self.extensions = [x for x in self.conditionalExtensions
                           if x.condition(self)]
        for ext in self.extensions:
            ext.define_macros.extend(self.define_macros)
    def build_extensions(self):
        """
        Check to see which extension modules to build and then build them.
        """
        self.prepare_extensions()
        build_ext.build_ext.build_extensions(self)
    def _remove_conftest(self):
        # Best-effort cleanup of the scratch files left by
        # _compile_helper; ignore anything that is already gone.
        for filename in ("conftest.c", "conftest.o", "conftest.obj"):
            try:
                os.unlink(filename)
            except EnvironmentError:
                pass
    def _compile_helper(self, content):
        # Write 'content' to conftest.c and return whether the platform
        # compiler accepts it; scratch files are always cleaned up.
        conftest = open("conftest.c", "w")
        try:
            conftest.write(content)
            conftest.close()
            try:
                self.compiler.compile(["conftest.c"], output_dir='')
            except CompileError:
                return False
            return True
        finally:
            self._remove_conftest()
    def _check_header(self, header_name):
        """
        Check if the given header can be included by trying to compile a file
        that contains only an #include line.
        """
        self.compiler.announce("checking for %s ..." % header_name, 0)
        return self._compile_helper("#include <%s>\n" % header_name)
| apache-2.0 |
rmfitzpatrick/ansible | lib/ansible/plugins/lookup/dnstxt.py | 46 | 2169 | # (c) 2012, Jan-Piet Mens <jpmens(at)gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
HAVE_DNS = False
try:
import dns.resolver
from dns.exception import DNSException
HAVE_DNS = True
except ImportError:
pass
from ansible.errors import AnsibleError
from ansible.module_utils._text import to_native
from ansible.plugins.lookup import LookupBase
# ==============================================================
# DNSTXT: DNS TXT records
#
# key=domainname
# TODO: configurable resolver IPs
# --------------------------------------------------------------
class LookupModule(LookupBase):
    """Resolve each term's domain to the concatenation of its DNS TXT
    records.  NXDOMAIN yields the literal string 'NXDOMAIN', a timeout
    yields '', and any other DNS failure raises AnsibleError."""

    def run(self, terms, variables=None, **kwargs):
        if HAVE_DNS is False:
            raise AnsibleError("Can't LOOKUP(dnstxt): module dns.resolver is not installed")

        results = []
        for term in terms:
            # Only the first whitespace-separated token is the domain.
            domain = term.split()[0]
            try:
                answers = dns.resolver.query(domain, 'TXT')
                # Strip the outer quotes from each TXT rdata fragment.
                fragments = [rdata.to_text()[1:-1] for rdata in answers]
                text = ''.join(fragments)
            except dns.resolver.NXDOMAIN:
                text = 'NXDOMAIN'
            except dns.resolver.Timeout:
                text = ''
            except DNSException as e:
                raise AnsibleError("dns.resolver unhandled exception %s" % to_native(e))
            results.append(text)
        return results
| gpl-3.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.