repo_name stringlengths 6 100 | path stringlengths 4 294 | copies stringlengths 1 5 | size stringlengths 4 6 | content stringlengths 606 896k | license stringclasses 15
values |
|---|---|---|---|---|---|
hooman/swift | benchmark/scripts/compare_perf_tests.py | 10 | 16354 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# ===--- compare_perf_tests.py -------------------------------------------===//
#
# This source file is part of the Swift.org open source project
#
# Copyright (c) 2014 - 2017 Apple Inc. and the Swift project authors
# Licensed under Apache License v2.0 with Runtime Library Exception
#
# See https://swift.org/LICENSE.txt for license information
# See https://swift.org/CONTRIBUTORS.txt for the list of Swift project authors
#
# ===---------------------------------------------------------------------===//
from __future__ import print_function
import argparse
import csv
import sys
from math import sqrt
class PerformanceTestResult(object):
    """PerformanceTestResult holds results from executing an individual
    benchmark from the Swift Benchmark Suite as reported by the test driver
    (Benchmark_O, Benchmark_Onone, Benchmark_Ounchecked or Benchmark_Driver).

    It depends on the log format emitted by the test driver in the form:
    #,TEST,SAMPLES,MIN(μs),MAX(μs),MEAN(μs),SD(μs),MEDIAN(μs),MAX_RSS(B)

    The last column, MAX_RSS, is emitted only for runs instrumented by the
    Benchmark_Driver to measure rough memory use during the execution of the
    benchmark.
    """

    def __init__(self, csv_row):
        """PerformanceTestResult instance is created from an iterable with
        length of 8 or 9. (Like a row provided by the CSV parser.)
        """
        # csv_row[0] is just an ordinal number of the test - skip that
        self.name = csv_row[1]          # Name of the performance test
        self.samples = int(csv_row[2])  # Number of measurement samples taken
        self.min = int(csv_row[3])      # Minimum runtime (ms)
        self.max = int(csv_row[4])      # Maximum runtime (ms)
        self.mean = int(csv_row[5])     # Mean (average) runtime (ms)
        sd = int(csv_row[6])            # Standard Deviation (ms)
        # S_runtime is the running sum of squared deviations from the mean
        # (Welford's "S"/M2 term), kept so that variance can be recomputed
        # incrementally when results are merged. With fewer than 2 samples
        # the variance is undefined, so store 0.
        self.S_runtime = (0 if self.samples < 2 else
                          (sd * sd) * (self.samples - 1))
        self.median = int(csv_row[7])   # Median runtime (ms)
        self.max_rss = (                # Maximum Resident Set Size (B)
            int(csv_row[8]) if len(csv_row) > 8 else None)

    def __repr__(self):
        return (
            '<PerformanceTestResult name:{0.name!r} '
            'samples:{0.samples!r} min:{0.min!r} max:{0.max!r} '
            'mean:{0.mean!r} sd:{0.sd!r} median:{0.median!r}>'.format(self))

    @property
    def sd(self):
        """Standard Deviation (ms)"""
        return (0 if self.samples < 2 else
                sqrt(self.S_runtime / (self.samples - 1)))

    @staticmethod
    def running_mean_variance(state, x):
        """Compute running variance, B. P. Welford's method.

        See Knuth TAOCP vol 2, 3rd edition, page 232, or
        https://www.johndcook.com/blog/standard_deviation/
        M is mean, Standard Deviation is defined as sqrt(S/k-1)

        Args:
            state: (k, M, S) tuple - sample count, running mean, running S.
            x: the new sample to fold into the state.

        Returns:
            Updated (k, M, S) tuple.
        """
        # NOTE: the original signature used Python-2-only tuple parameter
        # unpacking `((k, M_, S_), x)`; unpacking explicitly keeps the same
        # call sites working on both Python 2 and 3.
        k, M_, S_ = state
        k = float(k + 1)
        M = M_ + (x - M_) / k
        S = S_ + (x - M_) * (x - M)
        return (k, M, S)

    def merge(self, r):
        """Merging test results recomputes min and max.
        It attempts to recompute mean and standard deviation when all_samples
        are available. There is no correct way to compute these values from
        test results that are summaries from more than 3 samples.

        The use case here is comparing tests results parsed from concatenated
        log files from multiple runs of benchmark driver.
        """
        self.min = min(self.min, r.min)
        self.max = max(self.max, r.max)
        # self.median = None # unclear what to do here

        def push(x):
            state = (self.samples, self.mean, self.S_runtime)
            state = self.running_mean_variance(state, x)
            (self.samples, self.mean, self.S_runtime) = state

        # Merging test results with up to 3 samples is exact.
        # Use an explicit loop instead of map(): Python 3's map is lazy and
        # would never execute the side-effecting push().
        for value in [r.min, r.max, r.median][:min(r.samples, 3)]:
            push(value)

    # Column labels for header row in results table
    header = ('TEST', 'MIN', 'MAX', 'MEAN', 'MAX_RSS')

    def values(self):
        """Values property for display in results table comparisons
        in format: ('TEST', 'MIN', 'MAX', 'MEAN', 'MAX_RSS').
        """
        return (
            self.name,
            str(self.min), str(self.max), str(int(self.mean)),
            str(self.max_rss) if self.max_rss else '—'
        )
class ResultComparison(object):
    """ResultComparison compares MINs from new and old PerformanceTestResult.
    It computes speedup ratio and improvement delta (%).
    """

    # Column labels for header row in results table
    header = ('TEST', 'OLD', 'NEW', 'DELTA', 'SPEEDUP')

    def __init__(self, old, new):
        """Compare `old` and `new` results for the same benchmark name."""
        assert(old.name == new.name)
        self.old = old
        self.new = new
        self.name = old.name  # Test name, convenience accessor
        # The 0.001 epsilon guards against division by zero on 0 ms MINs.
        old_min = old.min + 0.001
        new_min = new.min + 0.001
        # Speedup ratio
        self.ratio = old_min / new_min
        # Test runtime improvement in %
        self.delta = (new_min / old_min - 1) * 100
        # Add ' (?)' to the speedup column as indication of dubious changes:
        # result's MIN falls inside the (MIN, MAX) interval of result they are
        # being compared with.
        overlaps = (old.min < new.min < old.max) or (new.min < old.min < new.max)
        self.is_dubious = ' (?)' if overlaps else ''

    def values(self):
        """Values property for display in results table comparisons
        in format: ('TEST', 'OLD', 'NEW', 'DELTA', 'SPEEDUP').
        """
        return (self.name,
                str(self.old.min), str(self.new.min),
                '{0:+.1f}%'.format(self.delta),
                '{0:.2f}x{1}'.format(self.ratio, self.is_dubious))
class TestComparator(object):
    """TestComparator parses `PerformanceTestResult`s from CSV log files.
    Then it determines which tests were `added`, `removed` and which can be
    compared. It then splits the `ResultComparison`s into 3 groups according to
    the `delta_threshold` by the change in performance: `increased`,
    `descreased` and `unchanged`.

    The lists of `added`, `removed` and `unchanged` tests are sorted
    alphabetically. The `increased` and `decreased` lists are sorted in
    descending order by the amount of change.
    """

    def __init__(self, old_file, new_file, delta_threshold):
        """Parse both logs and partition the results.

        Args:
            old_file: str, path to the baseline CSV log file.
            new_file: str, path to the new CSV log file.
            delta_threshold: float, relative deviation of the speedup `ratio`
                from 1.0 beyond which a result counts as changed.
        """
        def load_from_csv(filename):
            # Handles output from Benchmark_O and Benchmark_Driver (which
            # adds the MAX_RSS column). Rows whose first column is not a
            # test number (e.g. the TOTALS summary) are skipped.
            # Results of repeated runs of the same benchmark are merged.
            # Fixes over the original: the file handle is closed (with-
            # statement) and the Python-2-only builtins map/filter/reduce
            # idioms are replaced with portable loops.
            results = {}
            with open(filename) as f:
                for row in csv.reader(f):
                    if len(row) > 7 and row[0].isdigit():
                        result = PerformanceTestResult(row)
                        if result.name in results:
                            results[result.name].merge(result)
                        else:
                            results[result.name] = result
            return results

        old_results = load_from_csv(old_file)
        new_results = load_from_csv(new_file)
        old_tests = set(old_results)
        new_tests = set(new_results)
        comparable_tests = new_tests.intersection(old_tests)
        added_tests = new_tests.difference(old_tests)
        removed_tests = old_tests.difference(new_tests)
        self.added = sorted((new_results[t] for t in added_tests),
                            key=lambda r: r.name)
        self.removed = sorted((old_results[t] for t in removed_tests),
                              key=lambda r: r.name)

        # Materialize the comparisons as a list; the original Python 2 code
        # iterated a `map` object several times, which silently yields
        # nothing after the first pass on Python 3.
        comparisons = [ResultComparison(old_results[name], new_results[name])
                       for name in comparable_tests]
        decreased, increased, unchanged = [], [], []
        for comparison in comparisons:
            if comparison.ratio < (1 - delta_threshold):
                decreased.append(comparison)
            elif comparison.ratio > (1 + delta_threshold):
                increased.append(comparison)
            else:
                unchanged.append(comparison)
        # Sorted partitions: biggest regressions and improvements first,
        # unchanged tests alphabetically.
        self.decreased = sorted(decreased, key=lambda c: -c.delta)
        self.increased = sorted(increased, key=lambda c: c.delta)
        self.unchanged = sorted(unchanged, key=lambda c: c.name)
class ReportFormatter(object):
    """ReportFormatter formats the `PerformanceTestResult`s and
    `ResultComparison`s provided by `TestComparator` using their `header` and
    `values()` into report table. Supported formats are: `markdown` (used for
    displaying benchmark results on GitHub), `git` and `html`.

    NOTE(review): `_column_widths` relies on Python 2 semantics - the builtin
    `reduce` and eager `map` - and would need `functools.reduce` plus
    materialized lists on Python 3. Confirm the target interpreter before
    porting.
    """

    def __init__(self, comparator, old_branch, new_branch, changes_only):
        """Stores the comparator and the report options.

        Args:
            comparator: TestComparator with the partitioned result groups.
            old_branch: str, name of the baseline branch (currently only kept,
                see the FIXME in `_formatted_text`/`html`).
            new_branch: str, name of the new branch (currently only kept).
            changes_only: bool, if True the 'No Changes' section is omitted.
        """
        self.comparator = comparator
        self.old_branch = old_branch
        self.new_branch = new_branch
        self.changes_only = changes_only

    # Collapsible markdown section: {0} title, {1} result count,
    # {2} table body, {3} 'open' to render the section expanded.
    MARKDOWN_DETAIL = """
<details {3}>
  <summary>{0} ({1})</summary>
  {2}
</details>
"""
    # Plain-text section for git: {0} title, {1} count, {2} table body.
    GIT_DETAIL = """
{0} ({1}): {2}"""

    def markdown(self):
        """Renders the report as GitHub-flavored markdown tables."""
        return self._formatted_text(
            ROW='{0} | {1} | {2} | {3} | {4} \n',
            HEADER_SEPARATOR='---',
            DETAIL=self.MARKDOWN_DETAIL)

    def git(self):
        """Renders the report as plain text (for commit messages etc.)."""
        return self._formatted_text(
            ROW='{0} {1} {2} {3} {4} \n',
            HEADER_SEPARATOR=' ',
            DETAIL=self.GIT_DETAIL)

    def _column_widths(self):
        """Returns a 5-tuple with the maximum width of each report column,
        computed over both header rows and every row that will be printed.
        """
        changed = self.comparator.decreased + self.comparator.increased
        comparisons = (changed if self.changes_only else
                       changed + self.comparator.unchanged)
        comparisons += self.comparator.added + self.comparator.removed
        # One entry per row: the lengths of its 5 cells.
        widths = [
            map(len, columns) for columns in
            [PerformanceTestResult.header, ResultComparison.header] +
            [c.values() for c in comparisons]
        ]

        def max_widths(maximum, widths):
            # Element-wise maximum of two width 5-tuples.
            return tuple(map(max, zip(maximum, widths)))

        return reduce(max_widths, widths, tuple([0] * 5))

    def _formatted_text(self, ROW, HEADER_SEPARATOR, DETAIL):
        """Shared renderer behind `markdown()` and `git()`; the three
        parameters carry the format-specific templates.
        """
        widths = self._column_widths()

        def justify_columns(contents):
            # Pad every cell to the common column width.
            return tuple([c.ljust(w) for w, c in zip(widths, contents)])

        def row(contents):
            return ROW.format(*justify_columns(contents))

        def header(header):
            return '\n' + row(header) + row(tuple([HEADER_SEPARATOR] * 5))

        def format_columns(r, strong):
            # Emphasize the last column (speedup) with ** when requested.
            return (r if not strong else
                    r[:-1] + ('**{0}**'.format(r[-1]), ))

        def table(title, results, is_strong=False, is_open=False):
            # Render one section; an empty group produces no output at all.
            rows = [
                row(format_columns(result_comparison.values(), is_strong))
                for result_comparison in results
            ]
            return ('' if not rows else
                    DETAIL.format(*[
                        title, len(results),
                        (header(results[0].header) + ''.join(rows)),
                        ('open' if is_open else '')
                    ]))

        return ''.join([
            # FIXME print self.old_branch, self.new_branch
            table('Regression', self.comparator.decreased, True, True),
            table('Improvement', self.comparator.increased, True),
            ('' if self.changes_only else
             table('No Changes', self.comparator.unchanged)),
            table('Added', self.comparator.added, is_open=True),
            table('Removed', self.comparator.removed, is_open=True)
        ])

    # Page template; {0} is the concatenation of all table sections.
    # Braces are doubled because the template goes through str.format.
    HTML = """
<!DOCTYPE html>
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8" />
<style>
        body {{ font-family: -apple-system, sans-serif; font-size: 14px; }}
        table {{ border-spacing: 2px; border-color: gray; border-spacing: 0;
                border-collapse: collapse; }}
        table tr {{ background-color: #fff; border-top: 1px solid #c6cbd1; }}
        table th, table td {{ padding: 6px 13px; border: 1px solid #dfe2e5; }}
        th {{ text-align: center; padding-top: 130px; }}
        td {{ text-align: right; }}
        table td:first-child {{ text-align: left; }}
        tr:nth-child(even) {{ background-color: #000000; }}
        tr:nth-child(2n) {{ background-color: #f6f8fa; }}
</style>
</head>
<body>
<table>
{0}
</table>
</body>
</html>"""

    # Section header row: {0} title, {1} count, {2}-{5} column labels.
    HTML_HEADER_ROW = """
        <tr>
                <th align='left'>{0} ({1})</th>
                <th align='left'>{2}</th>
                <th align='left'>{3}</th>
                <th align='left'>{4}</th>
                <th align='left'>{5}</th>
        </tr>
"""
    # Data row; {4} is the speedup color, {5} the speedup text.
    HTML_ROW = """
        <tr>
                <td align='left'>{0}</td>
                <td align='left'>{1}</td>
                <td align='left'>{2}</td>
                <td align='left'>{3}</td>
                <td align='left'><font color='{4}'>{5}</font></td>
        </tr>
"""

    def html(self):
        """Renders the report as a standalone HTML page."""
        def row(name, old, new, delta, speedup, speedup_color):
            return self.HTML_ROW.format(
                name, old, new, delta, speedup_color, speedup)

        def header(contents):
            return self.HTML_HEADER_ROW.format(* contents)

        def table(title, results, speedup_color):
            # One section: a header row followed by one row per result,
            # with the speedup column tinted by `speedup_color`.
            rows = [
                row(*(result_comparison.values() + (speedup_color,)))
                for result_comparison in results
            ]
            return ('' if not rows else
                    header((title, len(results)) + results[0].header[1:]) +
                    ''.join(rows))

        return self.HTML.format(
            ''.join([
                # FIXME print self.old_branch, self.new_branch
                table('Regression', self.comparator.decreased, 'red'),
                table('Improvement', self.comparator.increased, 'green'),
                ('' if self.changes_only else
                 table('No Changes', self.comparator.unchanged, 'black')),
                table('Added', self.comparator.added, ''),
                table('Removed', self.comparator.removed, '')
            ]))
def parse_args(args):
    """Parse command line arguments and set default values.

    Args:
        args: list of str, the argument vector (without the program name).

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='Compare Performance tests.')
    # Required inputs: the two CSV logs being compared.
    parser.add_argument('--old-file', required=True,
                        help='Baseline performance test suite (csv file)')
    parser.add_argument('--new-file', required=True,
                        help='New performance test suite (csv file)')
    # Optional report configuration.
    parser.add_argument('--format', default='markdown',
                        choices=['markdown', 'git', 'html'],
                        help='Output format. Default is markdown.')
    parser.add_argument('--output', help='Output file name')
    parser.add_argument('--changes-only', action='store_true',
                        help='Output only affected tests')
    parser.add_argument('--new-branch', default='NEW_MIN',
                        help='Name of the new branch')
    parser.add_argument('--old-branch', default='OLD_MIN',
                        help='Name of the old branch')
    parser.add_argument('--delta-threshold', type=float, default=0.05,
                        help='Delta threshold. Default 0.05.')
    return parser.parse_args(args)
def main():
    """Compare the two CSV logs and print the report.

    Parses sys.argv, runs the comparison and renders it in the requested
    format. The report is always printed to stdout and, when --output is
    given, also written to that file.
    """
    args = parse_args(sys.argv[1:])
    comparator = TestComparator(args.old_file, args.new_file,
                                args.delta_threshold)
    formatter = ReportFormatter(comparator, args.old_branch, args.new_branch,
                                args.changes_only)

    # Dispatch table: --format value -> bound formatter method.
    formats = {
        'markdown': formatter.markdown,
        'git': formatter.git,
        'html': formatter.html
    }

    report = formats[args.format]()
    print(report)

    if args.output:
        with open(args.output, 'w') as f:
            f.write(report)
| apache-2.0 |
jiumutazhao/Flask-web-Development | app/models.py | 1 | 1264 | from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import UserMixin
from . import db, login_manager
class Role(db.Model):
    """Database model for a user role."""
    __tablename__ = 'roles'
    id = db.Column(db.Integer, primary_key=True)
    # Role name, unique across all roles.
    name = db.Column(db.String(64), unique=True)
    # One-to-many relationship to User. backref='role' adds a `role`
    # attribute on User; lazy='dynamic' makes `users` return a query
    # object instead of loading all users eagerly.
    users = db.relationship('User', backref='role', lazy='dynamic')

    def __repr__(self):
        return '<Role %r>' % self.name
class User(UserMixin, db.Model):
    """Database model for an application user.

    UserMixin supplies the default Flask-Login implementations of
    is_authenticated, is_active, is_anonymous and get_id.
    """
    __tablename__ = 'users'
    id = db.Column(db.Integer, primary_key=True)
    email = db.Column(db.String(64), unique=True, index=True)
    username = db.Column(db.String(64), unique=True, index=True)
    # Foreign key into roles.id; the `role` attribute itself is provided
    # by the backref declared on Role.users.
    role_id = db.Column(db.Integer, db.ForeignKey('roles.id'))
    # Only the salted hash is stored, never the plain-text password.
    password_hash = db.Column(db.String(128))

    @property
    def password(self):
        """Reading the password back is deliberately forbidden."""
        raise AttributeError('password is not a readable attribute')

    @password.setter
    def password(self, password):
        # Hash on assignment so the plain text is never persisted.
        self.password_hash = generate_password_hash(password)

    def verify_password(self, password):
        """Returns True iff `password` matches the stored hash."""
        return check_password_hash(self.password_hash, password)

    def __repr__(self):
        return '<User %r>' % self.username
@login_manager.user_loader
def load_user(user_id):
    """Flask-Login callback: load a user from the (string) session user id."""
    return User.query.get(int(user_id))
| mit |
jonhadfield/ansible | test/units/executor/test_play_iterator.py | 44 | 4158 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
from ansible.compat.tests import unittest
from ansible.compat.tests.mock import patch, MagicMock
from ansible.errors import AnsibleError, AnsibleParserError
from ansible.executor.play_iterator import PlayIterator
from ansible.playbook import Playbook
from ansible.playbook.play_context import PlayContext
from units.mock.loader import DictDataLoader
class TestPlayIterator(unittest.TestCase):
    """Unit tests for PlayIterator's per-host task sequencing."""

    def setUp(self):
        # No shared fixtures; each test builds its own mocks.
        pass

    def tearDown(self):
        pass

    def test_play_iterator(self):
        # Fake loader serving both the play and the role's task file from
        # an in-memory dict instead of the filesystem.
        # NOTE(review): the YAML below is embedded as a string literal; its
        # exact indentation is significant to the YAML parser - confirm
        # against the original file before reformatting.
        fake_loader = DictDataLoader({
            "test_play.yml": """
            - hosts: all
              gather_facts: false
              roles:
              - test_role
              pre_tasks:
              - debug: msg="this is a pre_task"
              tasks:
              - debug: msg="this is a regular task"
              post_tasks:
              - debug: msg="this is a post_task"
            """,
            '/etc/ansible/roles/test_role/tasks/main.yml': """
            - debug: msg="this is a role task"
            """,
        })

        # Variable manager stub: no cached facts, no variables.
        mock_var_manager = MagicMock()
        mock_var_manager._fact_cache = dict()
        mock_var_manager.get_vars.return_value = dict()

        p = Playbook.load('test_play.yml', loader=fake_loader, variable_manager=mock_var_manager)

        # Ten mock hosts named host00..host09.
        hosts = []
        for i in range(0, 10):
            host = MagicMock()
            host.get_name.return_value = 'host%02d' % i
            hosts.append(host)

        inventory = MagicMock()
        inventory.get_hosts.return_value = hosts
        inventory.filter_hosts.return_value = hosts

        play_context = PlayContext(play=p._entries[0])

        itr = PlayIterator(
            inventory=inventory,
            play=p._entries[0],
            play_context=play_context,
            variable_manager=mock_var_manager,
            all_vars=dict(),
        )

        # Expected order for host00: pre_task, flush_handlers, role task,
        # regular task, flush_handlers, post_task, flush_handlers, done.

        # pre task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        # role task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertIsNotNone(task._role)
        # regular play task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        self.assertIsNone(task._role)
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        # post task
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'debug')
        # implicit meta: flush_handlers
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNotNone(task)
        self.assertEqual(task.action, 'meta')
        # end of iteration
        (host_state, task) = itr.get_next_task_for_host(hosts[0])
        self.assertIsNone(task)
| gpl-3.0 |
ButterflyNetwork/bazel | third_party/py/abseil/absl/flags/_flagvalues.py | 15 | 45530 | # Copyright 2017 The Abseil Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Defines the FlagValues class - registry of 'Flag' objects.
Do NOT import this module directly. Import the flags package and use the
aliases defined at the package level instead.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import itertools
import logging
import os
import sys
import warnings
from xml.dom import minidom
from absl.flags import _exceptions
from absl.flags import _flag
from absl.flags import _helpers
import six
# Add flagvalues module to disclaimed module ids.
_helpers.disclaim_module_ids.add(id(sys.modules[__name__]))
class FlagValues(object):
"""Registry of 'Flag' objects.
A 'FlagValues' can then scan command line arguments, passing flag
arguments through to the 'Flag' objects that it owns. It also
provides easy access to the flag values. Typically only one
'FlagValues' object is needed by an application: flags.FLAGS
This class is heavily overloaded:
'Flag' objects are registered via __setitem__:
FLAGS['longname'] = x # register a new flag
The .value attribute of the registered 'Flag' objects can be accessed
as attributes of this 'FlagValues' object, through __getattr__. Both
the long and short name of the original 'Flag' objects can be used to
access its value:
FLAGS.longname # parsed flag value
FLAGS.x # parsed flag value (short name)
Command line arguments are scanned and passed to the registered 'Flag'
objects through the __call__ method. Unparsed arguments, including
argv[0] (e.g. the program name) are returned.
argv = FLAGS(sys.argv) # scan command line arguments
The original registered Flag objects can be retrieved through the use
of the dictionary-like operator, __getitem__:
x = FLAGS['longname'] # access the registered Flag object
The str() operator of a 'FlagValues' object provides help for all of
the registered 'Flag' objects.
"""
# A note on collections.abc.Mapping:
# FlagValues defines __getitem__, __iter__, and __len__. It makes perfect
# sense to let it be a collections.abc.Mapping class. However, we are not
# able to do so. The mixin methods, e.g. keys, values, are not uncommon flag
# names. Those flag values would not be accessible via the FLAGS.xxx form.
  def __init__(self):
    """Initializes an empty registry of Flag objects.

    All internal state lives directly in __dict__ under names that are not
    valid flag names, because attribute access on instances is overloaded
    to read and write flag values.
    """
    # Since everything in this class is so heavily overloaded, the only
    # way of defining and using fields is to access __dict__ directly.

    # Dictionary: flag name (string) -> Flag object.
    self.__dict__['__flags'] = {}

    # Set: name of hidden flag (string).
    # Holds flags that should not be directly accessible from Python.
    self.__dict__['__hiddenflags'] = set()

    # Dictionary: module name (string) -> list of Flag objects that are defined
    # by that module.
    self.__dict__['__flags_by_module'] = {}
    # Dictionary: module id (int) -> list of Flag objects that are defined by
    # that module.
    self.__dict__['__flags_by_module_id'] = {}
    # Dictionary: module name (string) -> list of Flag objects that are
    # key for that module.
    self.__dict__['__key_flags_by_module'] = {}

    # Bool: True if flags were parsed.
    self.__dict__['__flags_parsed'] = False

    # Bool: True if unparse_flags() was called.
    self.__dict__['__unparse_flags_called'] = False

    # None or Method(name, value) to call from __setattr__ for an unknown flag.
    self.__dict__['__set_unknown'] = None

    # A set of banned flag names. This is to prevent users from accidentally
    # defining a flag that has the same name as a method on this class.
    # Users can still allow defining the flag by passing
    # allow_using_method_names=True in DEFINE_xxx functions.
    self.__dict__['__banned_flag_names'] = frozenset(dir(FlagValues))

    # Bool: Whether to use GNU style scanning.
    self.__dict__['__use_gnu_getopt'] = True

    # Bool: Whether use_gnu_getopt has been explicitly set by the user.
    self.__dict__['__use_gnu_getopt_explicitly_set'] = False

    # Function: Takes a flag name as parameter, returns a tuple
    # (is_retired, type_is_bool).
    self.__dict__['__is_retired_flag_func'] = None
  def set_gnu_getopt(self, gnu_getopt=True):
    """Sets whether or not to use GNU style scanning.

    GNU style allows mixing of flag and non-flag arguments. See
    http://docs.python.org/library/getopt.html#getopt.gnu_getopt

    Args:
      gnu_getopt: bool, whether or not to use GNU style scanning.
    """
    self.__dict__['__use_gnu_getopt'] = gnu_getopt
    # Record that the user chose this explicitly, so later code can tell
    # the default apart from a deliberate setting.
    self.__dict__['__use_gnu_getopt_explicitly_set'] = True

  def is_gnu_getopt(self):
    """Returns bool, True iff GNU style argument scanning is enabled."""
    return self.__dict__['__use_gnu_getopt']

  def _flags(self):
    """Returns the dictionary of flag name (string) -> Flag object."""
    return self.__dict__['__flags']

  def flags_by_module_dict(self):
    """Returns the dictionary of module_name -> list of defined flags.

    Returns:
      A dictionary. Its keys are module names (strings). Its values
      are lists of Flag objects.
    """
    return self.__dict__['__flags_by_module']

  def flags_by_module_id_dict(self):
    """Returns the dictionary of module_id -> list of defined flags.

    Returns:
      A dictionary. Its keys are module IDs (ints). Its values
      are lists of Flag objects.
    """
    return self.__dict__['__flags_by_module_id']

  def key_flags_by_module_dict(self):
    """Returns the dictionary of module_name -> list of key flags.

    Returns:
      A dictionary. Its keys are module names (strings). Its values
      are lists of Flag objects.
    """
    return self.__dict__['__key_flags_by_module']
  def register_flag_by_module(self, module_name, flag):
    """Records the module that defines a specific flag.

    We keep track of which flag is defined by which module so that we
    can later sort the flags by module.

    Args:
      module_name: str, the name of a Python module.
      flag: Flag, the Flag instance that is key to the module.
    """
    flags_by_module = self.flags_by_module_dict()
    flags_by_module.setdefault(module_name, []).append(flag)

  def register_flag_by_module_id(self, module_id, flag):
    """Records the module that defines a specific flag.

    Args:
      module_id: int, the ID of the Python module.
      flag: Flag, the Flag instance that is key to the module.
    """
    flags_by_module_id = self.flags_by_module_id_dict()
    flags_by_module_id.setdefault(module_id, []).append(flag)

  def register_key_flag_for_module(self, module_name, flag):
    """Specifies that a flag is a key flag for a module.

    Args:
      module_name: str, the name of a Python module.
      flag: Flag, the Flag instance that is key to the module.
    """
    key_flags_by_module = self.key_flags_by_module_dict()
    # The list of key flags for the module named module_name.
    key_flags = key_flags_by_module.setdefault(module_name, [])
    # Add flag, but avoid duplicates.
    if flag not in key_flags:
      key_flags.append(flag)
def _flag_is_registered(self, flag_obj):
"""Checks whether a Flag object is registered under long name or short name.
Args:
flag_obj: Flag, the Flag instance to check for.
Returns:
bool, True iff flag_obj is registered under long name or short name.
"""
flag_dict = self._flags()
# Check whether flag_obj is registered under its long name.
name = flag_obj.name
if flag_dict.get(name, None) == flag_obj:
return True
# Check whether flag_obj is registered under its short name.
short_name = flag_obj.short_name
if (short_name is not None and
flag_dict.get(short_name, None) == flag_obj):
return True
return False
  def _cleanup_unregistered_flag_from_module_dicts(self, flag_obj):
    """Cleans up unregistered flags from all module -> [flags] dictionaries.

    If flag_obj is registered under either its long name or short name, it
    won't be removed from the dictionaries.

    Args:
      flag_obj: Flag, the Flag instance to clean up for.
    """
    if self._flag_is_registered(flag_obj):
      return
    # Remove the flag from all three bookkeeping dictionaries.
    for flags_by_module_dict in (self.flags_by_module_dict(),
                                 self.flags_by_module_id_dict(),
                                 self.key_flags_by_module_dict()):
      for flags_in_module in six.itervalues(flags_by_module_dict):
        # While (as opposed to if) takes care of multiple occurrences of a
        # flag in the list for the same module.
        while flag_obj in flags_in_module:
          flags_in_module.remove(flag_obj)
  def _get_flags_defined_by_module(self, module):
    """Returns the list of flags defined by a module.

    Args:
      module: module|str, the module to get flags from.

    Returns:
      [Flag], a new list of Flag instances. Caller may update this list as
      desired: none of those changes will affect the internals of this
      FlagValue instance.
    """
    if not isinstance(module, str):
      module = module.__name__
    # Return a copy so callers cannot mutate the internal registry.
    return list(self.flags_by_module_dict().get(module, []))

  def get_key_flags_for_module(self, module):
    """Returns the list of key flags for a module.

    Args:
      module: module|str, the module to get key flags from.

    Returns:
      [Flag], a new list of Flag instances. Caller may update this list as
      desired: none of those changes will affect the internals of this
      FlagValue instance.
    """
    if not isinstance(module, str):
      module = module.__name__
    # Any flag is a key flag for the module that defined it. NOTE:
    # key_flags is a fresh list: we can update it without affecting the
    # internals of this FlagValues object.
    key_flags = self._get_flags_defined_by_module(module)
    # Take into account flags explicitly declared as key for a module.
    for flag in self.key_flags_by_module_dict().get(module, []):
      if flag not in key_flags:
        key_flags.append(flag)
    return key_flags
  def find_module_defining_flag(self, flagname, default=None):
    """Return the name of the module defining this flag, or default.

    Args:
      flagname: str, name of the flag to lookup.
      default: Value to return if flagname is not defined. Defaults
        to None.

    Returns:
      The name of the module which registered the flag with this name.
      If no such module exists (i.e. no flag with this name exists),
      we return default.
    """
    registered_flag = self._flags().get(flagname)
    if registered_flag is None:
      return default
    # Linear scan of all modules' flag lists; mirrors
    # find_module_id_defining_flag below.
    for module, flags in six.iteritems(self.flags_by_module_dict()):
      for flag in flags:
        # It must compare the flag with the one in _flags. This is because a
        # flag might be overridden only for its long name (or short name),
        # and only its short name (or long name) is considered registered.
        if (flag.name == registered_flag.name and
            flag.short_name == registered_flag.short_name):
          return module
    return default

  def find_module_id_defining_flag(self, flagname, default=None):
    """Return the ID of the module defining this flag, or default.

    Args:
      flagname: str, name of the flag to lookup.
      default: Value to return if flagname is not defined. Defaults
        to None.

    Returns:
      The ID of the module which registered the flag with this name.
      If no such module exists (i.e. no flag with this name exists),
      we return default.
    """
    registered_flag = self._flags().get(flagname)
    if registered_flag is None:
      return default
    for module_id, flags in six.iteritems(self.flags_by_module_id_dict()):
      for flag in flags:
        # It must compare the flag with the one in _flags. This is because a
        # flag might be overridden only for its long name (or short name),
        # and only its short name (or long name) is considered registered.
        if (flag.name == registered_flag.name and
            flag.short_name == registered_flag.short_name):
          return module_id
    return default
  def _register_unknown_flag_setter(self, setter):
    """Allow set default values for undefined flags.

    Args:
      setter: Method(name, value) to call to __setattr__ an unknown flag.
        Must raise NameError or ValueError for invalid name/value.
    """
    self.__dict__['__set_unknown'] = setter

  def _set_unknown_flag(self, name, value):
    """Returns value if setting flag |name| to |value| returned True.

    Args:
      name: str, name of the flag to set.
      value: Value to set.

    Returns:
      Flag value on successful call.

    Raises:
      UnrecognizedFlagError
      IllegalFlagValueError
    """
    setter = self.__dict__['__set_unknown']
    if setter:
      try:
        setter(name, value)
        return value
      except (TypeError, ValueError):  # Flag value is not valid.
        raise _exceptions.IllegalFlagValueError(
            '"{1}" is not valid for --{0}' .format(name, value))
      except NameError:  # Flag name is not valid.
        pass
    # No setter registered, or the setter rejected the name: the flag is
    # simply unknown.
    raise _exceptions.UnrecognizedFlagError(name, value)
  def append_flag_values(self, flag_values):
    """Appends flags registered in another FlagValues instance.

    Args:
      flag_values: FlagValues, the FlagValues instance from which to copy flags.
    """
    for flag_name, flag in six.iteritems(flag_values._flags()):  # pylint: disable=protected-access
      # Each flags with short_name appears here twice (once under its
      # normal name, and again with its short name). To prevent
      # problems (DuplicateFlagError) with double flag registration, we
      # perform a check to make sure that the entry we're looking at is
      # for its normal name.
      if flag_name == flag.name:
        try:
          self[flag_name] = flag
        except _exceptions.DuplicateFlagError:
          # Re-raise with both registries attached for a better message.
          raise _exceptions.DuplicateFlagError.from_flag(
              flag_name, self, other_flag_values=flag_values)

  def remove_flag_values(self, flag_values):
    """Remove flags that were previously appended from another FlagValues.

    Args:
      flag_values: FlagValues, the FlagValues instance containing flags to
        remove.
    """
    for flag_name in flag_values:
      self.__delattr__(flag_name)
  def __setitem__(self, name, flag):
    """Registers a new flag variable.

    Args:
      name: str, the name to register the flag under (long or short name).
      flag: Flag, the flag object to register.

    Raises:
      IllegalFlagValueError: Raised if flag is not a Flag instance.
      Error: Raised if name is not a string, or is empty.
      DuplicateFlagError: Raised if the name is already registered and
        neither the old nor the new flag allows overriding.
      FlagNameConflictsWithMethodError: Raised (indirectly) when the name
        collides with a FlagValues method name.
    """
    fl = self._flags()
    if not isinstance(flag, _flag.Flag):
      raise _exceptions.IllegalFlagValueError(flag)
    if str is bytes and isinstance(name, unicode):
      # When using Python 2 with unicode_literals, allow it but encode it
      # into the bytes type we require.
      name = name.encode('utf-8')
    if not isinstance(name, type('')):
      raise _exceptions.Error('Flag name must be a string')
    if not name:
      raise _exceptions.Error('Flag name cannot be empty')
    self._check_method_name_conflicts(name, flag)
    if name in fl and not flag.allow_override and not fl[name].allow_override:
      module, module_name = _helpers.get_calling_module_object_and_name()
      if (self.find_module_defining_flag(name) == module_name and
          id(module) != self.find_module_id_defining_flag(name)):
        # If the flag has already been defined by a module with the same name,
        # but a different ID, we can stop here because it indicates that the
        # module is simply being imported a subsequent time.
        return
      raise _exceptions.DuplicateFlagError.from_flag(name, self)
    short_name = flag.short_name
    # If a new flag overrides an old one, we need to cleanup the old flag's
    # modules if it's not registered.
    flags_to_cleanup = set()
    if short_name is not None:
      if (short_name in fl and not flag.allow_override and
          not fl[short_name].allow_override):
        raise _exceptions.DuplicateFlagError.from_flag(short_name, self)
      if short_name in fl and fl[short_name] != flag:
        flags_to_cleanup.add(fl[short_name])
      fl[short_name] = flag
    # Only update the long-name entry for a genuinely new flag, or when the
    # replacement carries an explicitly-set value — so a parsed value is
    # never clobbered by a default-valued re-registration.
    if (name not in fl  # new flag
        or fl[name].using_default_value
        or not flag.using_default_value):
      if name in fl and fl[name] != flag:
        flags_to_cleanup.add(fl[name])
      fl[name] = flag
    for f in flags_to_cleanup:
      self._cleanup_unregistered_flag_from_module_dicts(f)
def __dir__(self):
"""Returns list of names of all defined flags.
Useful for TAB-completion in ipython.
Returns:
[str], a list of names of all defined flags.
"""
return sorted(self.__dict__['__flags'])
def __getitem__(self, name):
"""Returns the Flag object for the flag --name."""
return self._flags()[name]
def _hide_flag(self, name):
"""Marks the flag --name as hidden."""
self.__dict__['__hiddenflags'].add(name)
# This exists for legacy reasons, and will be removed in the future.
def _is_unparsed_flag_access_allowed(self, name):
"""Determine whether to allow unparsed flag access or not."""
del name
return False
  def __getattr__(self, name):
    """Retrieves the 'value' attribute of the flag --name.

    Args:
      name: str, the flag name.

    Returns:
      The flag's value, once flags have been parsed or when the flag was
      explicitly set on the command line (flag.present).

    Raises:
      AttributeError: Raised if the flag is undefined or hidden.
      UnparsedFlagAccessError: Raised when the value is read before flags
        were parsed, unless legacy unparsed access is allowed.
    """
    fl = self._flags()
    if name not in fl:
      raise AttributeError(name)
    if name in self.__dict__['__hiddenflags']:
      # Hidden flags behave as if they were undefined.
      raise AttributeError(name)
    if self.__dict__['__flags_parsed'] or fl[name].present:
      return fl[name].value
    else:
      error_message = (
          'Trying to access flag --%s before flags were parsed.' % name)
      if self._is_unparsed_flag_access_allowed(name):
        # Print warning to stderr. Messages in logs are often ignored/unnoticed.
        warnings.warn(
            error_message + ' This will raise an exception in the future.',
            RuntimeWarning,
            stacklevel=2)
        # Force logging.exception() to behave realistically, but don't propagate
        # exception up. Allow flag value to be returned (for now).
        try:
          raise _exceptions.UnparsedFlagAccessError(error_message)
        except _exceptions.UnparsedFlagAccessError:
          logging.exception(error_message)
        return fl[name].value
      else:
        if six.PY2:
          # In Python 2, hasattr returns False if getattr raises any exception.
          # That means if someone calls hasattr(FLAGS, 'flag'), it returns False
          # instead of raises UnparsedFlagAccessError even if --flag is already
          # defined. To make the error more visible, the best we can do is to
          # log an error message before raising the exception.
          # Don't log a full stacktrace here since that makes other callers
          # get too much noise.
          logging.error(error_message)
        raise _exceptions.UnparsedFlagAccessError(error_message)
def __setattr__(self, name, value):
"""Sets the 'value' attribute of the flag --name."""
fl = self._flags()
if name in self.__dict__['__hiddenflags']:
raise AttributeError(name)
if name not in fl:
return self._set_unknown_flag(name, value)
fl[name].value = value
self._assert_validators(fl[name].validators)
fl[name].using_default_value = False
return value
def _assert_all_validators(self):
all_validators = set()
for flag in six.itervalues(self._flags()):
for validator in flag.validators:
all_validators.add(validator)
self._assert_validators(all_validators)
def _assert_validators(self, validators):
"""Asserts if all validators in the list are satisfied.
It asserts validators in the order they were created.
Args:
validators: Iterable(validators.Validator), validators to be
verified.
Raises:
AttributeError: Raised if validators work with a non-existing flag.
IllegalFlagValueError: Raised if validation fails for at least one
validator.
"""
for validator in sorted(
validators, key=lambda validator: validator.insertion_index):
try:
validator.verify(self)
except _exceptions.ValidationError as e:
message = validator.print_flags_with_values(self)
raise _exceptions.IllegalFlagValueError('%s: %s' % (message, str(e)))
def __delattr__(self, flag_name):
"""Deletes a previously-defined flag from a flag object.
This method makes sure we can delete a flag by using
del FLAGS.<flag_name>
E.g.,
flags.DEFINE_integer('foo', 1, 'Integer flag.')
del flags.FLAGS.foo
If a flag is also registered by its the other name (long name or short
name), the other name won't be deleted.
Args:
flag_name: str, the name of the flag to be deleted.
Raises:
AttributeError: Raised when there is no registered flag named flag_name.
"""
fl = self._flags()
if flag_name not in fl:
raise AttributeError(flag_name)
flag_obj = fl[flag_name]
del fl[flag_name]
self._cleanup_unregistered_flag_from_module_dicts(flag_obj)
def set_default(self, name, value):
"""Changes the default value of the named flag object.
The flag's current value is also updated if the flag is currently using
the default value, i.e. not specified in the command line, and not set
by FLAGS.name = value.
Args:
name: str, the name of the flag to modify.
value: The new default value.
Raises:
UnrecognizedFlagError: Raised when there is no registered flag named name.
IllegalFlagValueError: Raised when value is not valid.
"""
fl = self._flags()
if name not in fl:
self._set_unknown_flag(name, value)
return
fl[name]._set_default(value) # pylint: disable=protected-access
self._assert_validators(fl[name].validators)
def __contains__(self, name):
"""Returns True if name is a value (flag) in the dict."""
return name in self._flags()
def __len__(self):
return len(self.__dict__['__flags'])
def __iter__(self):
return iter(self._flags())
  def __call__(self, argv):
    """Parses flags from argv; stores parsed flags into this FlagValues object.

    All unparsed arguments are returned.

    Args:
      argv: a tuple/list of strings.

    Returns:
      The list of arguments not parsed as options, including argv[0].

    Raises:
      Error: Raised on any parsing error.
      TypeError: Raised on passing wrong type of arguments.
      ValueError: Raised on flag value parsing error.
    """
    if _helpers.is_bytes_or_string(argv):
      raise TypeError(
          'argv should be a tuple/list of strings, not bytes or string.')
    if not argv:
      raise ValueError(
          'argv cannot be an empty list, and must contain the program name as '
          'the first element.')
    # This pre parses the argv list for --flagfile=<> options.
    program_name = argv[0]
    args = self.read_flags_from_files(argv[1:], force_gnu=False)
    # Parse the arguments.
    unknown_flags, unparsed_args, undefok = self._parse_args(args)
    # Handle unknown flags by raising UnrecognizedFlagError.
    # Note some users depend on us raising this particular error.
    for name, value in unknown_flags:
      if name in undefok:
        # --undefok explicitly tolerates this unknown flag; skip it.
        continue
      suggestions = _helpers.get_flag_suggestions(name, list(self))
      raise _exceptions.UnrecognizedFlagError(
          name, value, suggestions=suggestions)
    self.mark_as_parsed()
    # Run all registered flag validators now that every value is final.
    self._assert_all_validators()
    return [program_name] + unparsed_args
def _set_is_retired_flag_func(self, is_retired_flag_func):
"""Sets a function for checking retired flags.
Do not use it. This is a private absl API used to check retired flags
registered by the absl C++ flags library.
Args:
is_retired_flag_func: Callable(str) -> (bool, bool), a function takes flag
name as parameter, returns a tuple (is_retired, type_is_bool).
"""
self.__dict__['__is_retired_flag_func'] = is_retired_flag_func
  def _parse_args(self, args):
    """Helper function to do the main argument parsing.

    This function goes through args and does the bulk of the flag parsing.
    It will find the corresponding flag in our flag dictionary, and call its
    .parse() method on the flag value.

    Args:
      args: [str], a list of strings with the arguments to parse.

    Returns:
      A tuple with the following:
        unknown_flags: List of (flag name, arg) for flags we don't know about.
        unparsed_args: List of arguments we did not parse.
        undefok: Set of flags that were given via --undefok.

    Raises:
      Error: Raised on any parsing error.
      ValueError: Raised on flag value parsing error.
    """
    unknown_flags, unparsed_args, undefok = [], [], set()
    retired_flag_func = self.__dict__['__is_retired_flag_func']
    flag_dict = self._flags()
    args = iter(args)
    for arg in args:
      value = None
      def get_value():
        # Returns the flag's value: either the inline '=value' part, or the
        # next argument consumed from the args iterator.
        # pylint: disable=cell-var-from-loop
        try:
          return next(args) if value is None else value
        except StopIteration:
          raise _exceptions.Error('Missing value for flag ' + arg)  # pylint: disable=undefined-loop-variable
      if not arg.startswith('-'):
        # A non-argument: default is break, GNU is skip.
        unparsed_args.append(arg)
        if self.is_gnu_getopt():
          continue
        else:
          break
      if arg == '--':
        # Everything after a bare '--' is left unparsed (getopt convention).
        break
      # At this point, arg must start with '-'.
      if arg.startswith('--'):
        arg_without_dashes = arg[2:]
      else:
        arg_without_dashes = arg[1:]
      if '=' in arg_without_dashes:
        name, value = arg_without_dashes.split('=', 1)
      else:
        name, value = arg_without_dashes, None
      if not name:
        # The argument is all dashes (including one dash).
        unparsed_args.append(arg)
        if self.is_gnu_getopt():
          continue
        else:
          break
      # --undefok is a special case.
      if name == 'undefok':
        value = get_value()
        undefok.update(v.strip() for v in value.split(','))
        # Also tolerate the --no<flag> spelling of each undefok'd flag.
        undefok.update('no' + v.strip() for v in value.split(','))
        continue
      flag = flag_dict.get(name)
      if flag:
        if flag.boolean and value is None:
          # A bare boolean flag ('--flag') means true.
          value = 'true'
        else:
          value = get_value()
      elif name.startswith('no') and len(name) > 2:
        # Boolean flags can take the form of --noflag, with no value.
        noflag = flag_dict.get(name[2:])
        if noflag and noflag.boolean:
          if value is not None:
            raise ValueError(arg + ' does not take an argument')
          flag = noflag
          value = 'false'
      if retired_flag_func and not flag:
        is_retired, is_bool = retired_flag_func(name)
        # If we didn't recognize that flag, but it starts with
        # "no" then maybe it was a boolean flag specified in the
        # --nofoo form.
        if not is_retired and name.startswith('no'):
          is_retired, is_bool = retired_flag_func(name[2:])
          is_retired = is_retired and is_bool
        if is_retired:
          if not is_bool and value is None:
            # This happens when a non-bool retired flag is specified
            # in format of "--flag value".
            get_value()
          logging.error('Flag "%s" is retired and should no longer '
                        'be specified. See go/totw/90.', name)
          continue
      if flag:
        flag.parse(value)
        flag.using_default_value = False
      else:
        unknown_flags.append((name, arg))
    # Anything left in the iterator (e.g. after '--') stays unparsed.
    unparsed_args.extend(list(args))
    return unknown_flags, unparsed_args, undefok
def is_parsed(self):
"""Returns whether flags were parsed."""
return self.__dict__['__flags_parsed']
def mark_as_parsed(self):
"""Explicitly marks flags as parsed.
Use this when the caller knows that this FlagValues has been parsed as if
a __call__() invocation has happened. This is only a public method for
use by things like appcommands which do additional command like parsing.
"""
self.__dict__['__flags_parsed'] = True
def unparse_flags(self):
"""Unparses all flags to the point before any FLAGS(argv) was called."""
for f in self._flags().values():
f.unparse()
# We log this message before marking flags as unparsed to avoid a
# problem when the logging library causes flags access.
logging.info('unparse_flags() called; flags access will now raise errors.')
self.__dict__['__flags_parsed'] = False
self.__dict__['__unparse_flags_called'] = True
def flag_values_dict(self):
"""Returns a dictionary that maps flag names to flag values."""
return {name: flag.value for name, flag in six.iteritems(self._flags())}
def __str__(self):
"""Returns a help string for all known flags."""
return self.get_help()
  def get_help(self, prefix='', include_special_flags=True):
    """Returns a help string for all known flags.

    Args:
      prefix: str, per-line output prefix.
      include_special_flags: bool, whether to include description of
        _SPECIAL_FLAGS, i.e. --flagfile and --undefok.

    Returns:
      str, formatted help message.
    """
    helplist = []
    flags_by_module = self.flags_by_module_dict()
    if flags_by_module:
      modules = sorted(flags_by_module)
      # Print the help for the main module first, if possible.
      main_module = sys.argv[0]
      if main_module in modules:
        modules.remove(main_module)
        modules = [main_module] + modules
      for module in modules:
        self._render_our_module_flags(module, helplist, prefix)
      if include_special_flags:
        self._render_module_flags(
            'absl.flags',
            _helpers.SPECIAL_FLAGS._flags().values(),  # pylint: disable=protected-access
            helplist,
            prefix)
    else:
      # No per-module information available: just print one long list of
      # flags.
      values = six.itervalues(self._flags())
      if include_special_flags:
        values = itertools.chain(
            values, six.itervalues(_helpers.SPECIAL_FLAGS._flags()))  # pylint: disable=protected-access
      self._render_flag_list(values, helplist, prefix)
    return '\n'.join(helplist)
def _render_module_flags(self, module, flags, output_lines, prefix=''):
"""Returns a help string for a given module."""
if not isinstance(module, str):
module = module.__name__
output_lines.append('\n%s%s:' % (prefix, module))
self._render_flag_list(flags, output_lines, prefix + ' ')
def _render_our_module_flags(self, module, output_lines, prefix=''):
"""Returns a help string for a given module."""
flags = self._get_flags_defined_by_module(module)
if flags:
self._render_module_flags(module, flags, output_lines, prefix)
def _render_our_module_key_flags(self, module, output_lines, prefix=''):
"""Returns a help string for the key flags of a given module.
Args:
module: module|str, the module to render key flags for.
output_lines: [str], a list of strings. The generated help message
lines will be appended to this list.
prefix: str, a string that is prepended to each generated help line.
"""
key_flags = self.get_key_flags_for_module(module)
if key_flags:
self._render_module_flags(module, key_flags, output_lines, prefix)
def module_help(self, module):
"""Describes the key flags of a module.
Args:
module: module|str, the module to describe the key flags for.
Returns:
str, describing the key flags of a module.
"""
helplist = []
self._render_our_module_key_flags(module, helplist)
return '\n'.join(helplist)
def main_module_help(self):
"""Describes the key flags of the main module.
Returns:
str, describing the key flags of the main module.
"""
return self.module_help(sys.argv[0])
  def _render_flag_list(self, flaglist, output_lines, prefix=' '):
    """Appends one formatted help entry per flag to output_lines.

    Args:
      flaglist: Iterable of Flag objects to render.
      output_lines: [str], list the generated help text is appended to.
      prefix: str, indent prefix used when wrapping the help text.
    """
    fl = self._flags()
    special_fl = _helpers.SPECIAL_FLAGS._flags()  # pylint: disable=protected-access
    flaglist = [(flag.name, flag) for flag in flaglist]
    flaglist.sort()
    flagset = {}
    for (name, flag) in flaglist:
      # It's possible this flag got deleted or overridden since being
      # registered in the per-module flaglist. Check now against the
      # canonical source of current flag information, the _flags.
      if fl.get(name, None) != flag and special_fl.get(name, None) != flag:
        # a different flag is using this name now
        continue
      # only print help once
      if flag in flagset: continue
      flagset[flag] = 1
      flaghelp = ''
      if flag.short_name: flaghelp += '-%s,' % flag.short_name
      if flag.boolean:
        # Boolean flags advertise both the --flag and --noflag spellings.
        flaghelp += '--[no]%s:' % flag.name
      else:
        flaghelp += '--%s:' % flag.name
      flaghelp += ' '
      if flag.help:
        flaghelp += flag.help
      flaghelp = _helpers.text_wrap(
          flaghelp, indent=prefix+' ', firstline_indent=prefix)
      if flag.default_as_str:
        flaghelp += '\n'
        flaghelp += _helpers.text_wrap(
            '(default: %s)' % flag.default_as_str, indent=prefix+' ')
      if flag.parser.syntactic_help:
        flaghelp += '\n'
        flaghelp += _helpers.text_wrap(
            '(%s)' % flag.parser.syntactic_help, indent=prefix+' ')
      output_lines.append(flaghelp)
def get_flag_value(self, name, default): # pylint: disable=invalid-name
"""Returns the value of a flag (if not None) or a default value.
Args:
name: str, the name of a flag.
default: Default value to use if the flag value is None.
Returns:
Requested flag value or default.
"""
value = self.__getattr__(name)
if value is not None: # Can't do if not value, b/c value might be '0' or ""
return value
else:
return default
def _is_flag_file_directive(self, flag_string):
"""Checks whether flag_string contain a --flagfile=<foo> directive."""
if isinstance(flag_string, type('')):
if flag_string.startswith('--flagfile='):
return 1
elif flag_string == '--flagfile':
return 1
elif flag_string.startswith('-flagfile='):
return 1
elif flag_string == '-flagfile':
return 1
else:
return 0
return 0
def _extract_filename(self, flagfile_str):
"""Returns filename from a flagfile_str of form -[-]flagfile=filename.
The cases of --flagfile foo and -flagfile foo shouldn't be hitting
this function, as they are dealt with in the level above this
function.
Args:
flagfile_str: str, the flagfile string.
Returns:
str, the filename from a flagfile_str of form -[-]flagfile=filename.
Raises:
Error: Raised when illegal --flagfile is provided.
"""
if flagfile_str.startswith('--flagfile='):
return os.path.expanduser((flagfile_str[(len('--flagfile=')):]).strip())
elif flagfile_str.startswith('-flagfile='):
return os.path.expanduser((flagfile_str[(len('-flagfile=')):]).strip())
else:
raise _exceptions.Error(
'Hit illegal --flagfile type: %s' % flagfile_str)
  def _get_flag_file_lines(self, filename, parsed_file_stack=None):
    """Returns the useful (!=comments, etc) lines from a file with flags.

    Args:
      filename: str, the name of the flag file.
      parsed_file_stack: [str], a list of the names of the files that we have
        recursively encountered at the current depth. MUTATED BY THIS FUNCTION
        (but the original value is preserved upon successfully returning from
        function call).

    Returns:
      List of strings. See the note below.

    NOTE(springer): This function checks for a nested --flagfile=<foo>
    tag and handles the lower file recursively. It returns a list of
    all the lines that _could_ contain command flags. This is
    EVERYTHING except whitespace lines and comments (lines starting
    with '#' or '//').
    """
    if parsed_file_stack is None:
      parsed_file_stack = []
    # We do a little safety check for reparsing a file we've already encountered
    # at a previous depth.
    if filename in parsed_file_stack:
      sys.stderr.write('Warning: Hit circular flagfile dependency. Ignoring'
                       ' flagfile: %s\n' % (filename,))
      return []
    else:
      parsed_file_stack.append(filename)
    line_list = []  # All line from flagfile.
    flag_line_list = []  # Subset of lines w/o comments, blanks, flagfile= tags.
    try:
      file_obj = open(filename, 'r')
    except IOError as e_msg:
      raise _exceptions.CantOpenFlagFileError(
          'ERROR:: Unable to open flagfile: %s' % e_msg)
    with file_obj:
      line_list = file_obj.readlines()
    # This is where we check each line in the file we just read.
    for line in line_list:
      if line.isspace():
        # Blank lines are ignored.
        pass
      # Checks for comment (a line that starts with '#').
      elif line.startswith('#') or line.startswith('//'):
        pass
      # Checks for a nested "--flagfile=<bar>" flag in the current file.
      # If we find one, recursively parse down into that file.
      elif self._is_flag_file_directive(line):
        sub_filename = self._extract_filename(line)
        included_flags = self._get_flag_file_lines(
            sub_filename, parsed_file_stack=parsed_file_stack)
        flag_line_list.extend(included_flags)
      else:
        # Any line that's not a comment or a nested flagfile should get
        # copied into 2nd position. This leaves earlier arguments
        # further back in the list, thus giving them higher priority.
        flag_line_list.append(line.strip())
    # Restore the stack for the caller (successful return only).
    parsed_file_stack.pop()
    return flag_line_list
  def read_flags_from_files(self, argv, force_gnu=True):
    """Processes command line args, but also allow args to be read from file.

    Args:
      argv: [str], a list of strings, usually sys.argv[1:], which may contain
        one or more flagfile directives of the form --flagfile="./filename".
        Note that the name of the program (sys.argv[0]) should be omitted.
      force_gnu: bool, if False, --flagfile parsing obeys normal flag
        semantics. If True, --flagfile parsing instead follows gnu_getopt
        semantics. *** WARNING *** force_gnu=False may become the future
        default!

    Returns:
      A new list which has the original list combined with what we read
      from any flagfile(s).

    Raises:
      IllegalFlagValueError: Raised when --flagfile is provided with no
        argument.

    This function is called by FLAGS(argv).
    It scans the input list for a flag that looks like:
    --flagfile=<somefile>. Then it opens <somefile>, reads all valid key
    and value pairs and inserts them into the input list in exactly the
    place where the --flagfile arg is found.

    Note that your application's flags are still defined the usual way
    using absl.flags DEFINE_flag() type functions.

    Notes (assuming we're getting a commandline of some sort as our input):

    --> For duplicate flags, the last one we hit should "win".
    --> Since flags that appear later win, a flagfile's settings can be "weak"
        if the --flagfile comes at the beginning of the argument sequence,
        and it can be "strong" if the --flagfile comes at the end.
    --> A further "--flagfile=<otherfile.cfg>" CAN be nested in a flagfile.
        It will be expanded in exactly the spot where it is found.
    --> In a flagfile, a line beginning with # or // is a comment.
    --> Entirely blank lines _should_ be ignored.
    """
    rest_of_args = argv
    new_argv = []
    while rest_of_args:
      current_arg = rest_of_args[0]
      rest_of_args = rest_of_args[1:]
      if self._is_flag_file_directive(current_arg):
        # This handles the case of -(-)flagfile foo. In this case the
        # next arg really is part of this one.
        if current_arg == '--flagfile' or current_arg == '-flagfile':
          if not rest_of_args:
            raise _exceptions.IllegalFlagValueError(
                '--flagfile with no argument')
          flag_filename = os.path.expanduser(rest_of_args[0])
          rest_of_args = rest_of_args[1:]
        else:
          # This handles the case of (-)-flagfile=foo.
          flag_filename = self._extract_filename(current_arg)
        # Splice the file's contents in at the directive's position.
        new_argv.extend(self._get_flag_file_lines(flag_filename))
      else:
        new_argv.append(current_arg)
        # Stop parsing after '--', like getopt and gnu_getopt.
        if current_arg == '--':
          break
        # Stop parsing after a non-flag, like getopt.
        if not current_arg.startswith('-'):
          if not force_gnu and not self.__dict__['__use_gnu_getopt']:
            break
        else:
          if ('=' not in current_arg and
              rest_of_args and not rest_of_args[0].startswith('-')):
            # If this is an occurrence of a legitimate --x y, skip the value
            # so that it won't be mistaken for a standalone arg.
            fl = self._flags()
            name = current_arg.lstrip('-')
            if name in fl and not fl[name].boolean:
              current_arg = rest_of_args[0]
              rest_of_args = rest_of_args[1:]
              new_argv.append(current_arg)
    if rest_of_args:
      new_argv.extend(rest_of_args)
    return new_argv
def flags_into_string(self):
"""Returns a string with the flags assignments from this FlagValues object.
This function ignores flags whose value is None. Each flag
assignment is separated by a newline.
NOTE: MUST mirror the behavior of the C++ CommandlineFlagsIntoString
from https://github.com/gflags/gflags.
Returns:
str, the string with the flags assignments from this FlagValues object.
"""
s = ''
for flag in self._flags().values():
if flag.value is not None:
s += flag.serialize() + '\n'
return s
def append_flags_into_file(self, filename):
"""Appends all flags assignments from this FlagInfo object to a file.
Output will be in the format of a flagfile.
NOTE: MUST mirror the behavior of the C++ AppendFlagsIntoFile
from https://github.com/gflags/gflags.
Args:
filename: str, name of the file.
"""
with open(filename, 'a') as out_file:
out_file.write(self.flags_into_string())
  def write_help_in_xml_format(self, outfile=None):
    """Outputs flag documentation in XML format.

    NOTE: We use element names that are consistent with those used by
    the C++ command-line flag library, from
    https://github.com/gflags/gflags.
    We also use a few new elements (e.g., <key>), but we do not
    interfere / overlap with existing XML elements used by the C++
    library. Please maintain this consistency.

    Args:
      outfile: File object we write to. Default None means sys.stdout.
    """
    doc = minidom.Document()
    all_flag = doc.createElement('AllFlags')
    doc.appendChild(all_flag)
    all_flag.appendChild(_helpers.create_xml_dom_element(
        doc, 'program', os.path.basename(sys.argv[0])))
    # Use the main module's docstring as the program usage text, falling
    # back to a generic USAGE line when it has none.
    usage_doc = sys.modules['__main__'].__doc__
    if not usage_doc:
      usage_doc = '\nUSAGE: %s [flags]\n' % sys.argv[0]
    else:
      usage_doc = usage_doc.replace('%s', sys.argv[0])
    all_flag.appendChild(_helpers.create_xml_dom_element(
        doc, 'usage', usage_doc))
    # Get list of key flags for the main module.
    key_flags = self.get_key_flags_for_module(sys.argv[0])
    # Sort flags by declaring module name and next by flag name.
    flags_by_module = self.flags_by_module_dict()
    all_module_names = list(flags_by_module.keys())
    all_module_names.sort()
    for module_name in all_module_names:
      flag_list = [(f.name, f) for f in flags_by_module[module_name]]
      flag_list.sort()
      for unused_flag_name, flag in flag_list:
        is_key = flag in key_flags
        all_flag.appendChild(flag._create_xml_dom_element(  # pylint: disable=protected-access
            doc, module_name, is_key=is_key))
    outfile = outfile or sys.stdout
    if six.PY2:
      outfile.write(doc.toprettyxml(indent=' ', encoding='utf-8'))
    else:
      # In Python 3, toprettyxml with an encoding returns bytes; decode
      # before writing to a text stream.
      outfile.write(
          doc.toprettyxml(indent=' ', encoding='utf-8').decode('utf-8'))
    outfile.flush()
def _check_method_name_conflicts(self, name, flag):
if flag.allow_using_method_names:
return
short_name = flag.short_name
flag_names = {name} if short_name is None else {name, short_name}
for flag_name in flag_names:
if flag_name in self.__dict__['__banned_flag_names']:
raise _exceptions.FlagNameConflictsWithMethodError(
'Cannot define a flag named "{name}". It conflicts with a method '
'on class "{class_name}". To allow defining it, use '
'allow_using_method_names and access the flag value with '
"FLAGS['{name}'].value. FLAGS.{name} returns the method, "
'not the flag value.'.format(
name=flag_name, class_name=type(self).__name__))
# The global FlagValues instance that absl-defined flags register into.
FLAGS = FlagValues()
| apache-2.0 |
aschampion/CATMAID | django/applications/catmaid/control/roi.py | 5 | 9438 | import json
import os.path
from django.conf import settings
from django.http import HttpResponse
from django.shortcuts import redirect
from django.core.cache import cache
from catmaid.control import cropping
from catmaid.control.authentication import requires_user_role
from catmaid.control.common import urljoin
from catmaid.models import UserRole, RegionOfInterest, Project, Relation, \
Stack, ClassInstance, RegionOfInterestClassInstance
from celery.task import task
from celery.utils.log import get_task_logger
# Prefix used for stored ROI image file names
file_prefix = "roi_"
# File extension of the stored ROI images
file_extension = "png"
# The path where cropped ROI images get stored
roi_path = os.path.join(settings.MEDIA_ROOT,
                        settings.MEDIA_ROI_SUBDIRECTORY)
# A common logger for the celery tasks
logger = get_task_logger(__name__)
# Image-creation locks expire after two minutes (in seconds)
LOCK_EXPIRE = 60 * 2
@requires_user_role([UserRole.Browse])
def get_roi_info(request, project_id=None, roi_id=None):
    """ Returns a JSON response with information about the region of
    interest with ID <roi_id>.
    """
    roi = RegionOfInterest.objects.get(id=roi_id)
    # Assemble the response payload field by field (order matters for the
    # serialized JSON output).
    info = {}
    info['id'] = roi.id
    info['zoom_level'] = roi.zoom_level
    info['location'] = [roi.location_x, roi.location_y, roi.location_z]
    info['width'] = roi.width
    info['height'] = roi.height
    info['rotation_cw'] = roi.rotation_cw
    info['stack_id'] = roi.stack.id
    info['project_id'] = roi.project.id
    return HttpResponse(json.dumps(info))
def _add_roi(project_id, stack_id, user_id, x_min, x_max, y_min, y_max, z,
             zoom_level, rotation_cw):
    """ Creates, saves and returns a new ROI database object.

    The ROI stores its center point plus width/height, so the given
    bounding box is converted accordingly. If settings.ROI_AUTO_CREATE_IMAGE
    is enabled, a cropped image of the ROI is created as well.
    """
    # Calculate ROI center and extent
    cx = (x_max + x_min) * 0.5
    cy = (y_max + y_min) * 0.5
    cz = z
    width = abs(x_max - x_min)
    height = abs(y_max - y_min)
    # Create a new ROI class instance
    roi = RegionOfInterest()
    roi.user_id = user_id
    roi.editor_id = user_id
    roi.project_id = project_id
    roi.stack_id = stack_id
    roi.zoom_level = zoom_level
    roi.location_x = cx
    roi.location_y = cy
    roi.location_z = cz
    roi.width = width
    roi.height = height
    roi.rotation_cw = rotation_cw
    roi.save()
    # Create cropped image, if wanted
    if settings.ROI_AUTO_CREATE_IMAGE:
        file_name, file_path = create_roi_path(roi.id)
        # Bug fix: the original referenced the undefined name 'request' here
        # (this helper only receives user_id), raising a NameError whenever
        # ROI_AUTO_CREATE_IMAGE was enabled. Pass the available user_id,
        # which is forwarded to the cropping job.
        # NOTE(review): confirm cropping.CropJob accepts a user id as well
        # as a user object.
        create_roi_image(user_id, project_id, roi.id, file_path)
    return roi
@requires_user_role(UserRole.Annotate)
def add_roi(request, project_id=None):
    """ Creates a new ROI from POST parameters and returns a JSON status. """
    # Required POST parameters; missing or malformed values raise here.
    post = request.POST
    x_min = float(post['x_min'])
    x_max = float(post['x_max'])
    y_min = float(post['y_min'])
    y_max = float(post['y_max'])
    z = float(post['z'])
    zoom_level = int(post['zoom_level'])
    rotation_cw = int(post['rotation_cw'])
    stack_id = int(post['stack'])
    roi = _add_roi(project_id, stack_id, request.user.id, x_min, x_max,
                   y_min, y_max, z, zoom_level, rotation_cw)
    # Build result data set
    status = {'status': "Created new ROI with ID %s." % roi.id}
    return HttpResponse(json.dumps(status))
@requires_user_role(UserRole.Annotate)
def link_roi_to_class_instance(request, project_id=None, relation_id=None,
                               stack_id=None, ci_id=None):
    """ With the help of this method one can link a region of interest
    (ROI) to a class instance. The information about the ROI is passed
    as POST variables.
    """
    # Try to get all needed POST parameters
    x_min = float(request.POST['x_min'])
    x_max = float(request.POST['x_max'])
    y_min = float(request.POST['y_min'])
    y_max = float(request.POST['y_max'])
    z = float(request.POST['z'])
    zoom_level = int(request.POST['zoom_level'])
    rotation_cw = int(request.POST['rotation_cw'])
    # Get related objects
    project = Project.objects.get(id=project_id)
    stack = Stack.objects.get(id=stack_id)
    ci = ClassInstance.objects.get(id=ci_id)
    rel = Relation.objects.get(id=relation_id)
    # Create and save the ROI itself
    roi = _add_roi(project.id, stack.id, request.user.id, x_min, x_max, y_min,
                   y_max, z, zoom_level, rotation_cw)
    # Link ROI and class instance
    roi_ci = RegionOfInterestClassInstance()
    roi_ci.user = request.user
    roi_ci.project = project
    roi_ci.relation = rel
    roi_ci.region_of_interest = roi
    roi_ci.class_instance = ci
    roi_ci.save()
    # Build result data set
    status = {'status': "Created new ROI with ID %s." % roi.id}
    return HttpResponse(json.dumps(status))
@requires_user_role([UserRole.Annotate, UserRole.Browse])
def remove_roi_link(request, project_id=None, roi_id=None):
    """ Removes the ROI link with the ID <roi_id>. If there are no more
    links to the actual ROI after the removal, the ROI gets removed as
    well, along with its cropped image (if present).
    """
    # Remove ROI link
    roi_link = RegionOfInterestClassInstance.objects.get(id=roi_id)
    roi_link.delete()
    # Remove the ROI if there are no more links to it. exists() lets the
    # database stop at the first match instead of counting all rows.
    remaining_links = RegionOfInterestClassInstance.objects.filter(
        region_of_interest=roi_link.region_of_interest)
    if not remaining_links.exists():
        # Delete the ROI class instance
        roi_link.region_of_interest.delete()
        # Make sure there is no cropped image left. isfile() implies the
        # path exists, so a separate exists() check is redundant.
        file_name, file_path = create_roi_path(roi_id)
        file_info = ""
        if os.path.isfile(file_path):
            # Use 'except OSError:' — the original 'except OSError, e:' is
            # Python 2 only syntax, and the exception value was unused.
            try:
                os.remove(file_path)
                file_info = " The same goes for its cropped image."
            except OSError:
                file_info = " However, its cropped image couldn't be removed."
        # Create status data
        status = {'status': "Removed ROI link with ID %s. The ROI " \
                "itself has been deleted as well.%s" % (roi_id, file_info)}
    else:
        status = {'status': "Removed ROI link with ID %s. The ROI " \
                "itself has not been deleted, because there are still " \
                "links to it." % roi_id}
    return HttpResponse(json.dumps(status))
def create_lock_name(roi_id):
    """ Returns the cache key used to lock image creation for an ROI. """
    return "{0}-lock-{1}".format('catmaid.create_roi_image', roi_id)
def create_roi_image(user, project_id, roi_id, file_path):
    """ Try to acquire the per-ROI creation lock and, on success, launch
    the celery task that renders the cropped image (the task releases the
    lock when it finishes). Returns True when the task was launched,
    False when another worker already holds the lock.
    """
    lock_id = create_lock_name(roi_id)
    # cache.add is atomic and fails if the key already exists, which makes
    # it usable as a simple distributed lock.
    got_lock = cache.add(lock_id, "true", LOCK_EXPIRE)
    if got_lock:
        create_roi_image_task.delay(user, project_id, roi_id, file_path)
        return True
    logger.debug("ROI %s is already taken care of by another worker" % roi_id)
    return False
@task(name='catmaid.create_roi_image')
def create_roi_image_task(user, project_id, roi_id, file_path):
    """ Celery task: render the cropped image for the ROI <roi_id> to
    <file_path>. The creation lock (see create_roi_image) is released in
    all cases, whether or not the rendering succeeded.
    """
    lock_id = create_lock_name(roi_id)
    # memcache delete is very slow, but we have to use it to take
    # advantage of using add() for atomic locking
    release_lock = lambda: cache.delete(lock_id)
    logger.debug("Creating cropped image for ROI with ID %s" % roi_id)
    try:
        # Get ROI
        roi = RegionOfInterest.objects.get(id=roi_id)
        # Prepare parameters: the ROI is stored as a center point plus
        # extent, while the cropping job expects min/max bounds.
        hwidth = roi.width * 0.5
        x_min = roi.location_x - hwidth
        x_max = roi.location_x + hwidth
        hheight = roi.height * 0.5
        y_min = roi.location_y - hheight
        y_max = roi.location_y + hheight
        # A ROI is a single section, so the Z range is degenerate.
        z_min = z_max = roi.location_z
        single_channel = False
        # Create a cropping job
        job = cropping.CropJob(user, project_id, [roi.stack.id],
            x_min, x_max, y_min, y_max, z_min, z_max, roi.rotation_cw,
            roi.zoom_level, single_channel)
        # Create the pgmagick images
        cropped_stacks = cropping.extract_substack( job )
        if len(cropped_stacks) == 0:
            raise StandardError("Couldn't create ROI image")
        # There is only one image here
        img = cropped_stacks[0]
        img.write(str(file_path))
    finally:
        # Always release the lock so a failed render can be retried.
        release_lock()
    return "Created image of ROI %s" % roi_id
def create_roi_path(roi_id):
    """ Return a (file name, file path) tuple for the cropped image that
    belongs to the ROI with the given ID.
    """
    # file_prefix / file_extension / roi_path are module-level settings.
    name = "%s%s.%s" % (file_prefix, roi_id, file_extension)
    return (name, os.path.join(roi_path, name))
@requires_user_role([UserRole.Browse])
def get_roi_image(request, project_id=None, roi_id=None):
    """ Redirect to the cropped image described by the ROI. These images
    are cached on disk and won't get removed automatically. If the image
    already exists its URL is returned directly; otherwise its creation is
    started asynchronously and a placeholder "please wait" image is served.
    For performance reasons it might be a good idea to add this existence
    test to the web-server config.
    """
    file_name, file_path = create_roi_path(roi_id)
    if os.path.exists(file_path):
        # The cropped image is available already, point at it directly.
        base_url = urljoin(settings.MEDIA_URL,
                           settings.MEDIA_ROI_SUBDIRECTORY)
        url = urljoin(base_url, file_name)
    else:
        # Kick off asynchronous image creation and serve a placeholder
        # until a worker has produced the real image.
        create_roi_image(request.user, project_id, roi_id, file_path)
        url = urljoin(settings.STATIC_URL,
                      "images/wait_bgwhite.gif")
    return redirect(url)
| gpl-3.0 |
sporttech/phantomjs | src/breakpad/src/tools/gyp/test/defines/gyptest-defines-env-regyp.py | 151 | 1312 | #!/usr/bin/env python
# Copyright (c) 2009 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""
Verifies build of an executable with C++ define specified by a gyp define, and
the use of the environment during regeneration when the gyp file changes.
"""

import os

import TestGyp

# Regenerating build files when a gyp file changes is currently only supported
# by the make generator.
test = TestGyp.TestGyp(formats=['make'])

try:
    os.environ['GYP_DEFINES'] = 'value=50'
    test.run_gyp('defines.gyp')
finally:
    # We clear the environ after calling gyp.  When the auto-regeneration happens,
    # the same define should be reused anyway.  Reset to empty string first in
    # case the platform doesn't support unsetenv.
    os.environ['GYP_DEFINES'] = ''
    del os.environ['GYP_DEFINES']

test.build('defines.gyp')

# The initial generation used the statically configured values, not the
# (now cleared) environment ones.
expect = """\
FOO is defined
VALUE is 1
"""
test.run_built_executable('defines', stdout=expect)

# Sleep so that the changed gyp file will have a newer timestamp than the
# previously generated build files.
test.sleep()
test.write('defines.gyp', test.read('defines-env.gyp'))
test.build('defines.gyp', test.ALL)

# After auto-regeneration the define captured from GYP_DEFINES must be used.
expect = """\
VALUE is 50
"""
test.run_built_executable('defines', stdout=expect)

test.pass_test()
| bsd-3-clause |
senthil10/scilifelab | scilifelab/pm/__init__.py | 4 | 1657 | """
Project Management Tools
"""
__import__('pkg_resources').declare_namespace(__name__)
import os
import sys
import re
import argparse
import textwrap
import subprocess
from cStringIO import StringIO
from cement.core import foundation, controller, handler, backend, output, hook
from scilifelab.pm.core import command
from scilifelab.pm.core import shell
from scilifelab.pm.core.controller import PmController
from scilifelab.pm.core.log import PmLogHandler
LOG = backend.minimal_logger(__name__)
class PmApp(foundation.CementApp):
    """
    Main Pm application.

    Cement application subclass that wires up the project-management
    controller, log handler and a pluggable shell-command handler, and
    buffers its output streams in StringIO objects until flush() is called.
    """
    class Meta:
        # Cement application metadata: the app label plus the default
        # controller and handler classes resolved during setup().
        label = "pm"
        base_controller = PmController
        cmd_handler = shell.ShCommandHandler
        log_handler = PmLogHandler

    def __init__(self, label=None, **kw):
        super(PmApp, self).__init__(**kw)
        # Register the command handler interface before any handler can be
        # resolved against it in setup().
        handler.define(command.ICommand)
        self.cmd = None

    def setup(self):
        """Run cement's setup, then attach the command handler and the
        in-memory output buffers."""
        super(PmApp, self).setup()
        self._setup_cmd_handler()
        ## FIXME: look at backend in cement
        self._output_data = dict(stdout=StringIO(), stderr=StringIO(), debug=StringIO())

    def _setup_cmd_handler(self):
        """Setup a command handler"""
        LOG.debug("setting up {}.command handler".format(self._meta.label))
        self.cmd = self._resolve_handler('command', self._meta.cmd_handler)

    def flush(self):
        """Flush output contained in _output_data dictionary"""
        # Only emit non-empty buffers; stderr goes to the real stderr.
        if self._output_data["stdout"].getvalue():
            print self._output_data["stdout"].getvalue()
        if self._output_data["stderr"].getvalue():
            print >> sys.stderr, self._output_data["stderr"].getvalue()
| mit |
MathieuR/ardupilot | Tools/autotest/apm_unit_tests/dev/arducopter_AP_Limits.py | 227 | 2098 | import arducopter
import util, pexpect, sys, time, math, shutil, os
from common import *
import mavutil, mavwp, random
def unit_test(mavproxy, mav):
'''A scripted flight plan for testing AP_Limits'''
time.sleep(5)
print "# Setting AP_Limits parameters"
mavproxy.send('param set LIM_ENABLED 1\n')
mavproxy.send('param set LIM_REQUIRED 0\n')
mavproxy.send('param set LIM_DEBUG 1\n')
mavproxy.send('param set LIM_SAFETIME 1\n')
mavproxy.send('param set LIM_ALT_ON 1\n')
mavproxy.send('param set LIM_ALT_REQ 0\n')
mavproxy.send('param set LIM_ALT_MIN 0\n')
mavproxy.send('param set LIM_ALT_MAX 50\n')
mavproxy.send('param set LIM_FNC_ON 0\n')
mavproxy.send('param set LIM_FNC_REQ 0\n')
mavproxy.send('param set LIM_FNC_SMPL 1\n')
mavproxy.send('param set LIM_FNC_RAD 50\n')
time.sleep(5)
print "# Listing AP_Limits parameters"
mavproxy.send('param show LIM*\n')
if (
arducopter.calibrate_level(mavproxy, mav) and
arducopter.arm_motors(mavproxy, mav) and
arducopter.takeoff(mavproxy,mav, alt_min=30, takeoff_throttle=1510) and
arducopter.hover(mavproxy, mav, hover_throttle=1500)
):
# Trigger for ALT_MAX
climb_rate = 0
previous_alt = 0
timeout = 30
# Do Not Exceed altitude
alt_dne = 55
tstart = time.time()
mavproxy.send('rc 3 1550\n')
while (time.time() < tstart + timeout):
m = mav.recv_match(type='VFR_HUD', blocking=True)
climb_rate = m.alt - previous_alt
previous_alt = m.alt
print("Trigger Altitude Limit: Cur:%u, climb_rate: %u" % (m.alt, climb_rate))
if abs(climb_rate) > 0:
tstart = time.time();
if (mav.recv_match(condition='MAV.flightmode=="GUIDED"', blocking=False) != None):
print "Triggered!"
return True
if m.alt >= alt_dne :
print("Altitude Exceeded")
return False
return False
return False
| gpl-3.0 |
mshafiq9/django | tests/template_tests/test_response.py | 199 | 14388 | from __future__ import unicode_literals
import pickle
import time
from datetime import datetime
from django.conf import settings
from django.template import Context, engines
from django.template.response import (
ContentNotRenderedError, SimpleTemplateResponse, TemplateResponse,
)
from django.test import (
RequestFactory, SimpleTestCase, ignore_warnings, override_settings,
)
from django.test.utils import require_jinja2
from django.utils.deprecation import RemovedInDjango110Warning
from .utils import TEMPLATE_DIR
def test_processor(request):
    """Minimal context processor: always contributes the same marker
    variable, so tests can detect whether processors ran."""
    extra_context = {'processors': 'yes'}
    return extra_context

# Dotted path used to install the processor via the TEMPLATES setting.
test_processor_name = 'template_tests.test_response.test_processor'
# A test middleware that installs a temporary URLConf
class CustomURLConfMiddleware(object):
    """Middleware stub that swaps in an alternate URLconf per request."""

    def process_request(self, request):
        # Setting request.urlconf makes Django resolve this request against
        # the alternate URL configuration instead of ROOT_URLCONF.
        request.urlconf = 'template_tests.alternate_urls'
class SimpleTemplateResponseTest(SimpleTestCase):
    """Tests for SimpleTemplateResponse: lazy rendering, iteration/content
    access guards, post-render callbacks and pickling behavior."""

    def _response(self, template='foo', *args, **kwargs):
        # Helper: build a SimpleTemplateResponse from an inline template
        # string compiled with the Django engine.
        template = engines['django'].from_string(template)
        return SimpleTemplateResponse(template, *args, **kwargs)

    def test_template_resolving(self):
        response = SimpleTemplateResponse('first/test.html')
        response.render()
        self.assertEqual(response.content, b'First template\n')

        # A list of template names selects the first one that exists.
        templates = ['foo.html', 'second/test.html', 'first/test.html']
        response = SimpleTemplateResponse(templates)
        response.render()
        self.assertEqual(response.content, b'Second template\n')

        response = self._response()
        response.render()
        self.assertEqual(response.content, b'foo')

    def test_explicit_baking(self):
        # explicit baking
        response = self._response()
        self.assertFalse(response.is_rendered)
        response.render()
        self.assertTrue(response.is_rendered)

    def test_render(self):
        # response is not re-rendered without the render call
        response = self._response().render()
        self.assertEqual(response.content, b'foo')

        # rebaking doesn't change the rendered content
        template = engines['django'].from_string('bar{{ baz }}')
        response.template_name = template
        response.render()
        self.assertEqual(response.content, b'foo')

        # but rendered content can be overridden by manually
        # setting content
        response.content = 'bar'
        self.assertEqual(response.content, b'bar')

    def test_iteration_unrendered(self):
        # unrendered response raises an exception on iteration
        response = self._response()
        self.assertFalse(response.is_rendered)

        def iteration():
            for x in response:
                pass
        self.assertRaises(ContentNotRenderedError, iteration)
        self.assertFalse(response.is_rendered)

    def test_iteration_rendered(self):
        # iteration works for rendered responses
        response = self._response().render()
        res = [x for x in response]
        self.assertEqual(res, [b'foo'])

    def test_content_access_unrendered(self):
        # unrendered response raises an exception when content is accessed
        response = self._response()
        self.assertFalse(response.is_rendered)
        self.assertRaises(ContentNotRenderedError, lambda: response.content)
        self.assertFalse(response.is_rendered)

    def test_content_access_rendered(self):
        # rendered response content can be accessed
        response = self._response().render()
        self.assertEqual(response.content, b'foo')

    def test_set_content(self):
        # content can be overridden; assigning it marks the response rendered
        response = self._response()
        self.assertFalse(response.is_rendered)
        response.content = 'spam'
        self.assertTrue(response.is_rendered)
        self.assertEqual(response.content, b'spam')
        response.content = 'baz'
        self.assertEqual(response.content, b'baz')

    def test_dict_context(self):
        response = self._response('{{ foo }}{{ processors }}',
                                  {'foo': 'bar'})
        self.assertEqual(response.context_data, {'foo': 'bar'})
        response.render()
        self.assertEqual(response.content, b'bar')

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_context_instance(self):
        # Passing a Context instance is deprecated but still honored.
        response = self._response('{{ foo }}{{ processors }}',
                                  Context({'foo': 'bar'}))
        self.assertEqual(response.context_data.__class__, Context)
        response.render()
        self.assertEqual(response.content, b'bar')

    def test_kwargs(self):
        response = self._response(content_type='application/json', status=504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    def test_args(self):
        # Same as test_kwargs but passing everything positionally.
        response = SimpleTemplateResponse('', {}, 'application/json', 504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    @require_jinja2
    def test_using(self):
        # The 'using' argument selects the template engine explicitly.
        response = SimpleTemplateResponse('template_tests/using.html').render()
        self.assertEqual(response.content, b'DTL\n')
        response = SimpleTemplateResponse('template_tests/using.html', using='django').render()
        self.assertEqual(response.content, b'DTL\n')
        response = SimpleTemplateResponse('template_tests/using.html', using='jinja2').render()
        self.assertEqual(response.content, b'Jinja2\n')

    def test_post_callbacks(self):
        "Rendering a template response triggers the post-render callbacks"
        post = []

        def post1(obj):
            post.append('post1')

        def post2(obj):
            post.append('post2')

        response = SimpleTemplateResponse('first/test.html', {})
        response.add_post_render_callback(post1)
        response.add_post_render_callback(post2)

        # When the content is rendered, all the callbacks are invoked, too.
        response.render()
        self.assertEqual(response.content, b'First template\n')
        self.assertEqual(post, ['post1', 'post2'])

    def test_pickling(self):
        # Create a template response. The context is
        # known to be unpickleable (e.g., a function).
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        # But if we render the response, we can pickle it.
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)

        self.assertEqual(unpickled_response.content, response.content)
        self.assertEqual(unpickled_response['content-type'], response['content-type'])
        self.assertEqual(unpickled_response.status_code, response.status_code)

        # ...and the unpickled response doesn't have the
        # template-related attributes, so it can't be re-rendered
        template_attrs = ('template_name', 'context_data', '_post_render_callbacks')
        for attr in template_attrs:
            self.assertFalse(hasattr(unpickled_response, attr))

        # ...and requesting any of those attributes raises an exception
        for attr in template_attrs:
            with self.assertRaises(AttributeError):
                getattr(unpickled_response, attr)

    def test_repickling(self):
        # A response that has been pickled once must be picklable again.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        pickle.dumps(unpickled_response)

    def test_pickling_cookie(self):
        # Cookies set on the response must survive a pickle round-trip.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })

        response.cookies['key'] = 'value'

        response.render()
        pickled_response = pickle.dumps(response, pickle.HIGHEST_PROTOCOL)
        unpickled_response = pickle.loads(pickled_response)

        self.assertEqual(unpickled_response.cookies['key'].value, 'value')
@override_settings(TEMPLATES=[{
    'BACKEND': 'django.template.backends.django.DjangoTemplates',
    'DIRS': [TEMPLATE_DIR],
    'OPTIONS': {
        'context_processors': [test_processor_name],
    },
}])
class TemplateResponseTest(SimpleTestCase):
    """Tests for TemplateResponse, the request-aware variant of
    SimpleTemplateResponse (context processors run against the request)."""

    def setUp(self):
        self.factory = RequestFactory()

    def _response(self, template='foo', *args, **kwargs):
        # Helper: build a TemplateResponse for a fresh GET request; the
        # request is kept on self._request for later inspection.
        self._request = self.factory.get('/')
        template = engines['django'].from_string(template)
        return TemplateResponse(self._request, template, *args, **kwargs)

    def test_render(self):
        # The context processor configured above contributes 'processors'.
        response = self._response('{{ foo }}{{ processors }}').render()
        self.assertEqual(response.content, b'yes')

    def test_render_with_requestcontext(self):
        response = self._response('{{ foo }}{{ processors }}',
                                  {'foo': 'bar'}).render()
        self.assertEqual(response.content, b'baryes')

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_render_with_context(self):
        # A plain Context bypasses context processors (deprecated path).
        response = self._response('{{ foo }}{{ processors }}',
                                  Context({'foo': 'bar'})).render()
        self.assertEqual(response.content, b'bar')

    def test_context_processor_priority(self):
        # context processors should be overridden by passed-in context
        response = self._response('{{ foo }}{{ processors }}',
                                  {'processors': 'no'}).render()
        self.assertEqual(response.content, b'no')

    def test_kwargs(self):
        response = self._response(content_type='application/json',
                                  status=504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    def test_args(self):
        # Same as test_kwargs but passing everything positionally.
        response = TemplateResponse(self.factory.get('/'), '', {},
                                    'application/json', 504)
        self.assertEqual(response['content-type'], 'application/json')
        self.assertEqual(response.status_code, 504)

    @require_jinja2
    def test_using(self):
        # The 'using' argument selects the template engine explicitly.
        request = self.factory.get('/')
        response = TemplateResponse(request, 'template_tests/using.html').render()
        self.assertEqual(response.content, b'DTL\n')
        response = TemplateResponse(request, 'template_tests/using.html', using='django').render()
        self.assertEqual(response.content, b'DTL\n')
        response = TemplateResponse(request, 'template_tests/using.html', using='jinja2').render()
        self.assertEqual(response.content, b'Jinja2\n')

    @ignore_warnings(category=RemovedInDjango110Warning)
    def test_custom_app(self):
        # current_app is deprecated but must still be copied to the request.
        self._response('{{ foo }}', current_app="foobar")
        self.assertEqual(self._request.current_app, 'foobar')

    def test_pickling(self):
        # Create a template response. The context is
        # known to be unpickleable (e.g., a function).
        response = TemplateResponse(self.factory.get('/'),
            'first/test.html', {
                'value': 123,
                'fn': datetime.now,
            }
        )
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        # But if we render the response, we can pickle it.
        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)

        self.assertEqual(unpickled_response.content, response.content)
        self.assertEqual(unpickled_response['content-type'], response['content-type'])
        self.assertEqual(unpickled_response.status_code, response.status_code)

        # ...and the unpickled response doesn't have the
        # template-related attributes, so it can't be re-rendered
        template_attrs = ('template_name', 'context_data',
            '_post_render_callbacks', '_request', '_current_app')
        for attr in template_attrs:
            self.assertFalse(hasattr(unpickled_response, attr))

        # ...and requesting any of those attributes raises an exception
        for attr in template_attrs:
            with self.assertRaises(AttributeError):
                getattr(unpickled_response, attr)

    def test_repickling(self):
        # A response that has been pickled once must be picklable again.
        response = SimpleTemplateResponse('first/test.html', {
            'value': 123,
            'fn': datetime.now,
        })
        self.assertRaises(ContentNotRenderedError,
                          pickle.dumps, response)

        response.render()
        pickled_response = pickle.dumps(response)
        unpickled_response = pickle.loads(pickled_response)
        pickle.dumps(unpickled_response)
@override_settings(
    MIDDLEWARE_CLASSES=settings.MIDDLEWARE_CLASSES + [
        'template_tests.test_response.CustomURLConfMiddleware'
    ],
    ROOT_URLCONF='template_tests.urls',
)
class CustomURLConfTest(SimpleTestCase):
    """Check that a middleware-installed URLconf is honored by the view."""

    def test_custom_urlconf(self):
        # The middleware registered above swaps in the alternate URLconf,
        # so URL reversing inside the rendered template must use it.
        resp = self.client.get('/template_response_view/')
        self.assertEqual(resp.status_code, 200)
        self.assertContains(resp, 'This is where you can find the snark: /snark/')
@override_settings(
    CACHE_MIDDLEWARE_SECONDS=2.0,
    MIDDLEWARE_CLASSES=settings.MIDDLEWARE_CLASSES + [
        'django.middleware.cache.FetchFromCacheMiddleware',
        'django.middleware.cache.UpdateCacheMiddleware',
    ],
    ROOT_URLCONF='template_tests.alternate_urls',
)
class CacheMiddlewareTest(SimpleTestCase):
    """Ensure TemplateResponse cooperates with the cache middleware."""

    def test_middleware_caching(self):
        first = self.client.get('/template_response_view/')
        self.assertEqual(first.status_code, 200)

        # Within the 2-second cache window the identical body is served.
        time.sleep(1.0)
        second = self.client.get('/template_response_view/')
        self.assertEqual(second.status_code, 200)
        self.assertEqual(first.content, second.content)

        # Let the cache expire and test again
        time.sleep(2.0)
        second = self.client.get('/template_response_view/')
        self.assertEqual(second.status_code, 200)
        self.assertNotEqual(first.content, second.content)
| bsd-3-clause |
KrzyHonk/bpmn-python | bpmn_python/graph/classes/root_element/callable_element_type.py | 2 | 1075 | # coding=utf-8
"""
Class used for representing tCallableElement of BPMN 2.0 graph
"""
import graph.classes.root_element.root_element_type as root_element
class CallableElement(root_element.RootElement):
    """
    Class used for representing tCallableElement of BPMN 2.0 graph.
    """

    def __init__(self):
        """
        Default constructor, initializes object fields with new instances.
        """
        super(CallableElement, self).__init__()
        self.__name = None

    def get_name(self):
        """
        Getter for 'name' field.
        :return: a value of 'name' field.
        """
        return self.__name

    def set_name(self, value):
        """
        Setter for 'name' field.
        :param value: a new value of 'name' field. Must be either None (name
        is optional according to BPMN 2.0 XML Schema) or String.
        """
        # Guard clause: anything that is neither None nor a string is rejected.
        if value is not None and not isinstance(value, str):
            raise TypeError("Name must be set to a String")
        self.__name = value
| gpl-3.0 |
capitalone/cloud-custodian | tests/test_simpledb.py | 1 | 1242 | # Copyright 2016-2017 Capital One Services, LLC
# Copyright The Cloud Custodian Authors.
# SPDX-License-Identifier: Apache-2.0
from .common import BaseTest
class SimpleDB(BaseTest):
    """Tests for the SimpleDB (sdb) resource: querying and the delete action."""

    def test_delete(self):
        factory = self.replay_flight_data("test_simpledb_delete")
        policy = self.load_policy(
            {
                "name": "sdb-del",
                "resource": "simpledb",
                "filters": [{"DomainName": "supersuper"}],
                "actions": ["delete"],
            },
            session_factory=factory,
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]["DomainName"], "supersuper")
        # After the delete action ran, the domain must be gone from AWS.
        remaining = factory().client("sdb").list_domains()["DomainNames"]
        self.assertTrue(matched[0]["DomainName"] not in remaining)

    def test_simpledb(self):
        factory = self.replay_flight_data("test_simpledb_query")
        policy = self.load_policy(
            {"name": "sdbtest", "resource": "simpledb"}, session_factory=factory
        )
        matched = policy.run()
        self.assertEqual(len(matched), 1)
        self.assertEqual(matched[0]["DomainName"], "devtest")
| apache-2.0 |
sputnick-dev/weboob | modules/gdfsuez/module.py | 7 | 3510 | # -*- coding: utf-8 -*-
# Copyright(C) 2013 Mathieu Jourdan
#
# This file is part of weboob.
#
# weboob is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# weboob is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with weboob. If not, see <http://www.gnu.org/licenses/>.
from weboob.capabilities.bill import CapBill, SubscriptionNotFound,\
BillNotFound, Subscription, Bill
from weboob.tools.backend import Module, BackendConfig
from weboob.tools.value import ValueBackendPassword
from .browser import GdfSuez
__all__ = ['GdfSuezModule']
class GdfSuezModule(Module, CapBill):
    """Weboob backend module for the GDF-Suez French energy provider."""

    NAME = 'gdfsuez'
    MAINTAINER = u'Mathieu Jourdan'
    EMAIL = 'mathieu.jourdan@gresille.org'
    VERSION = '1.1'
    LICENSE = 'AGPLv3+'
    DESCRIPTION = u'GDF-Suez French energy provider'
    CONFIG = BackendConfig(ValueBackendPassword('login',
                                                label='Account ID (e-mail)',
                                                masked=False),
                           ValueBackendPassword('password',
                                                label='Password',
                                                masked=True)
                           )
    BROWSER = GdfSuez

    def create_default_browser(self):
        # Instantiate the browser with the configured credentials.
        return self.create_browser(self.config['login'].get(),
                                   self.config['password'].get())

    def iter_subscription(self):
        # Delegate directly to the browser's subscription listing.
        for sub in self.browser.get_subscription_list():
            yield sub

    def get_subscription(self, _id):
        # Subscription ids are purely numeric account identifiers.
        if not _id.isdigit():
            raise SubscriptionNotFound()
        with self.browser:
            sub = self.browser.get_subscription(_id)
            if sub:
                return sub
            raise SubscriptionNotFound()

    def iter_bills_history(self, subscription):
        # Accept either a Subscription object or its id.
        if not isinstance(subscription, Subscription):
            subscription = self.get_subscription(subscription)
        with self.browser:
            for entry in self.browser.get_history(subscription):
                yield entry

    def get_details(self, subscription):
        if not isinstance(subscription, Subscription):
            subscription = self.get_subscription(subscription)
        with self.browser:
            for detail in self.browser.get_details(subscription):
                yield detail

    def iter_bills(self, subscription):
        if not isinstance(subscription, Subscription):
            subscription = self.get_subscription(subscription)
        with self.browser:
            for bill in self.browser.iter_bills():
                yield bill

    def get_bill(self, id):
        with self.browser:
            bill = self.browser.get_bill(id)
            if bill:
                return bill
            raise BillNotFound()

    def download_bill(self, bill):
        # Accept either a Bill object or its id.
        if not isinstance(bill, Bill):
            bill = self.get_bill(bill)
        with self.browser:
            return self.browser.readurl(bill._url)
| agpl-3.0 |
tysonclugg/django | tests/expressions_case/models.py | 55 | 2542 | from django.db import models
try:
from PIL import Image
except ImportError:
Image = None
class CaseTestModel(models.Model):
    # Model used to exercise Case() expressions over (nearly) every Django
    # field type; several fields use explicit db_column names to avoid
    # clashing with SQL keywords.
    integer = models.IntegerField()
    integer2 = models.IntegerField(null=True)
    string = models.CharField(max_length=100, default='')

    big_integer = models.BigIntegerField(null=True)
    binary = models.BinaryField(default=b'')
    boolean = models.BooleanField(default=False)
    date = models.DateField(null=True, db_column='date_field')
    date_time = models.DateTimeField(null=True)
    decimal = models.DecimalField(max_digits=2, decimal_places=1, null=True, db_column='decimal_field')
    duration = models.DurationField(null=True)
    email = models.EmailField(default='')
    file = models.FileField(null=True, db_column='file_field')
    file_path = models.FilePathField(null=True)
    float = models.FloatField(null=True, db_column='float_field')
    if Image:
        # Only declared when Pillow is importable (see the guarded import
        # at the top of the module).
        image = models.ImageField(null=True)
    generic_ip_address = models.GenericIPAddressField(null=True)
    null_boolean = models.NullBooleanField()
    positive_integer = models.PositiveIntegerField(null=True)
    positive_small_integer = models.PositiveSmallIntegerField(null=True)
    slug = models.SlugField(default='')
    small_integer = models.SmallIntegerField(null=True)
    text = models.TextField(default='')
    time = models.TimeField(null=True, db_column='time_field')
    url = models.URLField(default='')
    uuid = models.UUIDField(null=True)
    # Self-referential FK, used for testing Case() on joined relations.
    fk = models.ForeignKey('self', models.CASCADE, null=True)

    def __str__(self):
        return "%i, %s" % (self.integer, self.string)
class O2OCaseTestModel(models.Model):
    # One-to-one companion of CaseTestModel, reachable as 'o2o_rel'.
    o2o = models.OneToOneField(CaseTestModel, models.CASCADE, related_name='o2o_rel')
    integer = models.IntegerField()

    def __str__(self):
        return "%i, %s" % (self.id, self.o2o)
class FKCaseTestModel(models.Model):
    # Many-to-one companion of CaseTestModel, reachable as 'fk_rel'.
    fk = models.ForeignKey(CaseTestModel, models.CASCADE, related_name='fk_rel')
    integer = models.IntegerField()

    def __str__(self):
        return "%i, %s" % (self.id, self.fk)
class Client(models.Model):
    # Account-type codes stored in the single-character 'account_type' field.
    REGULAR = 'R'
    GOLD = 'G'
    PLATINUM = 'P'
    ACCOUNT_TYPE_CHOICES = (
        (REGULAR, 'Regular'),
        (GOLD, 'Gold'),
        (PLATINUM, 'Platinum'),
    )
    name = models.CharField(max_length=50)
    registered_on = models.DateField()
    account_type = models.CharField(
        max_length=1,
        choices=ACCOUNT_TYPE_CHOICES,
        default=REGULAR,
    )

    def __str__(self):
        return self.name
| bsd-3-clause |
alex/django-old | setup.py | 8 | 4087 | from distutils.core import setup
from distutils.command.install_data import install_data
from distutils.command.install import INSTALL_SCHEMES
import os
import sys
class osx_install_data(install_data):
    # On MacOS, the platform-specific lib dir is /System/Library/Framework/Python/.../
    # which is wrong. Python 2.5 supplied with MacOS 10.5 has an Apple-specific fix
    # for this in distutils.command.install_data#306. It fixes install_lib but not
    # install_data, which is why we roll our own install_data class.

    def finalize_options(self):
        # By the time finalize_options is called, install.install_lib is set to the
        # fixed directory, so we set the installdir to install_lib. The
        # install_data class uses ('install_data', 'install_dir') instead.
        self.set_undefined_options('install', ('install_lib', 'install_dir'))
        install_data.finalize_options(self)
# Choose the data-installation command class appropriate for the platform
# (see osx_install_data above for why MacOS needs a custom one).
if sys.platform == "darwin":
    cmdclasses = {'install_data': osx_install_data}
else:
    cmdclasses = {'install_data': install_data}
def fullsplit(path, result=None):
    """
    Split a pathname into components (the opposite of os.path.join) in a
    platform-neutral way.
    """
    # Iterative form of the original tail recursion: peel one component off
    # the end of the path per step, prepending it to the accumulator.
    parts = [] if result is None else result
    while True:
        head, tail = os.path.split(path)
        if head == '':
            # Reached a bare name: it is the first remaining component.
            return [tail] + parts
        if head == path:
            # os.path.split stopped making progress (filesystem root).
            return parts
        parts = [tail] + parts
        path = head
# Tell distutils to put the data_files in platform-specific installation
# locations. See here for an explanation:
# http://groups.google.com/group/comp.lang.python/browse_thread/thread/35ec7b2fed36eaec/2105ee4d9e8042cb
for scheme in INSTALL_SCHEMES.values():
    scheme['data'] = scheme['purelib']

# Compile the list of packages available, because distutils doesn't have
# an easy way to do this.
packages, data_files = [], []
root_dir = os.path.dirname(__file__)
if root_dir != '':
    os.chdir(root_dir)
django_dir = 'django'

for dirpath, dirnames, filenames in os.walk(django_dir):
    # Ignore dirnames that start with '.'
    # NOTE(review): deleting from dirnames while enumerating it can skip a
    # sibling entry when two consecutive names start with '.'.
    for i, dirname in enumerate(dirnames):
        if dirname.startswith('.'):
            del dirnames[i]
    # Directories with __init__.py are packages; any other directory with
    # files contributes those files as data_files instead.
    if '__init__.py' in filenames:
        packages.append('.'.join(fullsplit(dirpath)))
    elif filenames:
        data_files.append([dirpath, [os.path.join(dirpath, f) for f in filenames]])

# Small hack for working with bdist_wininst.
# See http://mail.python.org/pipermail/distutils-sig/2004-August/004134.html
if len(sys.argv) > 1 and sys.argv[1] == 'bdist_wininst':
    for file_info in data_files:
        file_info[0] = '\\PURELIB\\%s' % file_info[0]

# Dynamically calculate the version based on django.VERSION.
version = __import__('django').get_version()
if u'SVN' in version:
    # Drop the trailing SVN revision from the human-readable version string.
    version = ' '.join(version.split(' ')[:-1])
# Distribution metadata; version and the package/data lists are computed above.
setup(
    name = "Django",
    version = version.replace(' ', '-'),
    url = 'http://www.djangoproject.com/',
    author = 'Django Software Foundation',
    author_email = 'foundation@djangoproject.com',
    description = 'A high-level Python Web framework that encourages rapid development and clean, pragmatic design.',
    download_url = 'http://media.djangoproject.com/releases/1.2/Django-1.2.1.tar.gz',
    packages = packages,
    cmdclass = cmdclasses,
    data_files = data_files,
    scripts = ['django/bin/django-admin.py'],
    classifiers = ['Development Status :: 5 - Production/Stable',
        'Environment :: Web Environment',
        'Framework :: Django',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Operating System :: OS Independent',
        'Programming Language :: Python',
        'Topic :: Internet :: WWW/HTTP',
        'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
        'Topic :: Internet :: WWW/HTTP :: WSGI',
        'Topic :: Software Development :: Libraries :: Application Frameworks',
        'Topic :: Software Development :: Libraries :: Python Modules',
    ],
)
| bsd-3-clause |
nkuhlen/replication-study-financial-macro | .mywaflib/waflib/Node.py | 3 | 19062 | #!/usr/bin/env python
# encoding: utf-8
# Thomas Nagy, 2005-2010 (ita)
"""
Node: filesystem structure, contains lists of nodes
#. Each file/folder is represented by exactly one node.
#. Some potential class properties are stored on :py:class:`waflib.Build.BuildContext` : nodes to depend on, etc.
Unused class members can increase the `.wafpickle` file size sensibly.
#. Node objects should never be created directly, use
the methods :py:func:`Node.make_node` or :py:func:`Node.find_node`
#. The methods :py:func:`Node.find_resource`, :py:func:`Node.find_dir` :py:func:`Node.find_or_declare` should be
used when a build context is present
#. Each instance of :py:class:`waflib.Context.Context` has a unique :py:class:`Node` subclass.
(:py:class:`waflib.Node.Nod3`, see the :py:class:`waflib.Context.Context` initializer). A reference to the context owning a node is held as self.ctx
"""
import os, re, sys, shutil
from waflib import Utils, Errors
exclude_regs = '''
**/*~
**/#*#
**/.#*
**/%*%
**/._*
**/CVS
**/CVS/**
**/.cvsignore
**/SCCS
**/SCCS/**
**/vssver.scc
**/.svn
**/.svn/**
**/BitKeeper
**/.git
**/.git/**
**/.gitignore
**/.bzr
**/.bzrignore
**/.bzr/**
**/.hg
**/.hg/**
**/_MTN
**/_MTN/**
**/.arch-ids
**/{arch}
**/_darcs
**/_darcs/**
**/.intlcache
**/.DS_Store'''
"""
Ant patterns for files and folders to exclude while doing the
recursive traversal in :py:meth:`waflib.Node.Node.ant_glob`
"""

# TODO waf 1.9
# Platform dispatch for path splitting: plain unix splitting is the
# default; cygwin and native win32 each get their own routine.
split_path = Utils.split_path_unix
split_path_cygwin = Utils.split_path_cygwin
split_path_win32 = Utils.split_path_win32

if sys.platform == 'cygwin':
    split_path = split_path_cygwin
elif Utils.is_win32:
    split_path = split_path_win32
class Node(object):
"""
This class is organized in two parts
* The basic methods meant for filesystem access (compute paths, create folders, etc)
* The methods bound to a :py:class:`waflib.Build.BuildContext` (require ``bld.srcnode`` and ``bld.bldnode``)
The Node objects are not thread safe in any way.
"""
dict_class = dict
__slots__ = ('name', 'sig', 'children', 'parent', 'cache_abspath', 'cache_isdir', 'cache_sig')
def __init__(self, name, parent):
	"""Create a node and register it in its parent's children mapping.

	:param name: file or folder name (a single path component)
	:param parent: parent node, or None for a root node
	"""
	self.name = name
	self.parent = parent
	if parent:
		# each (parent, name) pair must map to exactly one node in the tree
		if name in parent.children:
			raise Errors.WafError('node %s exists in the parent files %r already' % (name, parent))
		parent.children[name] = self
def __setstate__(self, data):
"Deserializes from data"
self.name = data[0]
self.parent = data[1]
if data[2] is not None:
# Issue 1480
self.children = self.dict_class(data[2])
if data[3] is not None:
self.sig = data[3]
def __getstate__(self):
"Serialize the node info"
return (self.name, self.parent, getattr(self, 'children', None), getattr(self, 'sig', None))
def __str__(self):
"String representation (name), for debugging purposes"
return self.name
def __repr__(self):
"String representation (abspath), for debugging purposes"
return self.abspath()
def __hash__(self):
"Node hash, used for storage in dicts. This hash is not persistent."
return id(self)
def __eq__(self, node):
"Node comparison, based on the IDs"
return id(self) == id(node)
def __copy__(self):
"Implemented to prevent nodes from being copied (raises an exception)"
raise Errors.WafError('nodes are not supposed to be copied')
def read(self, flags='r', encoding='ISO8859-1'):
"""
Return the contents of the file represented by this node::
def build(bld):
bld.path.find_node('wscript').read()
:type fname: string
:param fname: Path to file
:type m: string
:param m: Open mode
:rtype: string
:return: File contents
"""
return Utils.readf(self.abspath(), flags, encoding)
def write(self, data, flags='w', encoding='ISO8859-1'):
"""
Write some text to the physical file represented by this node::
def build(bld):
bld.path.make_node('foo.txt').write('Hello, world!')
:type data: string
:param data: data to write
:type flags: string
:param flags: Write mode
"""
Utils.writef(self.abspath(), data, flags, encoding)
def chmod(self, val):
"""
Change file/dir permissions::
def build(bld):
bld.path.chmod(493) # 0755
"""
os.chmod(self.abspath(), val)
def delete(self):
	"""Delete the file/folder, and remove this node from the tree. Do not use this object after calling this method."""
	try:
		try:
			# a 'children' attribute marks a folder node
			if hasattr(self, 'children'):
				shutil.rmtree(self.abspath())
			else:
				os.remove(self.abspath())
		except OSError as e:
			# ignore the failure when the target is already gone;
			# anything else (permissions, busy file) is re-raised
			if os.path.exists(self.abspath()):
				raise e
	finally:
		# always detach this node from the tree, even on failure
		self.evict()
def evict(self):
	"""Internal - detach this node from its parent's children mapping."""
	self.parent.children.pop(self.name)
def suffix(self):
	"""Return the file extension, including the leading dot.

	Historical quirk preserved: when the name contains no dot at all,
	the entire name is returned.
	"""
	dot = self.name.rfind('.')
	return self.name[dot:] if dot >= 0 else self.name
def height(self):
	"""Return the number of ancestors of this node (depth from the root)."""
	depth = -1
	node = self
	while node is not None:
		node = node.parent
		depth += 1
	return depth
def listdir(self):
	"""Return the names found in this folder on disk, sorted alphabetically."""
	return sorted(Utils.listdir(self.abspath()))
def mkdir(self):
"""
Create a folder represented by this node, creating intermediate nodes as needed
An exception will be raised only when the folder cannot possibly exist there
"""
if getattr(self, 'cache_isdir', None):
return
try:
self.parent.mkdir()
except OSError:
pass
if self.name:
try:
os.makedirs(self.abspath())
except OSError:
pass
if not os.path.isdir(self.abspath()):
raise Errors.WafError('Could not create the directory %s' % self.abspath())
try:
self.children
except AttributeError:
self.children = self.dict_class()
self.cache_isdir = True
def find_node(self, lst):
"""
Find a node on the file system (files or folders), create intermediate nodes as needed
:param lst: path
:type lst: string or list of string
"""
if isinstance(lst, str):
lst = [x for x in split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
continue
try:
ch = cur.children
except AttributeError:
cur.children = self.dict_class()
else:
try:
cur = cur.children[x]
continue
except KeyError:
pass
# optimistic: create the node first then look if it was correct to do so
cur = self.__class__(x, cur)
try:
os.stat(cur.abspath())
except OSError:
cur.evict()
return None
ret = cur
try:
os.stat(ret.abspath())
except OSError:
ret.evict()
return None
try:
while not getattr(cur.parent, 'cache_isdir', None):
cur = cur.parent
cur.cache_isdir = True
except AttributeError:
pass
return ret
def make_node(self, lst):
"""
Find or create a node without looking on the filesystem
:param lst: path
:type lst: string or list of string
"""
if isinstance(lst, str):
lst = [x for x in split_path(lst) if x and x != '.']
cur = self
for x in lst:
if x == '..':
cur = cur.parent or cur
continue
if getattr(cur, 'children', {}):
if x in cur.children:
cur = cur.children[x]
continue
else:
cur.children = self.dict_class()
cur = self.__class__(x, cur)
return cur
def search_node(self, lst):
	"""Look up an already-known node without touching the filesystem.

	:param lst: path
	:type lst: string or list of string
	:return: the node, or None when any component is missing
	"""
	if isinstance(lst, str):
		lst = [x for x in split_path(lst) if x and x != '.']
	node = self
	for part in lst:
		if part == '..':
			# a root node is its own parent for the purpose of '..'
			node = node.parent or node
		else:
			children = getattr(node, 'children', None)
			if children is None or part not in children:
				return None
			node = children[part]
	return node
def path_from(self, node):
	"""
	Path of this node seen from the other::
		def build(bld):
			n1 = bld.path.find_node('foo/bar/xyz.txt')
			n2 = bld.path.find_node('foo/stuff/')
			n1.path_from(n2) # '../bar/xyz.txt'
	:param node: path to use as a reference
	:type node: :py:class:`waflib.Node.Node`
	"""
	c1 = self
	c2 = node
	c1h = c1.height()
	c2h = c2.height()
	lst = []
	up = 0
	# bring the deeper cursor up to the shallower one's depth, recording
	# the path components (for self) or counting '..' steps (for node)
	while c1h > c2h:
		lst.append(c1.name)
		c1 = c1.parent
		c1h -= 1
	while c2h > c1h:
		up += 1
		c2 = c2.parent
		c2h -= 1
	# climb both cursors together until the common ancestor is reached
	# (nodes are unique per path, so identity comparison is sufficient)
	while id(c1) != id(c2):
		lst.append(c1.name)
		up += 1
		c1 = c1.parent
		c2 = c2.parent
	if c1.parent:
		# one '..' per level between the reference node and the ancestor
		for i in range(up):
			lst.append('..')
	else:
		# the common ancestor is the filesystem root: emit an absolute path
		# (the trailing '' produces the leading separator after reversal)
		if lst and not Utils.is_win32:
			lst.append('')
	lst.reverse()
	return os.sep.join(lst) or '.'
def abspath(self):
"""
Absolute path. A cache is kept in the context as ``cache_node_abspath``
"""
try:
return self.cache_abspath
except AttributeError:
pass
# think twice before touching this (performance + complexity + correctness)
if not self.parent:
val = os.sep
elif not self.parent.name:
val = os.sep + self.name
else:
val = self.parent.abspath() + os.sep + self.name
self.cache_abspath = val
return val
if Utils.is_win32:
def abspath(self):
try:
return self.cache_abspath
except AttributeError:
pass
if not self.parent:
val = ''
elif not self.parent.name:
val = self.name + os.sep
else:
val = self.parent.abspath().rstrip(os.sep) + os.sep + self.name
self.cache_abspath = val
return val
def is_child_of(self, node):
	"""
	Does this node belong to the subtree node?::
		def build(bld):
			node = bld.path.find_node('wscript')
			node.is_child_of(bld.path) # True
	:param node: path to use as a reference
	:type node: :py:class:`waflib.Node.Node`
	"""
	# climb from self until both nodes are at the same depth, then the
	# two must be the very same object for self to be in the subtree
	ancestor = self
	for _ in range(max(0, self.height() - node.height())):
		ancestor = ancestor.parent
	return ancestor is node
def ant_iter(self, accept=None, maxdepth=25, pats=[], dir=False, src=True, remove=True):
	"""
	Semi-private and recursive method used by ant_glob.
	:param accept: function used for accepting/rejecting a node, returns the patterns that can be still accepted in recursion
	:type accept: function
	:param maxdepth: maximum depth in the filesystem (25)
	:type maxdepth: int
	:param pats: list of patterns to accept and list of patterns to exclude
	:type pats: tuple
	:param dir: return folders too (False by default)
	:type dir: bool
	:param src: return files (True by default)
	:type src: bool
	:param remove: remove files/folders that do not exist (True by default)
	:type remove: bool
	"""
	dircont = self.listdir()
	dircont.sort()
	try:
		lst = set(self.children.keys())
	except AttributeError:
		self.children = self.dict_class()
	else:
		if remove:
			# prune nodes whose files/folders have disappeared from disk
			for x in lst - set(dircont):
				self.children[x].evict()
	for name in dircont:
		npats = accept(name, pats)
		if npats and npats[0]:
			# an empty pattern list among the accepted ones means "matched"
			accepted = [] in npats[0]
			node = self.make_node([name])
			isdir = os.path.isdir(node.abspath())
			if accepted:
				if isdir:
					if dir:
						yield node
				else:
					if src:
						yield node
			if getattr(node, 'cache_isdir', None) or isdir:
				node.cache_isdir = True
				if maxdepth:
					for k in node.ant_iter(accept=accept, maxdepth=maxdepth - 1, pats=npats, dir=dir, src=src, remove=remove):
						yield k
	# Bug fix: 'raise StopIteration' inside a generator becomes a
	# RuntimeError under PEP 479 (Python 3.7+); a plain return ends
	# the generator cleanly with identical semantics on older versions.
	return
def ant_glob(self, *k, **kw):
"""
This method is used for finding files across folders. It behaves like ant patterns:
* ``**/*`` find all files recursively
* ``**/*.class`` find all files ending by .class
* ``..`` find files having two dot characters
For example::
def configure(cfg):
cfg.path.ant_glob('**/*.cpp') # find all .cpp files
cfg.root.ant_glob('etc/*.txt') # using the filesystem root can be slow
cfg.path.ant_glob('*.cpp', excl=['*.c'], src=True, dir=False)
For more information see http://ant.apache.org/manual/dirtasks.html
The nodes that correspond to files and folders that do not exist will be removed. To prevent this
behaviour, pass 'remove=False'
:param incl: ant patterns or list of patterns to include
:type incl: string or list of strings
:param excl: ant patterns or list of patterns to exclude
:type excl: string or list of strings
:param dir: return folders too (False by default)
:type dir: bool
:param src: return files (True by default)
:type src: bool
:param remove: remove files/folders that do not exist (True by default)
:type remove: bool
:param maxdepth: maximum depth of recursion
:type maxdepth: int
:param ignorecase: ignore case while matching (False by default)
:type ignorecase: bool
"""
src = kw.get('src', True)
dir = kw.get('dir', False)
excl = kw.get('excl', exclude_regs)
incl = k and k[0] or kw.get('incl', '**')
reflags = kw.get('ignorecase', 0) and re.I
def to_pat(s):
lst = Utils.to_list(s)
ret = []
for x in lst:
x = x.replace('\\', '/').replace('//', '/')
if x.endswith('/'):
x += '**'
lst2 = x.split('/')
accu = []
for k in lst2:
if k == '**':
accu.append(k)
else:
k = k.replace('.', '[.]').replace('*','.*').replace('?', '.').replace('+', '\\+')
k = '^%s$' % k
try:
#print "pattern", k
accu.append(re.compile(k, flags=reflags))
except Exception as e:
raise Errors.WafError("Invalid pattern: %s" % k, e)
ret.append(accu)
return ret
def filtre(name, nn):
ret = []
for lst in nn:
if not lst:
pass
elif lst[0] == '**':
ret.append(lst)
if len(lst) > 1:
if lst[1].match(name):
ret.append(lst[2:])
else:
ret.append([])
elif lst[0].match(name):
ret.append(lst[1:])
return ret
def accept(name, pats):
nacc = filtre(name, pats[0])
nrej = filtre(name, pats[1])
if [] in nrej:
nacc = []
return [nacc, nrej]
ret = [x for x in self.ant_iter(accept=accept, pats=[to_pat(incl), to_pat(excl)], maxdepth=kw.get('maxdepth', 25), dir=dir, src=src, remove=kw.get('remove', True))]
if kw.get('flat', False):
return ' '.join([x.path_from(self) for x in ret])
return ret
# --------------------------------------------------------------------------------
# the following methods require the source/build folders (bld.srcnode/bld.bldnode)
# using a subclass is a possibility, but is that really necessary?
# --------------------------------------------------------------------------------
def is_src(self):
"""
True if the node is below the source directory
note: !is_src does not imply is_bld()
:rtype: bool
"""
cur = self
x = id(self.ctx.srcnode)
y = id(self.ctx.bldnode)
while cur.parent:
if id(cur) == y:
return False
if id(cur) == x:
return True
cur = cur.parent
return False
def is_bld(self):
"""
True if the node is below the build directory
note: !is_bld does not imply is_src
:rtype: bool
"""
cur = self
y = id(self.ctx.bldnode)
while cur.parent:
if id(cur) == y:
return True
cur = cur.parent
return False
def get_src(self):
"""
Return the equivalent src node (or self if not possible)
:rtype: :py:class:`waflib.Node.Node`
"""
cur = self
x = id(self.ctx.srcnode)
y = id(self.ctx.bldnode)
lst = []
while cur.parent:
if id(cur) == y:
lst.reverse()
return self.ctx.srcnode.make_node(lst)
if id(cur) == x:
return self
lst.append(cur.name)
cur = cur.parent
return self
def get_bld(self):
"""
Return the equivalent bld node (or self if not possible)
:rtype: :py:class:`waflib.Node.Node`
"""
cur = self
x = id(self.ctx.srcnode)
y = id(self.ctx.bldnode)
lst = []
while cur.parent:
if id(cur) == y:
return self
if id(cur) == x:
lst.reverse()
return self.ctx.bldnode.make_node(lst)
lst.append(cur.name)
cur = cur.parent
# the file is external to the current project, make a fake root in the current build directory
lst.reverse()
if lst and Utils.is_win32 and len(lst[0]) == 2 and lst[0].endswith(':'):
lst[0] = lst[0][0]
return self.ctx.bldnode.make_node(['__root__'] + lst)
def find_resource(self, lst):
"""
Try to find a declared build node or a source file
:param lst: path
:type lst: string or list of string
"""
if isinstance(lst, str):
lst = [x for x in split_path(lst) if x and x != '.']
node = self.get_bld().search_node(lst)
if not node:
self = self.get_src()
node = self.find_node(lst)
if node:
if os.path.isdir(node.abspath()):
return None
return node
def find_or_declare(self, lst):
"""
if 'self' is in build directory, try to return an existing node
if no node is found, go to the source directory
try to find an existing node in the source directory
if no node is found, create it in the build directory
:param lst: path
:type lst: string or list of string
"""
if isinstance(lst, str):
lst = [x for x in split_path(lst) if x and x != '.']
node = self.get_bld().search_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig = None
node.parent.mkdir()
return node
self = self.get_src()
node = self.find_node(lst)
if node:
if not os.path.isfile(node.abspath()):
node.sig = None
node.parent.mkdir()
return node
node = self.get_bld().make_node(lst)
node.parent.mkdir()
return node
def find_dir(self, lst):
"""
Search for a folder in the filesystem
:param lst: path
:type lst: string or list of string
"""
if isinstance(lst, str):
lst = [x for x in split_path(lst) if x and x != '.']
node = self.find_node(lst)
try:
if not os.path.isdir(node.abspath()):
return None
except (OSError, AttributeError):
# the node might be None, and raise an AttributeError
return None
return node
# helpers for building things
def change_ext(self, ext, ext_in=None):
	"""
	:param ext: new extension, including the dot (e.g. '.o')
	:param ext_in: extension to strip from the end of the name; when None,
		everything from the last dot onwards is replaced
	:return: A build node of the same path, but with a different extension
	:rtype: :py:class:`waflib.Node.Node`
	"""
	name = self.name
	if ext_in is None:
		k = name.rfind('.')
		if k >= 0:
			name = name[:k] + ext
		else:
			name = name + ext
	else:
		# Bug fix: name[:-len(ext_in)] evaluates to name[:0] == '' when
		# ext_in is the empty string; slicing by explicit length handles
		# that edge case correctly.
		name = name[:len(name) - len(ext_in)] + ext
	return self.parent.find_or_declare([name])
def bldpath(self):
"Path seen from the build directory default/src/foo.cpp"
return self.path_from(self.ctx.bldnode)
def srcpath(self):
"Path seen from the source directory ../src/foo.cpp"
return self.path_from(self.ctx.srcnode)
def relpath(self):
"If a file in the build directory, bldpath, else srcpath"
cur = self
x = id(self.ctx.bldnode)
while cur.parent:
if id(cur) == x:
return self.bldpath()
cur = cur.parent
return self.srcpath()
def bld_dir(self):
"Build path without the file name"
return self.parent.bldpath()
def get_bld_sig(self):
	"""
	Node signature, assuming the file is in the build directory.
	The computed value is cached on the node as ``cache_sig``.
	"""
	try:
		return self.cache_sig
	except AttributeError:
		pass
	# source files (or in-place builds) are hashed on demand
	if not self.is_bld() or self.ctx.bldnode is self.ctx.srcnode:
		self.sig = Utils.h_file(self.abspath())
	# NOTE(review): for a build-directory node, self.sig is presumably set
	# by the task that produced the file; if it was not, the line below
	# raises AttributeError -- confirm this is the intended contract.
	self.cache_sig = ret = self.sig
	return ret
pickle_lock = Utils.threading.Lock()
"""Lock mandatory for thread-safe node serialization"""
class Nod3(Node):
"""Mandatory subclass for thread-safe node serialization"""
pass # do not remove
| gpl-3.0 |
sexroute/commandergenius | project/jni/python/src/Lib/Bastion.py | 298 | 5744 | """Bastionification utility.
A bastion (for another object -- the 'original') is an object that has
the same methods as the original but does not give access to its
instance variables. Bastions have a number of uses, but the most
obvious one is to provide code executing in restricted mode with a
safe interface to an object implemented in unrestricted mode.
The bastionification routine has an optional second argument which is
a filter function. Only those methods for which the filter method
(called with the method name as argument) returns true are accessible.
The default filter method returns true unless the method name begins
with an underscore.
There are a number of possible implementations of bastions. We use a
'lazy' approach where the bastion's __getattr__() discipline does all
the work for a particular method the first time it is used. This is
usually fastest, especially if the user doesn't call all available
methods. The retrieved methods are stored as instance variables of
the bastion, so the overhead is only occurred on the first use of each
method.
Detail: the bastion class has a __repr__() discipline which includes
the repr() of the original object. This is precomputed when the
bastion is created.
"""
from warnings import warnpy3k
warnpy3k("the Bastion module has been removed in Python 3.0", stacklevel=2)
del warnpy3k
__all__ = ["BastionClass", "Bastion"]
from types import MethodType
class BastionClass:

    """Helper class used by the Bastion() function.

    You could subclass this and pass the subclass as the bastionclass
    argument to the Bastion() function, as long as the constructor has
    the same signature (a get() function and a name for the object).

    """

    def __init__(self, get, name):
        """Remember the attribute getter and a display name.

        Arguments:

        get - a function that gets the attribute value (by name)
        name - a human-readable name for the original object
               (suggestion: use repr(object))

        """
        self._get_ = get
        self._name_ = name

    def __repr__(self):
        """Return a representation string mentioning the wrapped object.

        Handy while debugging: printing the bastion at least hints at
        what it stands in for.
        """
        return "<Bastion for %s>" % self._name_

    def __getattr__(self, name):
        """Resolve an attribute on first use and memoize it.

        Python only invokes this hook for names that are not yet
        instance attributes, so storing the fetched value in __dict__
        means each attribute is computed at most once.  Exceptions
        raised by the getter propagate unchanged and are not cached.
        """
        value = self._get_(name)
        self.__dict__[name] = value
        return value
def Bastion(object, filter = lambda name: name[:1] != '_',
            name=None, bastionclass=BastionClass):
    """Create a bastion for an object, using an optional filter.

    See the Bastion module's documentation for background.

    Arguments:

    object - the original object
    filter - a predicate that decides whether a function name is OK;
             by default all names are OK that don't start with '_'
    name - the name of the object; default repr(object)
    bastionclass - class used to create the bastion; default BastionClass

    NOTE(review): the unconditional raise below deliberately disables
    this function (the restricted-execution model it relied on is not
    secure since Python 2.2); everything after it is unreachable.
    """

    raise RuntimeError, "This code is not secure in Python 2.2 and later"

    # Note: we define *two* ad-hoc functions here, get1 and get2.
    # Both are intended to be called in the same way: get(name).
    # It is clear that the real work (getting the attribute
    # from the object and calling the filter) is done in get1.
    # Why can't we pass get1 to the bastion? Because the user
    # would be able to override the filter argument! With get2,
    # overriding the default argument is no security loophole:
    # all it does is call it.
    # Also notice that we can't place the object and filter as
    # instance variables on the bastion object itself, since
    # the user has full access to all instance variables!

    def get1(name, object=object, filter=filter):
        """Internal function for Bastion(). See source comments."""
        if filter(name):
            attribute = getattr(object, name)
            if type(attribute) == MethodType:
                return attribute
        raise AttributeError, name

    def get2(name, get1=get1):
        """Internal function for Bastion(). See source comments."""
        return get1(name)

    if name is None:
        name = repr(object)
    return bastionclass(get2, name)
def _test():
"""Test the Bastion() function."""
class Original:
def __init__(self):
self.sum = 0
def add(self, n):
self._add(n)
def _add(self, n):
self.sum = self.sum + n
def total(self):
return self.sum
o = Original()
b = Bastion(o)
testcode = """if 1:
b.add(81)
b.add(18)
print "b.total() =", b.total()
try:
print "b.sum =", b.sum,
except:
print "inaccessible"
else:
print "accessible"
try:
print "b._add =", b._add,
except:
print "inaccessible"
else:
print "accessible"
try:
print "b._get_.func_defaults =", map(type, b._get_.func_defaults),
except:
print "inaccessible"
else:
print "accessible"
\n"""
exec testcode
print '='*20, "Using rexec:", '='*20
import rexec
r = rexec.RExec()
m = r.add_module('__main__')
m.b = b
r.r_exec(testcode)
if __name__ == '__main__':
_test()
| lgpl-2.1 |
messagebird/python-rest-api | examples/voice_recording_view.py | 1 | 1148 | #!/usr/bin/env python
import messagebird
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--accessKey', help='access key for MessageBird API', type=str, required=True)
parser.add_argument('--callID', help='identifier for the call', type=str, required=True)
parser.add_argument('--legID', help='identifier for the leg object you wish to list the recordings for', type=str, required=True)
parser.add_argument('--recordingID', help='identifier for the recording', type=str, required=True)
args = vars(parser.parse_args())
try:
client = messagebird.Client(args['accessKey'])
voiceRecording = client.voice_recording_view(args['callID'], args['legID'], args['recordingID'])
# Print the object information.
print('The following information was returned as a Voice Recording object:')
print(voiceRecording)
except messagebird.client.ErrorException as e:
print('An error occured while requesting a Message object:')
for error in e.errors:
print(' code : %d' % error.code)
print(' description : %s' % error.description)
print(' parameter : %s\n' % error.parameter)
| bsd-2-clause |
mooniak/animager | src/genImages.py | 1 | 1174 | ### generate temporary images using GIT commits
### dependencies : imagemagick
import os
def genTempImage( gitImage, options,
                  outputName, outputExt, outputDir ):
    """Render one frame by running ImageMagick's ``convert`` on *gitImage*.

    :param gitImage: path of the source image (current checkout state)
    :param options: extra ``convert`` command-line flags, as a single string
    :param outputName: base name of the generated file (no extension)
    :param outputExt: extension including the dot, e.g. '.png'
    :param outputDir: destination directory; assumed to end with a path
        separator so the concatenation below forms a valid path -- TODO confirm
    """
    # NOTE(review): the command line is built by plain string concatenation,
    # so paths or options containing spaces or shell metacharacters will
    # break or be interpreted by the shell; consider subprocess with a
    # list argv instead of os.system.
    os.system( 'convert ' + gitImage
               + ' ' + options
               + ' ' + outputDir
               + outputName
               + outputExt )
def gitCommitArray( image ):
    """Return the abbreviated commit hashes (newest first) that touched *image*.

    :param image: path of the tracked file, relative to the repository
    :return: list of short hash strings, one per commit

    Bug fix: the original shelled out via ``os.system`` and parsed a
    temporary ``log`` file, which was left behind in the working directory
    and broke for paths containing spaces or shell metacharacters.
    ``subprocess.check_output`` passes the file name safely, needs no
    temporary file, and raises if git fails instead of silently returning
    stale data.
    """
    import subprocess
    # '--' separates the path from options, disambiguating file names
    output = subprocess.check_output(['git', 'log', '--pretty=%h', '--', image])
    if not isinstance(output, str):
        # Python 3: check_output returns bytes
        output = output.decode('utf-8', 'replace')
    return output.splitlines()
def gitCheckoutOld( commitHash ):
    """Check out the given commit (detached HEAD) in the current repository.

    :param commitHash: commit identifier accepted by ``git checkout``
    """
    os.system( 'git checkout ' + commitHash )
def gitGenTempImages( image, outputDir ):
    """Render one numbered frame per commit of *image* into *outputDir*.

    Walks the file's git history oldest-to-newest, checking out each commit
    and converting the image to 00001.png, 00002.png, ... in commit order,
    then restores the working tree.

    :param image: path of the tracked image file
    :param outputDir: directory to create and fill with the frames
    """
    os.system( 'mkdir -p ' + outputDir )
    commits = gitCommitArray( image )
    # git log lists newest first; reverse to render in chronological order
    commits.reverse()
    count = 1
    for commitHash in commits:
        gitCheckoutOld( commitHash )
        genTempImage( gitImage = image,
                      options = '',
                      outputDir = outputDir,
                      outputName = str('%05d'%(count)),
                      outputExt = '.png' )
        count += 1
    print ("\nRolling changes back to master branch...\n")
    # NOTE(review): assumes the main branch is named 'master' -- confirm
    os.system( 'git checkout master' )
hlerebours/lambda-calculus | lambdax/builtins_as_lambdas.py | 1 | 2296 | """ Expose most of the built-in functions and classes but suffixed with '_λ'
and to be only used in λ-abstractions.
See module `lambdax.builtins_overridden` to keep built-in names and have a mixed behavior,
working as expected both inside and outside λ-abstractions.
"""
import builtins
from lambdax.lambda_calculus import λ as _λ
# pylint: disable=redefined-builtin
abs_λ = _λ(abs)
all_λ = _λ(all)
any_λ = _λ(any)
ascii_λ = _λ(ascii)
bin_λ = _λ(bin)
callable_λ = _λ(callable)
chr_λ = _λ(chr)
delattr_λ = _λ(delattr)
dir_λ = _λ(dir)
divmod_λ = _λ(divmod)
exit_λ = _λ(exit)
quit_λ = _λ(quit)
format_λ = _λ(format)
getattr_λ = _λ(getattr)
hasattr_λ = _λ(hasattr)
hash_λ = _λ(hash)
hex_λ = _λ(hex)
id_λ = _λ(id)
isinstance_λ = _λ(isinstance)
issubclass_λ = _λ(issubclass)
iter_λ = _λ(iter)
len_λ = _λ(len)
max_λ = _λ(max)
min_λ = _λ(min)
next_λ = _λ(next)
oct_λ = _λ(oct)
ord_λ = _λ(ord)
pow_λ = _λ(pow)
print_λ = _λ(print)
repr_λ = _λ(repr)
round_λ = _λ(round)
setattr_λ = _λ(setattr)
sorted_λ = _λ(sorted)
sum_λ = _λ(sum)
vars_λ = _λ(vars)
# Classes overridden as functions: calling them for instantiation and calling their
# class-methods and static methods behave as usual when parameters are not abstractions,
# however using them as second argument in `isinstance` or `issubclass` requires to use
# the overridden versions of these functions provided by this module too.
bool_λ = _λ(bool)
complex_λ = _λ(complex)
enumerate_λ = _λ(enumerate)
filter_λ = _λ(filter)
frozenset_λ = _λ(frozenset)
list_λ = _λ(list)
map_λ = _λ(map)
memoryview_λ = _λ(memoryview)
range_λ = _λ(range)
reversed_λ = _λ(reversed)
set_λ = _λ(set)
slice_λ = _λ(slice)
str_λ = _λ(str)
tuple_λ = _λ(tuple)
type_λ = _λ(type)
zip_λ = _λ(zip)
bytearray_λ = _λ(bytearray)
bytearray_fromhex_λ = _λ(builtins.bytearray.fromhex)
bytearray_maketrans_λ = _λ(builtins.bytearray.maketrans)
bytes_λ = _λ(bytes)
bytes_fromhex_λ = _λ(builtins.bytes.fromhex)
bytes_maketrans_λ = _λ(builtins.bytes.maketrans)
dict_λ = _λ(dict)
dict_fromkeys_λ = _λ(builtins.dict.fromkeys)
float_λ = _λ(float)
float_fromhex_λ = _λ(builtins.float.fromhex)
int_λ = _λ(int)
int_from_bytes_λ = _λ(builtins.int.from_bytes)
| mit |
ghchinoy/tensorflow | tensorflow/java/maven/tensorflow-android/update.py | 27 | 3972 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Fetch android artifacts and update pom properties."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import string
import sys
import urllib2
def get_args():
  """Parse command line args.

  Returns:
    argparse.Namespace with 'version', 'dir' and 'template' attributes;
    all three flags are required.
  """
  parser = argparse.ArgumentParser()
  parser.add_argument(
      '--version', required=True, help='Version for the artifact.')
  parser.add_argument(
      '--dir',
      required=True,
      help='Directory where the pom and aar artifact will be written.')
  parser.add_argument(
      '--template', required=True, help='Path to pom template file.')
  return parser.parse_args()
def get_json(url):
  """Load the contents of the URL as a json object.

  NOTE(review): relies on urllib2, so this module is Python-2-only
  as written.
  """
  return json.load(urllib2.urlopen(url))
def get_commit_id(build_info):
  """Fetch the git commit id from the build info json object.

  Args:
    build_info: dict parsed from the Jenkins build-info JSON.

  Returns:
    The commit SHA1 string.

  Raises:
    ValueError: if no commit id can be located in build_info.
  """
  release_commit_id = build_info.get('build_commit_id')
  if release_commit_id:
    return release_commit_id
  # Tolerate a missing 'actions' key instead of iterating None.
  actions = build_info.get('actions') or []
  # Bug fix: a bare next() raises StopIteration when no BuildData action
  # exists, which made the ValueError check below unreachable; passing a
  # default of None restores the intended error path.
  build_data = next(
      (a for a in actions
       if a.get('_class') == 'hudson.plugins.git.util.BuildData'), None)
  if not build_data:
    raise ValueError('Missing BuildData: %s' % build_info)
  revision_info = build_data.get('lastBuiltRevision')
  if not revision_info:
    raise ValueError('Missing lastBuiltRevision: %s' % build_info)
  return revision_info.get('SHA1')
def get_aar_url(build_info):
  """Given the json build info, find the URL to the tensorflow.aar artifact.

  Args:
    build_info: dict parsed from the Jenkins build-info JSON.

  Returns:
    Full URL string of the tensorflow.aar artifact.

  Raises:
    ValueError: if the URL or the artifact entry cannot be located.
  """
  base_url = build_info.get('url')
  if not base_url:
    raise ValueError('Missing url: %s' % build_info)
  build_class = build_info.get('_class')
  if build_class in ('hudson.model.FreeStyleBuild', 'hudson.matrix.MatrixRun'):
    # Bug fix: a bare next() raises StopIteration when the artifact is
    # absent, bypassing the ValueError below; a default of None keeps
    # the intended error message.
    aar_info = next(
        (a for a in build_info.get('artifacts', [])
         if a.get('fileName') == 'tensorflow.aar'), None)
    if not aar_info:
      raise ValueError('Missing aar artifact: %s' % build_info)
    return '%s/artifact/%s' % (base_url, aar_info.get('relativePath'))
  raise ValueError('Unknown build_type %s' % build_info)
def read_template(path):
  """Return a string.Template built from the contents of *path*."""
  with open(path) as handle:
    contents = handle.read()
  return string.Template(contents)
def main():
  """Download build info and the aar, then emit the Android pom.

  Side effects: writes '<dir>/pom-android.xml' and '<dir>/tensorflow.aar'
  under the directory given by --dir.
  """
  args = get_args()
  release_prefix = 'https://storage.googleapis.com/tensorflow/libtensorflow'
  info_url = '%s/android_buildinfo-%s.json' % (release_prefix, args.version)
  aar_url = '%s/tensorflow-%s.aar' % (release_prefix, args.version)
  build_type = 'release-android'

  # Retrieve build information
  build_info = get_json(info_url)

  # Check all required build info is present
  build_commit_id = get_commit_id(build_info)
  if not build_commit_id:
    raise ValueError('Missing commit id: %s' % build_info)

  # Write the pom file updated with build attributes.
  template = read_template(args.template)
  with open('%s/pom-android.xml' % args.dir, 'w') as f:
    f.write(
        template.substitute({
            'build_commit_id': build_commit_id,
            'build_type': build_type,
            'version': args.version
        }))

  # Retrieve the aar location if needed.
  # NOTE(review): aar_url is always assigned above, so this branch is
  # dead code as written -- confirm whether get_aar_url was meant to be
  # the fallback for a different case.
  if not aar_url:
    aar_url = get_aar_url(build_info)

  # And download the aar to the desired location.
  with open('%s/tensorflow.aar' % args.dir, 'w') as f:
    aar = urllib2.urlopen(aar_url)
    f.write(aar.read())
if __name__ == '__main__':
sys.exit(main())
| apache-2.0 |
aschampion/CATMAID | django/applications/catmaid/migrations/0042_add_reviewer_whitelist.py | 3 | 37027 | # -*- coding: utf-8 -*-
from south.utils import datetime_utils as datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
    """Apply the migration: create the 'reviewer_whitelist' table with a
    unique (project, user, reviewer) constraint."""
    # Adding model 'ReviewerWhitelist'
    db.create_table('reviewer_whitelist', (
        (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
        ('project', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['catmaid.Project'])),
        ('user', self.gf('django.db.models.fields.related.ForeignKey')(to=orm['auth.User'])),
        ('reviewer', self.gf('django.db.models.fields.related.ForeignKey')(related_name='+', to=orm['auth.User'])),
        # datetime.min: by default no review is old enough to be auto-accepted
        ('accept_after', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime(1, 1, 1, 0, 0))),
    ))
    db.send_create_signal(u'catmaid', ['ReviewerWhitelist'])

    # Adding unique constraint on 'ReviewerWhitelist', fields ['project', 'user', 'reviewer']
    db.create_unique('reviewer_whitelist', ['project_id', 'user_id', 'reviewer_id'])
def backwards(self, orm):
# Removing unique constraint on 'ReviewerWhitelist', fields ['project', 'user', 'reviewer']
db.delete_unique('reviewer_whitelist', ['project_id', 'user_id', 'reviewer_id'])
# Deleting model 'ReviewerWhitelist'
db.delete_table('reviewer_whitelist')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Group']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "u'user_set'", 'blank': 'True', 'to': u"orm['auth.Permission']"}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'catmaid.apikey': {
'Meta': {'object_name': 'ApiKey'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '128'})
},
u'catmaid.brokenslice': {
'Meta': {'object_name': 'BrokenSlice', 'db_table': "'broken_slice'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'index': ('django.db.models.fields.IntegerField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"})
},
u'catmaid.cardinalityrestriction': {
'Meta': {'object_name': 'CardinalityRestriction', 'db_table': "'cardinality_restriction'"},
'cardinality_type': ('django.db.models.fields.IntegerField', [], {}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassClass']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
u'catmaid.changerequest': {
'Meta': {'object_name': 'ChangeRequest', 'db_table': "'change_request'"},
'approve_action': ('django.db.models.fields.TextField', [], {}),
'completion_time': ('django.db.models.fields.DateTimeField', [], {'default': 'None', 'null': 'True'}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'recipient': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'change_recipient'", 'db_column': "'recipient_id'", 'to': u"orm['auth.User']"}),
'reject_action': ('django.db.models.fields.TextField', [], {}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'validate_action': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.class': {
'Meta': {'object_name': 'Class', 'db_table': "'class'"},
'class_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.classclass': {
'Meta': {'object_name': 'ClassClass', 'db_table': "'class_class'"},
'class_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_a'", 'db_column': "'class_a'", 'to': u"orm['catmaid.Class']"}),
'class_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'classes_b'", 'db_column': "'class_b'", 'to': u"orm['catmaid.Class']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.classinstance': {
'Meta': {'object_name': 'ClassInstance', 'db_table': "'class_instance'"},
'class_column': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Class']", 'db_column': "'class_id'"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.classinstanceclassinstance': {
'Meta': {'object_name': 'ClassInstanceClassInstance', 'db_table': "'class_instance_class_instance'"},
'class_instance_a': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_a'", 'db_column': "'class_instance_a'", 'to': u"orm['catmaid.ClassInstance']"}),
'class_instance_b': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'cici_via_b'", 'db_column': "'class_instance_b'", 'to': u"orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.concept': {
'Meta': {'object_name': 'Concept', 'db_table': "'concept'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.connector': {
'Meta': {'object_name': 'Connector', 'db_table': "'connector'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'connector_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.connectorclassinstance': {
'Meta': {'object_name': 'ConnectorClassInstance', 'db_table': "'connector_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.dataview': {
'Meta': {'ordering': "('position',)", 'object_name': 'DataView', 'db_table': "'data_view'"},
'comment': ('django.db.models.fields.TextField', [], {'default': "''", 'null': 'True', 'blank': 'True'}),
'config': ('django.db.models.fields.TextField', [], {'default': "'{}'"}),
'data_view_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.DataViewType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'position': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.dataviewtype': {
'Meta': {'object_name': 'DataViewType', 'db_table': "'data_view_type'"},
'code_type': ('django.db.models.fields.TextField', [], {}),
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.deprecatedappliedmigrations': {
'Meta': {'object_name': 'DeprecatedAppliedMigrations', 'db_table': "'applied_migrations'"},
'id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'primary_key': 'True'})
},
u'catmaid.deprecatedsession': {
'Meta': {'object_name': 'DeprecatedSession', 'db_table': "'sessions'"},
'data': ('django.db.models.fields.TextField', [], {'default': "''"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_accessed': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'session_id': ('django.db.models.fields.CharField', [], {'max_length': '26'})
},
u'catmaid.location': {
'Meta': {'object_name': 'Location', 'db_table': "'location'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'location_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.log': {
'Meta': {'object_name': 'Log', 'db_table': "'log'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'freetext': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'operation_type': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.message': {
'Meta': {'object_name': 'Message', 'db_table': "'message'"},
'action': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'read': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'New message'", 'null': 'True', 'blank': 'True'}),
'time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'title': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.overlay': {
'Meta': {'object_name': 'Overlay', 'db_table': "'overlay'"},
'default_opacity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'file_extension': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_base': ('django.db.models.fields.TextField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"}),
'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '512'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.project': {
'Meta': {'object_name': 'Project', 'db_table': "'project'"},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'stacks': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['catmaid.Stack']", 'through': u"orm['catmaid.ProjectStack']", 'symmetrical': 'False'}),
'title': ('django.db.models.fields.TextField', [], {})
},
u'catmaid.projectstack': {
'Meta': {'object_name': 'ProjectStack', 'db_table': "'project_stack'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'orientation': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"}),
'translation': ('catmaid.fields.Double3DField', [], {'default': '(0, 0, 0)'})
},
u'catmaid.regionofinterest': {
'Meta': {'object_name': 'RegionOfInterest', 'db_table': "'region_of_interest'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'roi_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
'height': ('django.db.models.fields.FloatField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'rotation_cw': ('django.db.models.fields.FloatField', [], {}),
'stack': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Stack']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'width': ('django.db.models.fields.FloatField', [], {}),
'zoom_level': ('django.db.models.fields.IntegerField', [], {})
},
u'catmaid.regionofinterestclassinstance': {
'Meta': {'object_name': 'RegionOfInterestClassInstance', 'db_table': "'region_of_interest_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'region_of_interest': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.RegionOfInterest']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.relation': {
'Meta': {'object_name': 'Relation', 'db_table': "'relation'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'description': ('django.db.models.fields.TextField', [], {}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'isreciprocal': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation_name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'uri': ('django.db.models.fields.TextField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.relationinstance': {
'Meta': {'object_name': 'RelationInstance', 'db_table': "'relation_instance'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.restriction': {
'Meta': {'object_name': 'Restriction', 'db_table': "'restriction'"},
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'enabled': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'restricted_link': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassClass']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.review': {
'Meta': {'object_name': 'Review', 'db_table': "'review'"},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'review_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"})
},
u'catmaid.reviewerwhitelist': {
'Meta': {'unique_together': "(('project', 'user', 'reviewer'),)", 'object_name': 'ReviewerWhitelist', 'db_table': "'reviewer_whitelist'"},
'accept_after': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(1, 1, 1, 0, 0)'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'reviewer': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'+'", 'to': u"orm['auth.User']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.settings': {
'Meta': {'object_name': 'Settings', 'db_table': "'settings'"},
'key': ('django.db.models.fields.TextField', [], {'primary_key': 'True'}),
'value': ('django.db.models.fields.TextField', [], {'null': 'True'})
},
u'catmaid.stack': {
'Meta': {'object_name': 'Stack', 'db_table': "'stack'"},
'comment': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'dimension': ('catmaid.fields.Integer3DField', [], {}),
'file_extension': ('django.db.models.fields.TextField', [], {'default': "'jpg'", 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image_base': ('django.db.models.fields.TextField', [], {}),
'metadata': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'num_zoom_levels': ('django.db.models.fields.IntegerField', [], {'default': '-1'}),
'resolution': ('catmaid.fields.Double3DField', [], {}),
'tile_height': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
'tile_source_type': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'tile_width': ('django.db.models.fields.IntegerField', [], {'default': '256'}),
'title': ('django.db.models.fields.TextField', [], {}),
'trakem2_project': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
},
u'catmaid.textlabel': {
'Meta': {'object_name': 'Textlabel', 'db_table': "'textlabel'"},
'colour': ('catmaid.fields.RGBAField', [], {'default': '(1, 0.5, 0, 1)'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'font_name': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'font_size': ('django.db.models.fields.FloatField', [], {'default': '32'}),
'font_style': ('django.db.models.fields.TextField', [], {'null': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'text': ('django.db.models.fields.TextField', [], {'default': "'Edit this text ...'"}),
'type': ('django.db.models.fields.CharField', [], {'max_length': '32'})
},
u'catmaid.textlabellocation': {
'Meta': {'object_name': 'TextlabelLocation', 'db_table': "'textlabel_location'"},
'deleted': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location': ('catmaid.fields.Double3DField', [], {}),
'textlabel': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Textlabel']"})
},
u'catmaid.treenode': {
'Meta': {'object_name': 'Treenode', 'db_table': "'treenode'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'editor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'treenode_editor'", 'db_column': "'editor_id'", 'to': u"orm['auth.User']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'location_x': ('django.db.models.fields.FloatField', [], {}),
'location_y': ('django.db.models.fields.FloatField', [], {}),
'location_z': ('django.db.models.fields.FloatField', [], {}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'children'", 'null': 'True', 'to': u"orm['catmaid.Treenode']"}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'radius': ('django.db.models.fields.FloatField', [], {}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.treenodeclassinstance': {
'Meta': {'object_name': 'TreenodeClassInstance', 'db_table': "'treenode_class_instance'"},
'class_instance': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.treenodeconnector': {
'Meta': {'object_name': 'TreenodeConnector', 'db_table': "'treenode_connector'"},
'confidence': ('django.db.models.fields.IntegerField', [], {'default': '5'}),
'connector': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Connector']"}),
'creation_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'edition_time': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'project': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Project']"}),
'relation': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Relation']"}),
'skeleton': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.ClassInstance']"}),
'treenode': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['catmaid.Treenode']"}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'catmaid.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'color': ('catmaid.fields.RGBAField', [], {'default': '(0.8122197914467499, 1.0, 0.9295521795841548, 1)'}),
'display_stack_reference_lines': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'independent_ontology_workspace_is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'inverse_mouse_wheel': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_cropping_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_ontology_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_roi_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_segmentation_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_tagging_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_text_label_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'show_tracing_tool': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'tracing_overlay_scale': ('django.db.models.fields.FloatField', [], {'default': '1'}),
'tracing_overlay_screen_scaling': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
}
}
    complete_apps = ['catmaid']
russss/Diamond | src/collectors/apcupsd/apcupsd.py | 31 | 3108 | # coding=utf-8
"""
Collects the complete status of most American Power Conversion Corp. (APC) UPSes
provided you have the apcupsd daemon installed, properly configured and
running. It can access status information from any APC UPS attached to the
localhost or attached to any computer on the network which is running
apcuspd in NIS mode.
#### Dependencies
* apcuspd in NIS mode
"""
import diamond.collector
import socket
from struct import pack
import re
import time
class ApcupsdCollector(diamond.collector.Collector):
    """Collect UPS status metrics from an apcupsd daemon in NIS mode.

    Connects to the daemon's Network Information Server TCP port
    (default 3551), requests one status report, parses the textual
    "KEY : value" records and publishes the configured numeric fields.
    """

    # One "KEY : value" record from the status report. Raw strings so the
    # backslash escapes are real regex escapes, compiled once per process
    # instead of once per record.
    _STATUS_RE = re.compile(r"([A-Z]+)\s+:\s+(.*)$")
    # Leading numeric portion of a status value (e.g. "120.0 Volts").
    _VALUE_RE = re.compile(r"([0-9.]+)")

    def get_default_config_help(self):
        """Return help text for the collector's configuration keys."""
        config_help = super(ApcupsdCollector, self).get_default_config_help()
        config_help.update({
            'hostname': 'Hostname to collect from',
            'port': 'port to collect from. defaults to 3551',
            'metrics':
                'List of metrics. Valid metric keys can be found [here]' +
                '(http://www.apcupsd.com/manual/' +
                'manual.html#status-report-fields)'
        })
        return config_help

    def get_default_config(self):
        """
        Returns the default collector settings
        """
        config = super(ApcupsdCollector, self).get_default_config()
        config.update({
            'path': 'apcupsd',
            'hostname': 'localhost',
            'port': 3551,
            'metrics': ['LINEV', 'LOADPCT', 'BCHARGE', 'TIMELEFT', 'BATTV',
                        'NUMXFERS', 'TONBATT', 'MAXLINEV', 'MINLINEV',
                        'OUTPUTV', 'ITEMP', 'LINEFREQ', 'CUMONBATT', ],
        })
        return config

    def getData(self):
        """Fetch one raw status report from the daemon over TCP.

        Returns the raw NIS payload (length-prefixed text records).
        The socket is always closed, even when the daemon is
        unreachable or the request fails.
        """
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        try:
            s.connect((self.config['hostname'], int(self.config['port'])))
            # NIS request framing: pad byte, length byte, then the command.
            s.send(pack('xb6s', 6, 'status'))
            # Ditch the header
            s.recv(1024)
            time.sleep(.25)
            data = s.recv(4096)
        finally:
            # Fix: the original leaked the socket whenever connect/send/recv
            # raised; release it unconditionally.
            s.close()
        return data

    def collect(self):
        """Parse the status report and publish the configured metrics."""
        metrics = {}
        raw = {}
        data = self.getData()
        # Records are separated by a newline followed by a NUL length byte.
        for d in data.split('\n\x00'):
            matches = self._STATUS_RE.search(d)
            if not matches:
                continue
            key = matches.group(1)
            value = matches.group(2).strip()
            raw[key] = value
            vmatch = self._VALUE_RE.search(value)
            if not vmatch:
                continue
            try:
                metrics[key] = float(vmatch.group(1))
            except ValueError:
                continue
        for metric in self.config['metrics']:
            if metric not in metrics:
                continue
            # Prefix every published metric with the UPS name reported
            # by the daemon (UPSNAME field).
            metric_name = "%s.%s" % (raw['UPSNAME'], metric)
            value = metrics[metric]
            if metric in ('TONBATT', 'CUMONBATT', 'NUMXFERS'):
                # These fields are monotonically growing counters;
                # publish their rate of change instead of the raw value.
                value = self.derivative(metric_name, metrics[metric])
            self.publish(metric_name, value)
        return True
| mit |
petewarden/tensorflow | tensorflow/python/debug/cli/cli_shared.py | 25 | 15940 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Shared functions and classes for tfdbg command-line interface."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
import six
from tensorflow.python.debug.cli import command_parser
from tensorflow.python.debug.cli import debugger_cli_common
from tensorflow.python.debug.cli import tensor_format
from tensorflow.python.debug.lib import common
from tensorflow.python.framework import ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import gfile
# Shorthand alias for building rich (attributed) console lines.
RL = debugger_cli_common.RichLine
# Default threshold number of elements above which ellipses will be used
# when printing the value of the tensor.
DEFAULT_NDARRAY_DISPLAY_THRESHOLD = 2000
# Font color names accepted by the CLI frontends.
COLOR_BLACK = "black"
COLOR_BLUE = "blue"
COLOR_CYAN = "cyan"
COLOR_GRAY = "gray"
COLOR_GREEN = "green"
COLOR_MAGENTA = "magenta"
COLOR_RED = "red"
COLOR_WHITE = "white"
COLOR_YELLOW = "yellow"
# Time units, ordered from smallest to largest; the list index is the
# power-of-1000 scale factor relative to microseconds.
TIME_UNIT_US = "us"
TIME_UNIT_MS = "ms"
TIME_UNIT_S = "s"
TIME_UNITS = [TIME_UNIT_US, TIME_UNIT_MS, TIME_UNIT_S]
def bytes_to_readable_str(num_bytes, include_b=False):
  """Generate a human-readable string representing number of bytes.

  The units B, kB, MB and GB are used.

  Args:
    num_bytes: (`int` or None) Number of bytes.
    include_b: (`bool`) Include the letter B at the end of the unit.

  Returns:
    (`str`) A string representing the number of bytes in a human-readable way,
    including a unit at the end.
  """
  if num_bytes is None:
    return str(num_bytes)
  # Largest unit first; integral byte counts below 1 kB are printed verbatim.
  if num_bytes >= 1073741824:
    readable = "%.2fG" % (num_bytes / 1073741824.0)
  elif num_bytes >= 1048576:
    readable = "%.2fM" % (num_bytes / 1048576.0)
  elif num_bytes >= 1024:
    readable = "%.2fk" % (num_bytes / 1024.0)
  else:
    readable = "%d" % num_bytes
  return readable + "B" if include_b else readable
def time_to_readable_str(value_us, force_time_unit=None):
  """Convert time value to human-readable string.

  Args:
    value_us: time value in microseconds.
    force_time_unit: force the output to use the specified time unit. Must be
      in TIME_UNITS.

  Returns:
    Human-readable string representation of the time value.

  Raises:
    ValueError: if force_time_unit value is not in TIME_UNITS.
  """
  if not value_us:
    return "0"
  if not force_time_unit:
    # Auto-scale: pick the largest unit (capped at seconds) whose mantissa
    # stays >= 1.
    magnitude = min(len(TIME_UNITS) - 1, int(math.log(value_us, 10) / 3))
    scaled = value_us / math.pow(10.0, 3 * magnitude)
    return "{:.3g}{}".format(scaled, TIME_UNITS[magnitude])
  if force_time_unit not in TIME_UNITS:
    raise ValueError("Invalid time unit: %s" % force_time_unit)
  magnitude = TIME_UNITS.index(force_time_unit)
  scaled = value_us / math.pow(10.0, 3 * magnitude)
  return "{:.10g}{}".format(scaled, force_time_unit)
def parse_ranges_highlight(ranges_string):
  """Process ranges highlight string.
  Args:
    ranges_string: (str) A string representing a numerical range of a list of
      numerical ranges. See the help info of the -r flag of the print_tensor
      command for more details.
  Returns:
    An instance of tensor_format.HighlightOptions, if range_string is a valid
    representation of a range or a list of ranges.
  """
  # `ranges` is captured by `ranges_filter` as a closure; it is rebound below
  # (after the filter is defined) once the string has been parsed.
  ranges = None
  def ranges_filter(x):
    # Element-wise boolean mask: True where x falls inside any of the
    # closed intervals [range_start, range_end].
    r = np.zeros(x.shape, dtype=bool)
    for range_start, range_end in ranges:
      r = np.logical_or(r, np.logical_and(x >= range_start, x <= range_end))
    return r
  if ranges_string:
    ranges = command_parser.parse_ranges(ranges_string)
    return tensor_format.HighlightOptions(
        ranges_filter, description=ranges_string)
  else:
    return None
def numpy_printoptions_from_screen_info(screen_info):
  """Derive numpy print options from screen information.

  Args:
    screen_info: (dict or None) Screen information; only the "cols" key, when
      present, is used.

  Returns:
    (dict) numpy printoptions with "linewidth" set to the screen column count,
    or an empty dict when no column count is available.
  """
  if not screen_info:
    return {}
  if "cols" not in screen_info:
    return {}
  return {"linewidth": screen_info["cols"]}
def format_tensor(tensor,
                  tensor_name,
                  np_printoptions,
                  print_all=False,
                  tensor_slicing=None,
                  highlight_options=None,
                  include_numeric_summary=False,
                  write_path=None):
  """Generate formatted str to represent a tensor or its slices.
  Args:
    tensor: (numpy ndarray) The tensor value.
    tensor_name: (str) Name of the tensor, e.g., the tensor's debug watch key.
    np_printoptions: (dict) Numpy tensor formatting options. Note: this dict
      is mutated in place (its "threshold" key is overwritten below).
    print_all: (bool) Whether the tensor is to be displayed in its entirety,
      instead of printing ellipses, even if its number of elements exceeds
      the default numpy display threshold.
      (Note: Even if this is set to true, the screen output can still be cut
      off by the UI frontend if it consist of more lines than the frontend
      can handle.)
    tensor_slicing: (str or None) Slicing of the tensor, e.g., "[:, 1]". If
      None, no slicing will be performed on the tensor.
    highlight_options: (tensor_format.HighlightOptions) options to highlight
      elements of the tensor. See the doc of tensor_format.format_tensor()
      for more details.
    include_numeric_summary: Whether a text summary of the numeric values (if
      applicable) will be included.
    write_path: A path to save the tensor value (after any slicing) to
      (optional). `numpy.save()` is used to save the value.
  Returns:
    An instance of `debugger_cli_common.RichTextLines` representing the
    (potentially sliced) tensor.
  """
  if tensor_slicing:
    # Validate the indexing.
    value = command_parser.evaluate_tensor_slice(tensor, tensor_slicing)
    sliced_name = tensor_name + tensor_slicing
  else:
    value = tensor
    sliced_name = tensor_name
  auxiliary_message = None
  if write_path:
    # Persist the (sliced) value and prepend a confirmation line, with the
    # on-disk size, to the formatted output.
    with gfile.Open(write_path, "wb") as output_file:
      np.save(output_file, value)
    line = debugger_cli_common.RichLine("Saved value to: ")
    line += debugger_cli_common.RichLine(write_path, font_attr="bold")
    line += " (%sB)" % bytes_to_readable_str(gfile.Stat(write_path).length)
    auxiliary_message = debugger_cli_common.rich_text_lines_from_rich_line_list(
        [line, debugger_cli_common.RichLine("")])
  if print_all:
    # Setting the threshold to the element count disables ellipses.
    np_printoptions["threshold"] = value.size
  else:
    np_printoptions["threshold"] = DEFAULT_NDARRAY_DISPLAY_THRESHOLD
  return tensor_format.format_tensor(
      value,
      sliced_name,
      include_metadata=True,
      include_numeric_summary=include_numeric_summary,
      auxiliary_message=auxiliary_message,
      np_printoptions=np_printoptions,
      highlight_options=highlight_options)
def error(msg):
  """Generate a RichTextLines output for error.

  Args:
    msg: (str) The error message.

  Returns:
    (debugger_cli_common.RichTextLines) A representation of the error message
    for screen output.
  """
  # Render the message as a single red line prefixed with "ERROR: ".
  error_line = RL("ERROR: " + msg, COLOR_RED)
  return debugger_cli_common.rich_text_lines_from_rich_line_list([error_line])
def _recommend_command(command, description, indent=2, create_link=False):
  """Generate a RichTextLines object that describes a recommended command.
  Args:
    command: (str) The command to recommend.
    description: (str) A description of what the command does.
    indent: (int) How many spaces to indent in the beginning.
    create_link: (bool) Whether a command link is to be applied to the command
      string.
  Returns:
    (RichTextLines) Formatted text (with font attributes) for recommending the
    command.
  """
  indent_str = " " * indent
  if create_link:
    # Clickable menu item plus bold styling for the command text.
    font_attr = [debugger_cli_common.MenuItem("", command), "bold"]
  else:
    font_attr = "bold"
  # First line: the command itself; second line: its indented description.
  lines = [RL(indent_str) + RL(command, font_attr) + ":",
           indent_str + "  " + description]
  return debugger_cli_common.rich_text_lines_from_rich_line_list(lines)
def get_tfdbg_logo():
  """Make an ASCII representation of the tfdbg logo."""
  # Fixed-width ASCII art; rendered verbatim by the CLI frontends.
  lines = [
      "",
      "TTTTTT FFFF DDD  BBBB   GGG ",
      "  TT   F    D  D B   B G    ",
      "  TT   FFF  D  D BBBB  G  GG",
      "  TT   F    D  D B   B G   G",
      "  TT   F    DDD  BBBB   GGG ",
      "",
  ]
  return debugger_cli_common.RichTextLines(lines)
# Separator line used to delimit sections of the run-start/error intro UIs.
_HORIZONTAL_BAR = "======================================"
def get_run_start_intro(run_call_count,
                        fetches,
                        feed_dict,
                        tensor_filters,
                        is_callable_runner=False):
  """Generate formatted intro for run-start UI.
  Args:
    run_call_count: (int) Run call counter.
    fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
      for more details.
    feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
      for more details.
    tensor_filters: (dict) A dict from tensor-filter name to tensor-filter
      callable.
    is_callable_runner: (bool) whether a runner returned by
      Session.make_callable is being run.
  Returns:
    (RichTextLines) Formatted intro message about the `Session.run()` call.
  """
  fetch_lines = common.get_flattened_names(fetches)
  if not feed_dict:
    feed_dict_lines = [debugger_cli_common.RichLine("  (Empty)")]
  else:
    feed_dict_lines = []
    for feed_key in feed_dict:
      feed_key_name = common.get_graph_element_name(feed_key)
      feed_dict_line = debugger_cli_common.RichLine("  ")
      # Each feed becomes a clickable "pf" (print_feed) menu item.
      feed_dict_line += debugger_cli_common.RichLine(
          feed_key_name,
          debugger_cli_common.MenuItem(None, "pf '%s'" % feed_key_name))
      # Surround the name string with quotes, because feed_key_name may contain
      # spaces in some cases, e.g., SparseTensors.
      feed_dict_lines.append(feed_dict_line)
  feed_dict_lines = debugger_cli_common.rich_text_lines_from_rich_line_list(
      feed_dict_lines)
  out = debugger_cli_common.RichTextLines(_HORIZONTAL_BAR)
  if is_callable_runner:
    out.append("Running a runner returned by Session.make_callable()")
  else:
    out.append("Session.run() call #%d:" % run_call_count)
    out.append("")
    out.append("Fetch(es):")
    out.extend(debugger_cli_common.RichTextLines(
        ["  " + line for line in fetch_lines]))
    out.append("")
    out.append("Feed dict:")
    out.extend(feed_dict_lines)
  out.append(_HORIZONTAL_BAR)
  out.append("")
  out.append("Select one of the following commands to proceed ---->")
  # Recommended commands; "run" and "run -n" are rendered as clickable links.
  out.extend(
      _recommend_command(
          "run",
          "Execute the run() call with debug tensor-watching",
          create_link=True))
  out.extend(
      _recommend_command(
          "run -n",
          "Execute the run() call without debug tensor-watching",
          create_link=True))
  out.extend(
      _recommend_command(
          "run -t <T>",
          "Execute run() calls (T - 1) times without debugging, then "
          "execute run() once more with debugging and drop back to the CLI"))
  out.extend(
      _recommend_command(
          "run -f <filter_name>",
          "Keep executing run() calls until a dumped tensor passes a given, "
          "registered filter (conditional breakpoint mode)"))
  # List the registered tensor filters, each as a "run -f <name>" link.
  more_lines = ["    Registered filter(s):"]
  if tensor_filters:
    filter_names = []
    for filter_name in tensor_filters:
      filter_names.append(filter_name)
      command_menu_node = debugger_cli_common.MenuItem(
          "", "run -f %s" % filter_name)
      more_lines.append(RL("        * ") + RL(filter_name, command_menu_node))
  else:
    more_lines.append("        (None)")
  out.extend(
      debugger_cli_common.rich_text_lines_from_rich_line_list(more_lines))
  out.append("")
  out.append_rich_line(RL("For more details, see ") +
                       RL("help.", debugger_cli_common.MenuItem("", "help")) +
                       ".")
  out.append("")
  # Make main menu for the run-start intro.
  menu = debugger_cli_common.Menu()
  menu.append(debugger_cli_common.MenuItem("run", "run"))
  menu.append(debugger_cli_common.MenuItem("exit", "exit"))
  out.annotations[debugger_cli_common.MAIN_MENU_KEY] = menu
  return out
def get_run_short_description(run_call_count,
                              fetches,
                              feed_dict,
                              is_callable_runner=False):
  """Get a short description of the run() call.
  Args:
    run_call_count: (int) Run call counter.
    fetches: Fetches of the `Session.run()` call. See doc of `Session.run()`
      for more details.
    feed_dict: Feeds to the `Session.run()` call. See doc of `Session.run()`
      for more details.
    is_callable_runner: (bool) whether a runner returned by
      Session.make_callable is being run.
  Returns:
    (str) A short description of the run() call, including information about
      the fetch(es) and feed(s).
  """
  if is_callable_runner:
    return "runner from make_callable()"
  description = "run #%d: " % run_call_count
  if isinstance(fetches, (ops.Tensor, ops.Operation, variables.Variable)):
    # A single graph element: include its name in the description.
    description += "1 fetch (%s); " % common.get_graph_element_name(fetches)
  else:
    # Could be (nested) list, tuple, dict or namedtuple.
    num_fetches = len(common.get_flattened_names(fetches))
    if num_fetches > 1:
      description += "%d fetches; " % num_fetches
    else:
      description += "%d fetch; " % num_fetches
  if not feed_dict:
    description += "0 feeds"
  else:
    if len(feed_dict) == 1:
      # A single feed: name it, falling back to str keys or keys without a
      # .name attribute.
      for key in feed_dict:
        description += "1 feed (%s)" % (
            key if isinstance(key, six.string_types) or not hasattr(key, "name")
            else key.name)
    else:
      description += "%d feeds" % len(feed_dict)
  return description
def get_error_intro(tf_error):
  """Generate formatted intro for TensorFlow run-time error.
  Args:
    tf_error: (errors.OpError) TensorFlow run-time error object.
  Returns:
    (RichTextLines) Formatted intro message about the run-time OpError, with
    sample commands for debugging.
  """
  # Not every OpError carries the op that raised it; fall back to None.
  if hasattr(tf_error, "op") and hasattr(tf_error.op, "name"):
    op_name = tf_error.op.name
  else:
    op_name = None
  intro_lines = [
      "--------------------------------------",
      RL("!!! An error occurred during the run !!!", "blink"),
      "",
  ]
  out = debugger_cli_common.rich_text_lines_from_rich_line_list(intro_lines)
  if op_name is not None:
    # Offer clickable debugging commands targeting the failing op.
    out.extend(debugger_cli_common.RichTextLines(
        ["You may use the following commands to debug:"]))
    out.extend(
        _recommend_command("ni -a -d -t %s" % op_name,
                           "Inspect information about the failing op.",
                           create_link=True))
    out.extend(
        _recommend_command("li -r %s" % op_name,
                           "List inputs to the failing op, recursively.",
                           create_link=True))
    out.extend(
        _recommend_command(
            "lt",
            "List all tensors dumped during the failing run() call.",
            create_link=True))
  else:
    out.extend(debugger_cli_common.RichTextLines([
        "WARNING: Cannot determine the name of the op that caused the error."]))
  more_lines = [
      "",
      "Op name: %s" % op_name,
      "Error type: " + str(type(tf_error)),
      "",
      "Details:",
      str(tf_error),
      "",
      "--------------------------------------",
      "",
  ]
  out.extend(debugger_cli_common.RichTextLines(more_lines))
  return out
| apache-2.0 |
yannikbehr/spectroscopy | src/spectroscopy/flux/watcher.py | 2 | 17856 | #Copyright (C) Nial Peters 2014
#
#This file is part of plumetrack.
#
#plumetrack is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#plumetrack is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with plumetrack. If not, see <http://www.gnu.org/licenses/>.
#
#Large parts of this implementation (in particular the Windows
#implementation) is copied from Wim Leers' fsmonitor.py module:
#https://github.com/wimleers/fileconveyor/blob/master/fileconveyor/fsmonitor.py
#this is licensed under GPL.
"""
The watcher module provides functions and classes for monitoring a directory
structure for new files. It uses system-level functions for detecting new files
(rather than repeatedly listing the files in a directory and looking for
changes).
The following code shows how the watcher module can be used to print the names
of any new files created in a directory:
from plumetrack import watcher
import sys
import time
def print_name(s, t):
print s
if not watcher.can_watch_directories():
print "No directory watching implementation available."
sys.exit()
dir_watcher = watcher.create_dir_watcher("my_directory", False, print_name)
dir_watcher.start()
#during the next minute, the names of any files created in "my_directory"
#will be printed
time.sleep(60)
dir_watcher.stop()
"""
import sys
import os.path
import datetime
import calendar
import time
import threading
import Queue
def can_watch_directories():
    """
    Returns true if there is a directory watching implementation for
    this system type.

    A platform is only considered watchable if the system clock has
    sub-second resolution (needed for the file creation timestamps) and
    the platform specific watching module (pyinotify on Linux, pywin32
    on Windows) can be imported.
    """
    #because this function takes a long time to complete - we cache the result
    #and return that for future calls
    if not hasattr(can_watch_directories, 'result_cache'):
        #check if we have better than 1 second resolution on the system clock
        t1 = datetime.datetime.utcnow()
        time.sleep(0.5)
        t2 = datetime.datetime.utcnow()
        #use the full elapsed time rather than just the microseconds
        #component - the old check wrapped around for sleeps >= 1 second
        if abs(0.5 - (t2 - t1).total_seconds()) > 0.1:
            #clock resolution too coarse - previously this False was
            #unconditionally overwritten by the platform checks below
            result = False
        #check if the modules needed for directory watching are installed.
        #For Linux systems we use the inotify Python bindings and for Windows
        #we use the win32 Python module.
        elif sys.platform == 'linux2':
            try:
                import pyinotify
                result = True
            except ImportError:
                result = False
        elif sys.platform == 'win32':
            try:
                import win32file
                import win32con
                result = True
            except ImportError:
                result = False
        else:
            result = False
        #cache the result so that future calls to this function return faster
        can_watch_directories.result_cache = result
    return can_watch_directories.result_cache
def create_dir_watcher(dir_name, recursive, func, *args, **kwargs):
    """
    Returns a watcher class suitable for this system type, or
    None if no implementation is available.

    * dir_name - path to directory to be watched
    * recursive - boolean specifying whether to watch subdirectories as well.
    * func - callable which will be called each time a new file is detected,
             this will be passed the the full path to the created file, the
             creation time of the file (a datetime object), and any additional
             args or kwargs specified.
    * args/kwargs - additional arguments to pass to func.
    """
    if can_watch_directories():
        if sys.platform == 'linux2':
            return LinuxDirectoryWatcher(dir_name, func, recursive, *args, **kwargs)
        elif sys.platform == 'win32':
            return WindowsDirectoryWatcher(dir_name, func, recursive, *args, **kwargs)
        else:
            #parenthesised raise replaces the Python 2-only
            #"raise RuntimeError, msg" statement form
            raise RuntimeError("Failed to create DirectoryWatcher. Unsupported OS")
    else:
        return None
class _DirectoryWatcherBase:
"""
Base class for watcher classes.
"""
def __init__(self, dir_name, func, recursive, *args, **kwargs):
self.dir_to_watch = dir_name
self.__func = func
self.__args = args
self.__kwargs = kwargs
def _on_new_file(self, filename, time_):
self.__func(filename, time_, *self.__args, **self.__kwargs)
def start(self):
raise NotImplementedError
def stop(self):
raise NotImplementedError
try:
    import pyinotify
    class LinuxDirectoryWatcher(_DirectoryWatcherBase,pyinotify.ProcessEvent):
        """
        Watcher class using inotify.

        IN_CREATE events record a creation timestamp; the user callback is
        only fired on the matching IN_CLOSE_WRITE, so partially written
        files are never reported.
        """
        def __init__(self, dir_name, func, recursive, *args, **kwargs):
            #note that you are not supposed to override the __init__ method
            # of the ProcessEvent class, however, in order to pass args
            # through to the DirWatcherBase class I need to.
            pyinotify.ProcessEvent.__init__(self)
            _DirectoryWatcherBase.__init__(self, dir_name, func, recursive, *args, **kwargs)
            watch_manager = pyinotify.WatchManager()
            #watch both creation and close-after-write (see process_* below)
            mask = pyinotify.EventsCodes.OP_FLAGS['IN_CREATE'] | pyinotify.EventsCodes.OP_FLAGS['IN_CLOSE_WRITE']
            watch_manager.add_watch(dir_name, mask, rec=recursive, auto_add=recursive)
            self.notifier = pyinotify.ThreadedNotifier(watch_manager, self)
            #maps full path -> creation time (float UTC timestamp)
            self.__created_files = {}
        def start(self):
            """Start the background notifier thread."""
            self.notifier.start()
        def stop(self):
            """Stop the background notifier thread."""
            self.notifier.stop()
        def process_IN_CREATE(self, event):
            #we want to make sure that the process that created the new file has
            #finished writing to it before we call the on_new_file function, otherwise
            #we may end up reading a partly written file. To do this, we store the
            #creation time of each file in a dictionary, and then wait for the in_close_write
            #event to trigger before calling the on_new_file function. This also means
            #that the on_new_file function will only be called if the file is newly created,
            #editing an existing file will not result in on_new_file being called.
            t = datetime.datetime.utcnow()
            self.__created_files[os.path.join(event.path, event.name)] = calendar.timegm(t.timetuple()) + t.microsecond*1e-6
        def process_IN_CLOSE_WRITE(self, event):
            try:
                filename = os.path.join(event.path, event.name)
                creation_time = self.__created_files.pop(filename)
                self._on_new_file(filename,creation_time)
            except KeyError:
                #file has only been edited, not newly created
                pass
except ImportError:
    pass
try:
    import win32file
    import winerror
    import win32con
    import pywintypes
    import ctypes
    class FSMonitorError(Exception):
        #base class for all filesystem-monitor errors in this module
        pass
    class FSMonitorOSError(OSError, FSMonitorError):
        #OS-level monitor failure; also catchable as OSError
        pass
    class FSEvent(object):
        """A single filesystem change event produced by FSMonitor."""
        def __init__(self, watch, action, name=""):
            self.watch = watch
            self.name = name
            self.action = action
        @property
        def path(self):
            #directory the owning watch is attached to
            return self.watch.path
        @property
        def user(self):
            #NOTE(review): FSMonitorWatch never sets a `user` attribute in
            #this file - accessing this property looks like it would raise
            #AttributeError; confirm against upstream fsmonitor.py.
            return self.watch.user
        #bit flags describing the kind of change
        Access = 0x01
        Modify = 0x02
        Attrib = 0x04
        Create = 0x08
        Delete = 0x10
        DeleteSelf = 0x20
        MoveFrom = 0x40
        MoveTo = 0x80
        All = 0xFF
    class FSMonitorWindowsError(FSMonitorError):
        #wraps pywintypes.error args raised by the win32 API calls
        pass
    class FSMonitorWatch(object):
        """Holds the win32 directory handle and I/O state for one watch."""
        def __init__(self, path, flags, recursive):
            self.path = path
            self.enabled = True
            self._recursive = recursive
            self._win32_flags = flags
            self._key = None
            self._hDir = None
            #open the directory itself (FILE_FLAG_BACKUP_SEMANTICS) for
            #overlapped (asynchronous) change notification reads
            self._hDir = win32file.CreateFile(
                path,
                0x0001,
                win32con.FILE_SHARE_READ | win32con.FILE_SHARE_WRITE | win32con.FILE_SHARE_DELETE,
                None,
                win32con.OPEN_EXISTING,
                win32con.FILE_FLAG_BACKUP_SEMANTICS | win32con.FILE_FLAG_OVERLAPPED,
                None)
            self._overlapped = pywintypes.OVERLAPPED()
            #buffer that ReadDirectoryChangesW fills with change records
            self._buf = ctypes.create_string_buffer(1024)
            self._removed = False
        def __del__(self):
            close_watch(self)
        def __repr__(self):
            return "<FSMonitorWatch %r>" % self.path
    def close_watch(watch):
        """Cancel pending I/O and close the watch's directory handle."""
        if watch._hDir is not None:
            win32file.CancelIo(watch._hDir)
            win32file.CloseHandle(watch._hDir)
            watch._hDir = None
    def read_changes(watch):
        """Queue an asynchronous directory-change read on the watch."""
        win32file.ReadDirectoryChangesW (
            watch._hDir,
            watch._buf,
            True,
            win32con.FILE_NOTIFY_CHANGE_FILE_NAME |
            win32con.FILE_NOTIFY_CHANGE_LAST_WRITE,
            watch._overlapped,
            None
        )
    def process_events(watch, num):
        """Yield FSEvents decoded from the watch buffer, then re-arm it."""
        for action, name in win32file.FILE_NOTIFY_INFORMATION(watch._buf.raw, num):
            if action is not None and (action & win32con.FILE_NOTIFY_CHANGE_FILE_NAME | win32con.FILE_NOTIFY_CHANGE_LAST_WRITE):
                yield FSEvent(watch, action, name)
        try:
            read_changes(watch)
        except pywintypes.error, e:
            #error code 5 == access denied, which means the watched
            #directory itself has gone away
            if e.args[0] == 5:
                close_watch(watch)
                yield FSEvent(watch, FSEvent.DeleteSelf)
            else:
                raise FSMonitorWindowsError(*e.args)
    class FSMonitor(object):
        """Multiplexes several directory watches over one completion port."""
        def __init__(self):
            #completion-port key -> FSMonitorWatch
            self.__key_to_watch = {}
            self.__last_key = 0
            self.__lock = threading.Lock()
            self.__cphandle = win32file.CreateIoCompletionPort(-1, None, 0, 0)
        def __del__(self):
            self.close()
        def close(self):
            """Close the completion port (idempotent)."""
            if self.__cphandle is not None:
                win32file.CloseHandle(self.__cphandle)
                self.__cphandle = None
        def add_dir_watch(self, path, flags, recursive=False):
            """Create a watch on `path` and register it with the port."""
            try:
                flags |= FSEvent.DeleteSelf
                watch = FSMonitorWatch(path, flags, recursive)
                with self.__lock:
                    key = self.__last_key
                    win32file.CreateIoCompletionPort(watch._hDir, self.__cphandle, key, 0)
                    self.__last_key += 1
                    read_changes(watch)
                    watch._key = key
                    self.__key_to_watch[key] = watch
                return watch
            except pywintypes.error, e:
                raise FSMonitorWindowsError(*e.args)
        def __remove_watch(self, watch):
            #best-effort close; returns True only on successful removal
            if not watch._removed:
                try:
                    watch._removed = True
                    close_watch(watch)
                    return True
                except pywintypes.error:
                    pass
            return False
        def remove_all_watches(self):
            """Tear down every registered watch."""
            with self.__lock:
                for watch in self.__key_to_watch.itervalues():
                    self.__remove_watch(watch)
        def read_events(self):
            """Block (up to 1s) on the port and return decoded FSEvents."""
            try:
                events = []
                rc, num, key, _ = win32file.GetQueuedCompletionStatus(self.__cphandle, 1000)
                if rc == 0:
                    #a completed read - decode its change records
                    with self.__lock:
                        watch = self.__key_to_watch.get(key)
                        if watch is not None and watch.enabled and not watch._removed:
                            for evt in process_events(watch, num):
                                events.append(evt)
                elif rc == 5:
                    #access denied - the watched directory was deleted
                    with self.__lock:
                        watch = self.__key_to_watch.get(key)
                        if watch is not None and watch.enabled:
                            close_watch(watch)
                            del self.__key_to_watch[key]
                            events.append(FSEvent(watch, FSEvent.DeleteSelf))
                return events
            except pywintypes.error, e:
                raise FSMonitorWindowsError(*e.args)
    class WindowsDirectoryWatcher(_DirectoryWatcherBase):
        """Watcher implementation built on FSMonitor/ReadDirectoryChangesW."""
        def __init__(self, dir_name, func, recursive, *args, **kwargs):
            _DirectoryWatcherBase.__init__(self, dir_name, func, recursive, *args, **kwargs)
            self.recursive = recursive
            self.worker_thread = None
            self.processing_thread = None
            self.stay_alive = True
            self.monitor = FSMonitor()
            #full path -> creation time, mirroring the Linux implementation
            self.__created_files = {}
            #paths waiting for their writer to close them
            self.__new_files_q = Queue.Queue()
        def start(self):
            """Attach the watch and launch the worker/processing threads."""
            self.stay_alive = True
            self.monitor.add_dir_watch(self.dir_to_watch, win32con.FILE_NOTIFY_CHANGE_FILE_NAME | win32con.FILE_NOTIFY_CHANGE_LAST_WRITE, recursive=self.recursive)
            self.worker_thread = threading.Thread(target=self._do_watching)
            self.processing_thread = threading.Thread(target=self.__do_processing)
            self.worker_thread.start()
            self.processing_thread.start()
        def _do_watching(self):
            #poll the monitor and record/forward file creation events
            while self.stay_alive:
                #try:
                for event in self.monitor.read_events():
                    #NOTE(review): event.name may include a subdirectory
                    #component for recursive watches - confirm join is right
                    file_path = os.path.join(self.dir_to_watch, event.name)
                    if event.action == 1: #file creation event
                        t = datetime.datetime.utcnow()
                        self.__created_files[file_path] = calendar.timegm(t.timetuple()) + t.microsecond*1e-6
                        self.__new_files_q.put(file_path)
                    if event.action == 3: #file update event
                        if not self.__created_files.has_key(file_path):
                            #file has just been modified - not newly created
                            continue
        def __do_processing(self):
            """
            Wait for the newly created file to be closed and then run the
            processing function on it (this is required since there is no such
            thing as a close_write event on Windows)
            """
            while self.stay_alive:
                path = self.__new_files_q.get()
                if not path: #probably an exit request
                    continue
                file_is_closed = False
                while not file_is_closed and self.stay_alive:
                    #try to open the file for reading, specifying exclusive access
                    #this will fail if the file is already open in an another process
                    try:
                        handle = win32file.CreateFile(
                            path,
                            win32file.GENERIC_READ,
                            0, #share mode - 0 == no sharing
                            None,
                            win32file.OPEN_ALWAYS,
                            win32file.FILE_ATTRIBUTE_NORMAL,
                            None)
                        handle.close()
                        file_is_closed = True
                    except pywintypes.error, e:
                        if e[0] == winerror.ERROR_SHARING_VIOLATION:
                            #still being written - poll again shortly
                            time.sleep(0.01)
                        else:
                            raise
                if not self.stay_alive:
                    return
                self._on_new_file(path, self.__created_files[path])
        def stop(self):
            """Stop both threads and drain the pending-files queue."""
            self.stay_alive = False
            self.monitor.remove_all_watches()
            if self.worker_thread is not None:
                self.worker_thread.join()
            if self.processing_thread is not None:
                #None acts as a wake-up sentinel for __do_processing
                self.__new_files_q.put(None)
                self.processing_thread.join()
                #empty the new_files queue
                try:
                    while True:
                        self.__new_files_q.get_nowait()
                except Queue.Empty:
                    pass
            self.worker_thread = None
            self.processing_thread = None
except ImportError:
    pass
| gpl-3.0 |
nico202/pyNeMo | WorkerInit.py | 1 | 3502 | #!/usr/bin/env python2
# A worker. Waits for incoming connection on port 10666
# When data are received, it processes them and sends the results back to the host (using requests) on port 10665
import web #Listen for requests
from libs.web import ip_port, get_self_ip
web.config.debug = False
import requests #Answer to requests when data are processed
import json #Send data
import dill
from uuid import getnode as get_mac #Identify machine, debugging purpose
#This will be removed, all these part should be managed by HistoryExplorer
from multiprocessing import cpu_count
from HistoryExplorer import main_loop
from libs.multiProcess import dispatch_jobs
web.config.debug = False
# web.py URL map: path -> handler class name (classes defined below).
urls = (
    '/', 'index'
    , '/cores', 'cores' #Return number of available cores (add --cores cli)
    , '/append', 'append' #Append a work to the queue
    , '/init', 'init' #Start all works in the queue
    , '/start', 'start' #Start newly appended
)
# IP of the master host; recorded when it first calls /cores (see cores.GET).
master_ip = ""
class cores:
    """Handler for /cores: report this worker's parallel capacity."""
    def GET(self):
        import config
        global master_ip
        # Side effect: remember the caller's address as the master host.
        master_ip = web.ctx["ip"]
        # Optional config.MULTIPLIER scales the advertised core count.
        multiplier = 1 if not hasattr(config, 'MULTIPLIER') else config.MULTIPLIER
        return json.dumps(
            {
                "cores": cpu_count()
                , "multiplier": multiplier})
class workQueue:
    """LIFO queue of pending work items.

    pop() returns False instead of raising when the queue is empty.
    """
    def __init__(self):
        #items are appended and popped at the tail (LIFO order)
        self.workqueue = []
    def append(self, data):
        #queue a work item for later processing
        self.workqueue.append(data)
    def pop(self):
        #empty queue yields False rather than an IndexError
        if not self.workqueue:
            return False
        return self.workqueue.pop()
class append:
    """Handler for /append: deserialize a work item and queue it."""
    def POST(self):
        global Work
        import dill
        # SECURITY: dill.loads executes arbitrary pickled payloads; this
        # trusts that only the master host can reach this port.
        loaded = dill.loads(web.data())
        Work.append(loaded)
        return True #Allow error codes
class start:
    #Start one only
    #It's multiprocessing since web.py provides it, and
    #We are spawning 1 new process as soon as one finish
    def POST(self):
        """Pop one queued work item and run it via main_loop."""
        global titles
        next_work = Work.pop()
        outputs = []; titles = []
        if next_work:
            for i in next_work: #Should be just 1
                #bug fix: the original indexed the builtin ``next`` instead
                #of the popped ``next_work`` mapping (a TypeError at runtime)
                outputs.append(next_work[i]["data"])
                titles.append(next_work[i]["title"])
            main_loop(titles, web.ctx['ip'], outputs)
class init: #Maybe should be a GET?
    #Use multiprocessing implemented by me
    #(thanks to StackO)
    def POST(self):
        """Pop up to one work item per core and dispatch them as jobs."""
        global Work
        global master_ip
        cpu_number = cpu_count()
        outputs = []
        titles = []
        for i in range(cpu_number):
            next_work = Work.pop()
            if next_work:
                # NOTE(review): indexing the popped item with the loop
                # counter ``i`` only works if each queued mapping happens to
                # contain key ``i`` - presumably items are keyed by slot
                # number; confirm against the master's /append payloads.
                outputs.append(next_work[i]["data"])
                titles.append(next_work[i]["title"])
        #check if we are really remote or same machine
        dispatch_jobs(titles, cpu_number, remote = master_ip, in_data = outputs)
        return True
class index:
    """Handler for /: liveness check plus a debug POST echo."""
    def GET(self):
        return "Hello, world!"
    def POST(self): #Input: {filename: raw_compressed_data}
        # Debug aid: dump the received form data, then acknowledge with this
        # machine's MAC address as the key.
        print(web.input())
        return {mac: "starting"}
class Worker(web.application):
    """web.py application that binds on all interfaces at a chosen port."""
    def run(self, port=8080, *middleware):
        func = self.wsgifunc(*middleware)
        # 0.0.0.0 so the master can reach this worker from other hosts.
        return web.httpserver.runsimple(func, ('0.0.0.0', port))
class RequestHandler():
    """Minimal handler stub; not referenced by the URL map above."""
    def POST(self):
        #bug fix: ``self`` was missing from the signature, so this could
        #never be invoked as a bound method
        data = web.data() # you can get data use this method
# Port this worker listens on; the master connects back on 10665.
port = 10666
# MAC address string, used only as a debug identifier in index.POST.
mac = str(get_mac())
# Shared queue of pending work items, filled via /append.
Work = workQueue()
if __name__ == "__main__":
    app = Worker(urls, globals())
    print("Waiting host connection on: %s:%s"
          %
          (get_self_ip(), port))
    app.run(port)
| gpl-2.0 |
rotofly/odoo | addons/membership/wizard/membership_invoice.py | 380 | 3229 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
import openerp.addons.decimal_precision as dp
class membership_invoice(osv.osv_memory):
    """Membership Invoice"""
    _name = "membership.invoice"
    _description = "Membership Invoice"
    _columns = {
        'product_id': fields.many2one('product.product','Membership', required=True),
        'member_price': fields.float('Member Price', digits_compute= dp.get_precision('Product Price'), required=True),
    }
    def onchange_product(self, cr, uid, ids, product_id=False):
        """This function returns value of product's member price based on product id.
        """
        if not product_id:
            return {'value': {'member_price': False}}
        # price_get returns {product_id: price}; surface it as member_price.
        return {'value': {'member_price': self.pool.get('product.product').price_get(cr, uid, [product_id])[product_id]}}
    def membership_invoice(self, cr, uid, ids, context=None):
        """Create membership invoices for the active partners and return an
        action window listing the newly created invoices.
        """
        mod_obj = self.pool.get('ir.model.data')
        partner_obj = self.pool.get('res.partner')
        datas = {}
        if context is None:
            context = {}
        # The wizard is a singleton: only the first record's values are used.
        data = self.browse(cr, uid, ids, context=context)
        if data:
            data = data[0]
            datas = {
                'membership_product_id': data.product_id.id,
                'amount': data.member_price
            }
        invoice_list = partner_obj.create_membership_invoice(cr, uid, context.get('active_ids', []), datas=datas, context=context)
        # Resolve the account module's views; fall back gracefully when the
        # XML ids are missing.
        try:
            search_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'view_account_invoice_filter')[1]
        except ValueError:
            search_view_id = False
        try:
            form_view_id = mod_obj.get_object_reference(cr, uid, 'account', 'invoice_form')[1]
        except ValueError:
            form_view_id = False
        return {
            'domain': [('id', 'in', invoice_list)],
            'name': 'Membership Invoices',
            'view_type': 'form',
            'view_mode': 'tree,form',
            'res_model': 'account.invoice',
            'type': 'ir.actions.act_window',
            'views': [(False, 'tree'), (form_view_id, 'form')],
            'search_view_id': search_view_id,
        }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
jpautom/scikit-learn | examples/text/document_clustering.py | 230 | 8356 | """
=======================================
Clustering text documents using k-means
=======================================
This is an example showing how the scikit-learn can be used to cluster
documents by topics using a bag-of-words approach. This example uses
a scipy.sparse matrix to store the features instead of standard numpy arrays.
Two feature extraction methods can be used in this example:
- TfidfVectorizer uses a in-memory vocabulary (a python dict) to map the most
frequent words to features indices and hence compute a word occurrence
frequency (sparse) matrix. The word frequencies are then reweighted using
the Inverse Document Frequency (IDF) vector collected feature-wise over
the corpus.
- HashingVectorizer hashes word occurrences to a fixed dimensional space,
possibly with collisions. The word count vectors are then normalized to
each have l2-norm equal to one (projected to the euclidean unit-ball) which
seems to be important for k-means to work in high dimensional space.
HashingVectorizer does not provide IDF weighting as this is a stateless
model (the fit method does nothing). When IDF weighting is needed it can
be added by pipelining its output to a TfidfTransformer instance.
Two algorithms are demoed: ordinary k-means and its more scalable cousin
minibatch k-means.
Additionally, latent sematic analysis can also be used to reduce dimensionality
and discover latent patterns in the data.
It can be noted that k-means (and minibatch k-means) are very sensitive to
feature scaling and that in this case the IDF weighting helps improve the
quality of the clustering by quite a lot as measured against the "ground truth"
provided by the class label assignments of the 20 newsgroups dataset.
This improvement is not visible in the Silhouette Coefficient which is small
for both as this measure seem to suffer from the phenomenon called
"Concentration of Measure" or "Curse of Dimensionality" for high dimensional
datasets such as text data. Other measures such as V-measure and Adjusted Rand
Index are information theoretic based evaluation scores: as they are only based
on cluster assignments rather than distances, hence not affected by the curse
of dimensionality.
Note: as k-means is optimizing a non-convex objective function, it will likely
end up in a local optimum. Several runs with independent random init might be
necessary to get a good convergence.
"""
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
# Lars Buitinck <L.J.Buitinck@uva.nl>
# License: BSD 3 clause
from __future__ import print_function
from sklearn.datasets import fetch_20newsgroups
from sklearn.decomposition import TruncatedSVD
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import HashingVectorizer
from sklearn.feature_extraction.text import TfidfTransformer
from sklearn.pipeline import make_pipeline
from sklearn.preprocessing import Normalizer
from sklearn import metrics
from sklearn.cluster import KMeans, MiniBatchKMeans
import logging
from optparse import OptionParser
import sys
from time import time
import numpy as np
# Display progress logs on stdout
logging.basicConfig(level=logging.INFO,
                    format='%(asctime)s %(levelname)s %(message)s')
# parse commandline arguments
op = OptionParser()
op.add_option("--lsa",
              dest="n_components", type="int",
              help="Preprocess documents with latent semantic analysis.")
op.add_option("--no-minibatch",
              action="store_false", dest="minibatch", default=True,
              help="Use ordinary k-means algorithm (in batch mode).")
op.add_option("--no-idf",
              action="store_false", dest="use_idf", default=True,
              help="Disable Inverse Document Frequency feature weighting.")
op.add_option("--use-hashing",
              action="store_true", default=False,
              help="Use a hashing feature vectorizer")
op.add_option("--n-features", type=int, default=10000,
              help="Maximum number of features (dimensions)"
                   " to extract from text.")
op.add_option("--verbose",
              action="store_true", dest="verbose", default=False,
              help="Print progress reports inside k-means algorithm.")
print(__doc__)
op.print_help()
(opts, args) = op.parse_args()
if len(args) > 0:
    # NOTE(review): OptionParser.error() already exits (status 2), so the
    # sys.exit(1) below is effectively unreachable.
    op.error("this script takes no arguments.")
    sys.exit(1)
###############################################################################
# Load some categories from the training set
categories = [
    'alt.atheism',
    'talk.religion.misc',
    'comp.graphics',
    'sci.space',
]
# Uncomment the following to do the analysis on all the categories
#categories = None
print("Loading 20 newsgroups dataset for categories:")
print(categories)
dataset = fetch_20newsgroups(subset='all', categories=categories,
                             shuffle=True, random_state=42)
print("%d documents" % len(dataset.data))
print("%d categories" % len(dataset.target_names))
print()
labels = dataset.target
# Ground-truth number of clusters = number of distinct newsgroup labels.
true_k = np.unique(labels).shape[0]
print("Extracting features from the training dataset using a sparse vectorizer")
t0 = time()
if opts.use_hashing:
    if opts.use_idf:
        # Perform an IDF normalization on the output of HashingVectorizer
        hasher = HashingVectorizer(n_features=opts.n_features,
                                   stop_words='english', non_negative=True,
                                   norm=None, binary=False)
        vectorizer = make_pipeline(hasher, TfidfTransformer())
    else:
        vectorizer = HashingVectorizer(n_features=opts.n_features,
                                       stop_words='english',
                                       non_negative=False, norm='l2',
                                       binary=False)
else:
    # In-memory vocabulary; ignores terms in >50% of docs or <2 docs.
    vectorizer = TfidfVectorizer(max_df=0.5, max_features=opts.n_features,
                                 min_df=2, stop_words='english',
                                 use_idf=opts.use_idf)
X = vectorizer.fit_transform(dataset.data)
print("done in %fs" % (time() - t0))
print("n_samples: %d, n_features: %d" % X.shape)
print()
if opts.n_components:
    print("Performing dimensionality reduction using LSA")
    t0 = time()
    # Vectorizer results are normalized, which makes KMeans behave as
    # spherical k-means for better results. Since LSA/SVD results are
    # not normalized, we have to redo the normalization.
    svd = TruncatedSVD(opts.n_components)
    normalizer = Normalizer(copy=False)
    lsa = make_pipeline(svd, normalizer)
    X = lsa.fit_transform(X)
    print("done in %fs" % (time() - t0))
    explained_variance = svd.explained_variance_ratio_.sum()
    print("Explained variance of the SVD step: {}%".format(
        int(explained_variance * 100)))
    print()
###############################################################################
# Do the actual clustering
if opts.minibatch:
    km = MiniBatchKMeans(n_clusters=true_k, init='k-means++', n_init=1,
                         init_size=1000, batch_size=1000, verbose=opts.verbose)
else:
    km = KMeans(n_clusters=true_k, init='k-means++', max_iter=100, n_init=1,
                verbose=opts.verbose)
print("Clustering sparse data with %s" % km)
t0 = time()
km.fit(X)
print("done in %0.3fs" % (time() - t0))
print()
# Evaluate cluster assignments against the ground-truth newsgroup labels.
print("Homogeneity: %0.3f" % metrics.homogeneity_score(labels, km.labels_))
print("Completeness: %0.3f" % metrics.completeness_score(labels, km.labels_))
print("V-measure: %0.3f" % metrics.v_measure_score(labels, km.labels_))
print("Adjusted Rand-Index: %.3f"
      % metrics.adjusted_rand_score(labels, km.labels_))
print("Silhouette Coefficient: %0.3f"
      % metrics.silhouette_score(X, km.labels_, sample_size=1000))
print()
if not opts.use_hashing:
    # Hashing is not invertible, so per-cluster term inspection is only
    # possible with the vocabulary-based vectorizer.
    print("Top terms per cluster:")
    if opts.n_components:
        # Map centroids from LSA space back to term space first.
        original_space_centroids = svd.inverse_transform(km.cluster_centers_)
        order_centroids = original_space_centroids.argsort()[:, ::-1]
    else:
        order_centroids = km.cluster_centers_.argsort()[:, ::-1]
    terms = vectorizer.get_feature_names()
    for i in range(true_k):
        print("Cluster %d:" % i, end='')
        for ind in order_centroids[i, :10]:
            print(' %s' % terms[ind], end='')
        print()
| bsd-3-clause |
GrumpyPaw/google-appengine-wx-launcher | launcher/app.py | 28 | 8520 | #!/usr/bin/env python
#
# Copyright 2008 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import logging
import os
import re
import sys
import urllib
import wx
import launcher
class App(wx.App):
    """The main wx.App."""

    # Awkwardly, wx.App.__init__ calls OnInit().
    # Keep that in mind if writing an App.__init__.
    # Thus, "basic __init__() stuff" is in here.

    def OnInit(self):
        """Create top-level objects (e.g. main model, view, and controller objects).

        Logically similar to a 'load nib and set IBOutlets'.
        Note that views (MainFrame) have pointers to controllers, and these same
        controllers have pointers to the views.  To break this cycle,
        we have a convention:
          VIEWS take controllers as input args for __init__,
          CONTROLLERS have SetViewName() methods.
        (Why not the other way round?  Because views like to self.Bind()
        to a controller at __init__, whereas controllers generally do
        nothing on __init__.)

        Returns:
          True (required by wx to continue running).
        """
        self._table = None  # a MainTable for our data (M)
        self._project_frame = None  # the main view for our projects (V)
        self._task_controller = None  # a TaskController (C)
        self._app_controller = None  # AppController, the main app controller (C)
        self._InitializeLogging()
        self._SetCorrectDirectory()
        self._CreateModels()
        self._CreateControllers()
        self._CreateViews()
        self._ConnectControllersToModelsViews()
        self._DisplayMainFrame()
        self._VersionCheck()
        return True

    def Initialized(self):
        """Return whether we have been initialized properly."""
        # TODO(jrg): if we agree on py2.5, use all().  (My current Linux
        # comes with py2.4, and it's a pain to compile wxWidgets from
        # scratch on a 32/64 machine).
        for attr in ('_table', '_project_frame',
                     '_task_controller', '_app_controller'):
            if not getattr(self, attr):
                return False
        return True

    def _InitializeLogging(self):
        """Initialize a GUI-oriented warning mechanism.

        If this method isn't called, the launcher's warning mechanism
        defaults to text output (to be unittest friendly).  This method
        redirects warnings to dialog boxes to notify the user of a
        problem.
        """
        logging.basicConfig(level=logging.INFO, stream=sys.stderr)
        # DialogHandler routes WARNING and above into modal dialogs.
        handler = launcher.DialogHandler(level=logging.WARNING)
        logging.getLogger('').addHandler(handler)

    def _SetCorrectDirectory(self):
        """Set the correct current directory for launcher happiness.

        Some items, like the toolbar, reference icons with a relative
        path.
        Do nothing if this is a unit test run.
        """
        if not sys.argv[0].endswith('_unittest.py'):
            dirname = os.path.abspath(os.path.dirname(sys.argv[0]))
            os.chdir(dirname)

    def _CreateModels(self):
        """Create models (MVC) for this application."""
        self._table = launcher.MainTable()
        self._preferences = launcher.Preferences()
        self._runtime = launcher.Runtime(preferences=self._preferences)

    def _CreateControllers(self):
        """Create controllers (MVC) for this application."""
        self._app_controller = launcher.AppController(self)
        self._task_controller = launcher.TaskController(self._app_controller)

    def _CreateViews(self):
        """Create views (MVC) for this application.

        Assumes M and C have been created.
        """
        self._project_frame = launcher.MainFrame(
            None,
            -1,
            table=self._table,
            preferences=self._preferences,
            app_controller=self._app_controller,
            task_controller=self._task_controller)

    def _ConnectControllersToModelsViews(self):
        """Tell controller about views and data which may have been created later.

        This prevents a cyclic dependency.
        """
        self._task_controller.SetModelsViews(frame=self._project_frame,
                                             runtime=self._runtime,
                                             preferences=self._preferences)
        self._app_controller.SetModelsViews(frame=self._project_frame,
                                            table=self._table,
                                            preferences=self._preferences)

    def _DisplayMainFrame(self):
        """Show the main window and make it the app's top window."""
        # Last chance to get UI up!
        self._app_controller.RefreshMainView()
        self._project_frame.Show()
        self.SetTopWindow(self._project_frame)

    def _VersionCheck(self, url=None, always_dialog=False):
        """Quick check of version; yell if mismatch.

        Example format from the default URL:
          release: "1.2.3"
          timestamp: 1243913623
          api_versions: ['1']

        Args:
          url: URL to find the latest version; if None, use a default.
          always_dialog: If True, always bring up a dialog even if
            versions match.  Else only bring up a dialog on mismatch.
        """
        url = url or 'http://appengine.google.com/api/updatecheck'
        try:
            url_file = urllib.urlopen(url)
        except IOError:
            # Offline or server down; handled below as "not proper version data".
            new_version_data = 'cannot_contact_server'
        else:
            new_version_data = url_file.read()
        current_version_data = self._CurrentVersionData()
        # Watch out for a 404 or undefined SDK
        if ((not 'api_versions' in new_version_data) or
                (not 'api_versions' in current_version_data)):
            if always_dialog:
                logging.warning('Cannot perform proper version check.')
                logging.warning(new_version_data)
                logging.warning(current_version_data)
            return
        # Compare by embedded timestamps; newer server timestamp => upgrade.
        my_timestamp = self._TimestampFromVersionData(current_version_data)
        new_timestamp = self._TimestampFromVersionData(new_version_data)
        if my_timestamp < new_timestamp:
            self._NewVersionNeeded(current_version_data,
                                   new_version_data,
                                   always_dialog)
        else:
            if always_dialog:
                self._NoNewVersionNeeded(current_version_data)

    def _TimestampFromVersionData(self, data):
        """Return an timestamp from the given VERSION data.

        Args:
          data: contents of a VERSION file (key: value lines).

        Returns:
          timestamp as an int, or 0 if none found.
        """
        for line in data.split('\n'):
            if 'timestamp' in line:
                try:
                    return int(line.split()[1])
                except IndexError:
                    pass  # lost part of our VERSION file?
                except ValueError:
                    pass  # no longer using an int as a timestamp?
        return 0

    def _CurrentVersionData(self):
        """Read current version data.

        Returns:
          Contents of the SDK's VERSION file, or an "old" version.
        """
        sdk_dir = self._preferences[launcher.Preferences.PREF_APPENGINE]
        if not sdk_dir:
            return 'Cannot find SDK VERSION file.'
        sdk_version_file = os.path.join(sdk_dir, 'VERSION')
        try:
            data = open(sdk_version_file).read()
            return data
        except IOError:
            # Fallback that always compares as "older than anything".
            return 'release: "0"\ntimestamp: 0\napi_versions: [\'1\']'

    def _NewVersionNeeded(self, old_version, new_version, always_dialog):
        """Tell the user a new version of the SDK is needed.

        Args:
          old_version: our version data.
          new_version: the latest version data available.
          always_dialog: If True, always show the dialog even if disabled
            by a preference.
        """
        message = """
A new version of Google App Engine is available.
Please visit http://code.google.com/appengine/downloads.html
Current:
%s
Latest:
%s
"""
        if (self._preferences[launcher.Preferences.PREF_NOVERSIONCHECK] and
                not always_dialog):
            return
        # TODO(jrg): add a checkbox to disable the update check.
        # See preferences.py for info on editing your preference file.
        # Add a "noversioncheck = True" line to disable it.
        logging.warning(message % (old_version, new_version))

    def _NoNewVersionNeeded(self, version_data):
        """Tell the user NO new version of the SDK is needed.

        Args:
          version_data: our version data.
        """
        message = """
Your Google App Engine SDK is up to date.
Version:
%s
"""
        logging.warning(message % (version_data))

    def OnExit(self):
        """Called when the app will exit."""
        self._task_controller.StopAll(None)
        self.ExitMainLoop()
| apache-2.0 |
FireWRT/OpenWrt-Firefly-Libraries | staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python2.7/formatter.py | 252 | 14911 | """Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import sys
AS_IS = None
class NullFormatter:
    """A formatter that ignores every formatting event.

    If the writer parameter is omitted, a NullWriter instance is created;
    no methods of the writer are ever called.  Classes implementing a
    writer-facing interface without needing any inherited behavior may
    subclass this.
    """

    def __init__(self, writer=None):
        self.writer = NullWriter() if writer is None else writer

    def end_paragraph(self, blankline):
        pass

    def add_line_break(self):
        pass

    def add_hor_rule(self, *args, **kw):
        pass

    def add_label_data(self, format, counter, blankline=None):
        pass

    def add_flowing_data(self, data):
        pass

    def add_literal_data(self, data):
        pass

    def flush_softspace(self):
        pass

    def push_alignment(self, align):
        pass

    def pop_alignment(self):
        pass

    def push_font(self, x):
        pass

    def pop_font(self):
        pass

    def push_margin(self, margin):
        pass

    def pop_margin(self):
        pass

    def set_spacing(self, spacing):
        pass

    def push_style(self, *styles):
        pass

    def pop_style(self, n=1):
        pass

    def assert_line_data(self, flag=1):
        pass
class AbstractFormatter:
    """The standard formatter.

    This implementation has demonstrated wide applicability to many writers,
    and may be used directly in most circumstances.  It has been used to
    implement a full-featured World Wide Web browser.

    State is tracked with integer flags (hard_break, nospace, softspace,
    para_end, parskip, have_label) so that whitespace between elements is
    emitted at most once and paragraph breaks are collapsed.
    """

    # Space handling policy: blank spaces at the boundary between elements
    # are handled by the outermost context.  "Literal" data is not checked
    # to determine context, so spaces in literal data are handled directly
    # in all circumstances.

    def __init__(self, writer):
        self.writer = writer            # Output device
        self.align = None               # Current alignment
        self.align_stack = []           # Alignment stack
        self.font_stack = []            # Font state
        self.margin_stack = []          # Margin state
        self.spacing = None             # Vertical spacing state
        self.style_stack = []           # Other state, e.g. color
        self.nospace = 1                # Should leading space be suppressed
        self.softspace = 0              # Should a space be inserted
        self.para_end = 1               # Just ended a paragraph
        self.parskip = 0                # Skipped space between paragraphs?
        self.hard_break = 1             # Have a hard break
        self.have_label = 0

    def end_paragraph(self, blankline):
        # Emit at most `blankline` blank lines, collapsing consecutive
        # paragraph ends (parskip tracks blanks already emitted).
        if not self.hard_break:
            self.writer.send_line_break()
            self.have_label = 0
        if self.parskip < blankline and not self.have_label:
            self.writer.send_paragraph(blankline - self.parskip)
            self.parskip = blankline
            self.have_label = 0
        self.hard_break = self.nospace = self.para_end = 1
        self.softspace = 0

    def add_line_break(self):
        # A line break is a no-op right after a hard break or paragraph end.
        if not (self.hard_break or self.para_end):
            self.writer.send_line_break()
            self.have_label = self.parskip = 0
        self.hard_break = self.nospace = 1
        self.softspace = 0

    def add_hor_rule(self, *args, **kw):
        if not self.hard_break:
            self.writer.send_line_break()
        self.writer.send_hor_rule(*args, **kw)
        self.hard_break = self.nospace = 1
        self.have_label = self.para_end = self.softspace = self.parskip = 0

    def add_label_data(self, format, counter, blankline = None):
        # A string `format` is rendered through format_counter(); anything
        # else is passed to the writer unchanged.
        if self.have_label or not self.hard_break:
            self.writer.send_line_break()
        if not self.para_end:
            self.writer.send_paragraph((blankline and 1) or 0)
        if isinstance(format, str):
            self.writer.send_label_data(self.format_counter(format, counter))
        else:
            self.writer.send_label_data(format)
        self.nospace = self.have_label = self.hard_break = self.para_end = 1
        self.softspace = self.parskip = 0

    def format_counter(self, format, counter):
        # '1' -> decimal, 'a'/'A' -> letters, 'i'/'I' -> roman numerals;
        # any other character is copied literally.
        label = ''
        for c in format:
            if c == '1':
                label = label + ('%d' % counter)
            elif c in 'aA':
                if counter > 0:
                    label = label + self.format_letter(c, counter)
            elif c in 'iI':
                if counter > 0:
                    label = label + self.format_roman(c, counter)
            else:
                label = label + c
        return label

    def format_letter(self, case, counter):
        # 1 -> 'a', 26 -> 'z', 27 -> 'aa', ... (bijective base 26).
        label = ''
        while counter > 0:
            counter, x = divmod(counter-1, 26)
            # This makes a strong assumption that lowercase letters
            # and uppercase letters form two contiguous blocks, with
            # letters in order!
            s = chr(ord(case) + x)
            label = s + label
        return label

    def format_roman(self, case, counter):
        ones = ['i', 'x', 'c', 'm']
        fives = ['v', 'l', 'd']
        label, index = '', 0
        # This will die of IndexError when counter is too big
        while counter > 0:
            counter, x = divmod(counter, 10)
            if x == 9:
                label = ones[index] + ones[index+1] + label
            elif x == 4:
                label = ones[index] + fives[index] + label
            else:
                if x >= 5:
                    s = fives[index]
                    x = x-5
                else:
                    s = ''
                s = s + ones[index]*x
                label = s + label
            index = index + 1
        if case == 'I':
            return label.upper()
        return label

    def add_flowing_data(self, data):
        # Collapse internal runs of whitespace to single spaces; boundary
        # whitespace is deferred via the softspace flag so it is emitted
        # only if more data follows.
        if not data: return
        prespace = data[:1].isspace()
        postspace = data[-1:].isspace()
        data = " ".join(data.split())
        if self.nospace and not data:
            return
        elif prespace or self.softspace:
            if not data:
                if not self.nospace:
                    self.softspace = 1
                    self.parskip = 0
                return
            if not self.nospace:
                data = ' ' + data
        self.hard_break = self.nospace = self.para_end = \
                          self.parskip = self.have_label = 0
        self.softspace = postspace
        self.writer.send_flowing_data(data)

    def add_literal_data(self, data):
        # Literal data is passed through untouched; only a pending
        # softspace is flushed first.
        if not data: return
        if self.softspace:
            self.writer.send_flowing_data(" ")
        self.hard_break = data[-1:] == '\n'
        self.nospace = self.para_end = self.softspace = \
                       self.parskip = self.have_label = 0
        self.writer.send_literal_data(data)

    def flush_softspace(self):
        # Force out a deferred inter-word space, if one is pending.
        if self.softspace:
            self.hard_break = self.para_end = self.parskip = \
                              self.have_label = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')

    def push_alignment(self, align):
        if align and align != self.align:
            self.writer.new_alignment(align)
            self.align = align
            self.align_stack.append(align)
        else:
            self.align_stack.append(self.align)

    def pop_alignment(self):
        if self.align_stack:
            del self.align_stack[-1]
        if self.align_stack:
            self.align = align = self.align_stack[-1]
            self.writer.new_alignment(align)
        else:
            self.align = None
            self.writer.new_alignment(None)

    def push_font(self, font):
        # font is a (size, italic, bold, teletype) tuple; AS_IS components
        # inherit the corresponding value from the current font.
        size, i, b, tt = font
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        if self.font_stack:
            csize, ci, cb, ctt = self.font_stack[-1]
            if size is AS_IS: size = csize
            if i is AS_IS: i = ci
            if b is AS_IS: b = cb
            if tt is AS_IS: tt = ctt
        font = (size, i, b, tt)
        self.font_stack.append(font)
        self.writer.new_font(font)

    def pop_font(self):
        if self.font_stack:
            del self.font_stack[-1]
        if self.font_stack:
            font = self.font_stack[-1]
        else:
            font = None
        self.writer.new_font(font)

    def push_margin(self, margin):
        self.margin_stack.append(margin)
        # Drop falsy (null) margins; the level reported to the writer is
        # the count of real margins.
        fstack = filter(None, self.margin_stack)
        if not margin and fstack:
            margin = fstack[-1]
        self.writer.new_margin(margin, len(fstack))

    def pop_margin(self):
        if self.margin_stack:
            del self.margin_stack[-1]
        fstack = filter(None, self.margin_stack)
        if fstack:
            margin = fstack[-1]
        else:
            margin = None
        self.writer.new_margin(margin, len(fstack))

    def set_spacing(self, spacing):
        self.spacing = spacing
        self.writer.new_spacing(spacing)

    def push_style(self, *styles):
        if self.softspace:
            self.hard_break = self.para_end = self.softspace = 0
            self.nospace = 1
            self.writer.send_flowing_data(' ')
        for style in styles:
            self.style_stack.append(style)
        self.writer.new_styles(tuple(self.style_stack))

    def pop_style(self, n=1):
        del self.style_stack[-n:]
        self.writer.new_styles(tuple(self.style_stack))

    def assert_line_data(self, flag=1):
        # Tell the formatter that data was written out-of-band; flag=1
        # means the output did not end with a line break.
        self.nospace = self.hard_break = not flag
        self.para_end = self.parskip = self.have_label = 0
class NullWriter:
    """Minimal writer interface for testing and inheritance.

    Defines the full writer protocol but performs no actions.  Writers
    that do not need inherited implementation methods should subclass
    this.
    """

    def __init__(self):
        pass

    def flush(self):
        pass

    def new_alignment(self, align):
        pass

    def new_font(self, font):
        pass

    def new_margin(self, margin, level):
        pass

    def new_spacing(self, spacing):
        pass

    def new_styles(self, styles):
        pass

    def send_paragraph(self, blankline):
        pass

    def send_line_break(self):
        pass

    def send_hor_rule(self, *args, **kw):
        pass

    def send_label_data(self, data):
        pass

    def send_flowing_data(self, data):
        pass

    def send_literal_data(self, data):
        pass
class AbstractWriter(NullWriter):
    """A writer which can be used in debugging formatters, but not much else.

    Each method simply announces itself by printing its name and
    arguments on standard output.  (Python 2 print statements: this
    module predates Python 3.)
    """

    def new_alignment(self, align):
        print "new_alignment(%r)" % (align,)

    def new_font(self, font):
        print "new_font(%r)" % (font,)

    def new_margin(self, margin, level):
        print "new_margin(%r, %d)" % (margin, level)

    def new_spacing(self, spacing):
        print "new_spacing(%r)" % (spacing,)

    def new_styles(self, styles):
        print "new_styles(%r)" % (styles,)

    def send_paragraph(self, blankline):
        print "send_paragraph(%r)" % (blankline,)

    def send_line_break(self):
        print "send_line_break()"

    def send_hor_rule(self, *args, **kw):
        print "send_hor_rule()"

    def send_label_data(self, data):
        print "send_label_data(%r)" % (data,)

    def send_flowing_data(self, data):
        print "send_flowing_data(%r)" % (data,)

    def send_literal_data(self, data):
        print "send_literal_data(%r)" % (data,)
class DumbWriter(NullWriter):
    """Word-wrapping writer.

    Output is written to the file object given as *file* (standard output
    by default) and word-wrapped to at most *maxcol* columns.  Suitable
    for reflowing a sequence of plain-text paragraphs.
    """

    def __init__(self, file=None, maxcol=72):
        self.file = file or sys.stdout
        self.maxcol = maxcol
        NullWriter.__init__(self)
        self.reset()

    def reset(self):
        # Current output column, and whether a wrap/space is pending.
        self.col = 0
        self.atbreak = 0

    def send_paragraph(self, blankline):
        self.file.write('\n' * blankline)
        self.col = 0
        self.atbreak = 0

    def send_line_break(self):
        self.file.write('\n')
        self.col = 0
        self.atbreak = 0

    def send_hor_rule(self, *args, **kw):
        out = self.file.write
        out('\n')
        out('-' * self.maxcol)
        out('\n')
        self.col = 0
        self.atbreak = 0

    def send_literal_data(self, data):
        self.file.write(data)
        newline_pos = data.rfind('\n')
        if newline_pos >= 0:
            self.col = 0
            data = data[newline_pos + 1:]
        # Tabs count toward the column position after expansion.
        self.col += len(data.expandtabs())
        self.atbreak = 0

    def send_flowing_data(self, data):
        if not data:
            return
        pending_break = self.atbreak or data[0].isspace()
        column = self.col
        limit = self.maxcol
        emit = self.file.write
        for word in data.split():
            if pending_break:
                # Wrap if the next word would reach the margin,
                # otherwise separate with a single space.
                if column + len(word) >= limit:
                    emit('\n')
                    column = 0
                else:
                    emit(' ')
                    column += 1
            emit(word)
            column += len(word)
            pending_break = 1
        self.col = column
        self.atbreak = data[-1].isspace()
def test(file = None):
    """Reflow *file* (or sys.argv[1], or stdin) to stdout at 72 columns."""
    formatter = AbstractFormatter(DumbWriter())
    if file is not None:
        fp = open(file)
    elif sys.argv[1:]:
        fp = open(sys.argv[1])
    else:
        fp = sys.stdin
    for line in fp:
        # A lone newline separates paragraphs; everything else is flowed.
        if line == '\n':
            formatter.end_paragraph(1)
        else:
            formatter.add_flowing_data(line)
    formatter.end_paragraph(0)

if __name__ == '__main__':
    test()
| gpl-2.0 |
MyRookie/SentimentAnalyse | venv/lib/python2.7/site-packages/numpy/lib/ufunclike.py | 173 | 4844 | """
Module of functions that are like ufuncs in acting on arrays and optionally
storing results in an output array.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['fix', 'isneginf', 'isposinf']
import numpy.core.numeric as nx
def fix(x, y=None):
    """
    Round to nearest integer towards zero.

    Floats are rounded element-wise to the nearest integer in the
    direction of zero, and the results are returned as floats.

    Parameters
    ----------
    x : array_like
        Values to be rounded.
    y : ndarray, optional
        Output array.

    Returns
    -------
    out : ndarray of floats
        The rounded values.

    See Also
    --------
    trunc, floor, ceil
    around : Round to given number of decimals

    Examples
    --------
    >>> np.fix(3.14)
    3.0
    >>> np.fix([2.1, 2.9, -2.1, -2.9])
    array([ 2.,  2., -2., -2.])

    """
    x = nx.asanyarray(x)
    rounded_down = nx.floor(x)
    rounded_up = nx.ceil(x)
    if y is None:
        y = nx.asanyarray(rounded_down)
    # Towards zero: floor for non-negative values, ceil for negative ones.
    y[...] = nx.where(x >= 0, rounded_down, rounded_up)
    return y
def isposinf(x, y=None):
    """
    Test element-wise for positive infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    y : array_like, optional
        A boolean array with the same shape as `x` to store the result.

    Returns
    -------
    y : ndarray
        Array with the same dimensions as the input, True/1 exactly where
        the corresponding element of the input is positive infinity.  If
        `y` is supplied the result is stored there (as 0/1 for numeric
        dtypes) and a reference to it is returned.

    See Also
    --------
    isinf, isneginf, isfinite, isnan

    Notes
    -----
    Errors result if the second argument is also supplied when `x` is a
    scalar input, or if first and second arguments have different shapes.

    Examples
    --------
    >>> np.isposinf(np.inf)
    array(True, dtype=bool)
    >>> np.isposinf([-np.inf, 0., np.inf])
    array([False, False,  True], dtype=bool)

    """
    if y is None:
        x = nx.asarray(x)
        y = nx.empty(x.shape, dtype=nx.bool_)
    # +inf  <=>  infinite magnitude AND sign bit clear.
    is_infinite = nx.isinf(x)
    is_nonnegative = ~nx.signbit(x)
    nx.logical_and(is_infinite, is_nonnegative, y)
    return y
def isneginf(x, y=None):
    """
    Test element-wise for negative infinity, return result as bool array.

    Parameters
    ----------
    x : array_like
        The input array.
    y : array_like, optional
        A boolean array with the same shape and type as `x` to store the
        result.

    Returns
    -------
    y : ndarray
        Array with the same dimensions as the input, True/1 exactly where
        the corresponding element of the input is negative infinity.  If
        `y` is supplied the result is stored there (as 0/1 for numeric
        dtypes) and a reference to it is returned.

    See Also
    --------
    isinf, isposinf, isnan, isfinite

    Notes
    -----
    Errors result if the second argument is also supplied when x is a
    scalar input, or if first and second arguments have different shapes.

    Examples
    --------
    >>> np.isneginf(np.NINF)
    array(True, dtype=bool)
    >>> np.isneginf([-np.inf, 0., np.inf])
    array([ True, False, False], dtype=bool)

    """
    if y is None:
        x = nx.asarray(x)
        y = nx.empty(x.shape, dtype=nx.bool_)
    # -inf  <=>  infinite magnitude AND sign bit set.
    is_infinite = nx.isinf(x)
    is_negative = nx.signbit(x)
    nx.logical_and(is_infinite, is_negative, y)
    return y
| mit |
minorua/QGIS | python/plugins/processing/core/defaultproviders.py | 45 | 1178 | # -*- coding: utf-8 -*-
"""
***************************************************************************
defaultproviders.py
---------------------
Date : May 2016
Copyright : (C) 2016 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'May 2016'
__copyright__ = '(C) 2016, Victor Olaya'
def loadDefaultProviders():
    """Trigger this module's top-level imports so the default Processing
    providers get loaded.

    Calling the function itself is a no-op; importing the module is the
    side effect that matters (presumably consumed by
    Processing.initialize() -- see the comment below).
    """
    # this is here just to "trigger" the above imports so providers are loaded
    # and can be found by the Processing.initialize() method
    pass
| gpl-2.0 |
lowrie/pyRouterJig | doc.py | 1 | 6098 | ###########################################################################
#
# Copyright 2015-2018 Robert B. Lowrie (http://github.com/lowrie)
#
# This file is part of pyRouterJig.
#
# pyRouterJig is free software: you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation, either version 3 of the License, or (at your option) any later
# version.
#
# pyRouterJig is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# pyRouterJig; see the file LICENSE. If not, see <http://www.gnu.org/licenses/>.
#
###########################################################################
'''
Defines documentation helpers.
'''
import spacing
class Doc(object):
    '''
    Defines documentation strings.

    All strings are localized through the translator supplied at
    construction time; strings containing '%s' placeholders are completed
    by the accessor methods below.
    '''
    def __init__(self, units):
        """Build the translated documentation strings.

        :param units: object providing units_string(verbose=True) (the
            unit label substituted into '%s' placeholders) and transl,
            the translator used to localize every string.
        """
        self.sunits = units.units_string(verbose=True)
        self.transl = units.transl
        # General application description and license text.
        self._short_desc = self.transl.tr(
            '<i>pyRouterJig</i> is a joint layout tool for woodworking.')
        self._license = self.transl.tr('<p>\
Copyright 2015-2018 Robert B. Lowrie (pyrouterjig@lowrielodge.org)\
<p>\
<i>pyRouterJig</i> is free software: you can redistribute it and/or modify it under\
the terms of the GNU General Public License as published by the Free Software\
Foundation, either version 3 of the License, or (at your option) any later\
version.\
<p>\
<i>pyRouterJig</i> is distributed in the hope that it will be useful, but WITHOUT ANY\
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR\
A PARTICULAR PURPOSE. See the GNU General Public License for more details.\
You should have received a copy of the GNU General Public License along with\
<i>pyRouterJig</i>; see the file LICENSE. If not, see \
<a href=http://www.gnu.org/licenses/>http://www.gnu.org/licenses/</a>.\
<p>\
This software and its output is not an INCRA product. The authors have no\
connection with INCRA (or Taylor Design Group, Inc.), aside from being users\
themselves of their products.\
<p>\
<h3>USE AT YOUR OWN RISK!</h3>')
        # Tooltips for the joint parameters; '%s' is filled with the
        # unit label by the corresponding accessors.
        self._board_width = self.transl.tr('<b>Board Width</b> is the board width (in%s) of'
                                           ' the joint.')
        self._bit_width = self.transl.tr('<b>Bit Width</b> is the maximum cutting width (in%s) of'
                                         ' the router bit.')
        self._bit_depth = self.transl.tr('<b>Bit Depth</b> is the cutting depth (in%s) of the'
                                         ' router bit.')
        self._bit_angle = self.transl.tr('<b>Bit Angle</b> is the angle (in degrees) of the router'
                                         ' bit for dovetail bits. Set to zero for straight bits.')
        # Tooltips for the board selectors and thicknesses.
        self._top_board = self.transl.tr(
            '<b>Top Board</b> is the wood image to use for the top board.')
        self._bottom_board = self.transl.tr(
            '<b>Bottom Board</b> is the wood image to use for the bottom board.')
        self._double_board = self.transl.tr(
            '<b>Double Board</b> is the wood image to use for the double board.'
            ' If NONE, there is no double board.')
        self._dd_board = self.transl.tr(
            '<b>Double-Double Board</b> is the wood image to use for the double-double board.'
            ' If NONE, there is no double-double board.')
        self._double_thickness = self.transl.tr(
            '<b>Thickness</b> is the thickness (in%s) of the double board.')
        self._dd_thickness = self.transl.tr(
            '<b>Thickness</b> is the thickness (in%s) of the double-double board.')
        # Tooltips for the spacing controls; '%s' is filled with the
        # control labels from the spacing module by the accessors.
        self._es_slider0 = self.transl.tr('<b>%s</b> slider allows you to specify additional \
spacing between the fingers')
        self._es_slider1 = self.transl.tr('<b>%s</b> slider allows you to specify additional \
width added to the fingers.')
        self._es_centered = self.transl.tr('Check <b>%s</b> to force a finger to be centered on \
the board.')
        self._cb_vsfingers = self.transl.tr('<b>%s</b> specifies the approximate number of \
            fingers. At its minimum value, the width of the center finger is \
            maximized. At its maximum value, the width of the center finger is \
            minimized, and the result is the roughly the same as equally-spaced \
            using zero "Spacing", zero "Width", and the "Centered" option \
            checked.')
    # Accessors: return the translated strings, filling in the unit label
    # or the spacing-control labels where the text contains '%s'.
    def short_desc(self):
        return self._short_desc
    def license(self):
        return self._license
    def board_width(self):
        return self._board_width % self.sunits
    def bit_width(self):
        return self._bit_width % self.sunits
    def bit_depth(self):
        return self._bit_depth % self.sunits
    def bit_angle(self):
        return self._bit_angle
    def top_board(self):
        return self._top_board
    def bottom_board(self):
        return self._bottom_board
    def double_board(self):
        return self._double_board
    def dd_board(self):
        return self._dd_board
    def double_thickness(self):
        return self._double_thickness % self.sunits
    def dd_thickness(self):
        return self._dd_thickness % self.sunits
    def es_slider0(self):
        return self._es_slider0 % spacing.Equally_Spaced.keys[0]
    def es_slider1(self):
        return self._es_slider1 % spacing.Equally_Spaced.keys[1]
    def es_centered(self):
        return self._es_centered % spacing.Equally_Spaced.keys[2]
    def cb_vsfingers(self):
        return self._cb_vsfingers % spacing.Variable_Spaced.keys[0]
| gpl-3.0 |
jollyroger/debian-buildbot | buildbot/status/progress.py | 2 | 12005 | # This file is part of Buildbot. Buildbot is free software: you can
# redistribute it and/or modify it under the terms of the GNU General Public
# License as published by the Free Software Foundation, version 2.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more
# details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51
# Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Copyright Buildbot Team Members
from buildbot import util
from collections import defaultdict
from twisted.internet import reactor
from twisted.python import log
from twisted.spread import pb
class StepProgress:
    """I keep track of how much progress a single BuildStep has made.
    Progress is measured along various axes. Time consumed is one that is
    available for all steps. Amount of command output is another, and may be
    better quantified by scanning the output for markers to derive number of
    files compiled, directories walked, tests run, etc.
    I am created when the build begins, and given to a BuildProgress object
    so it can track the overall progress of the whole build.
    """
    # Wall-clock timestamps bracketing the step; None until start()/finish().
    startTime = None
    stopTime = None
    # Predicted duration in seconds, set through setExpectedTime().
    expectedTime = None
    # Owning BuildProgress instance, attached via setBuildProgress().
    buildProgress = None
    # When true, trace progress updates to stdout (Python 2 print statements).
    debug = False
    def __init__(self, name, metricNames):
        self.name = name
        # Per-metric current values and expected final values; None means
        # "no data yet" for that metric.
        self.progress = {}
        self.expectations = {}
        for m in metricNames:
            self.progress[m] = None
            self.expectations[m] = None
    def setBuildProgress(self, bp):
        # Attach the parent BuildProgress so updates can be propagated to it.
        self.buildProgress = bp
    def setExpectations(self, metrics):
        """The step can call this to explicitly set a target value for one
        of its metrics. E.g., ShellCommands knows how many commands it will
        execute, so it could set the 'commands' expectation."""
        for metric, value in metrics.items():
            self.expectations[metric] = value
        self.buildProgress.newExpectations()
    def setExpectedTime(self, seconds):
        # Explicit prediction of total runtime, propagated to the build.
        self.expectedTime = seconds
        self.buildProgress.newExpectations()
    def start(self):
        # Record the moment the step begins executing.
        if self.debug:
            print "StepProgress.start[%s]" % self.name
        self.startTime = util.now()
    def setProgress(self, metric, value):
        """The step calls this as progress is made along various axes."""
        if self.debug:
            print "setProgress[%s][%s] = %s" % (self.name, metric, value)
        self.progress[metric] = value
        if self.debug:
            r = self.remaining()
            print " step remaining:", r
        self.buildProgress.newProgress()
    def finish(self):
        """This stops the 'time' metric and marks the step as finished
        overall. It should be called after the last .setProgress has been
        done for each axis."""
        if self.debug:
            print "StepProgress.finish[%s]" % self.name
        self.stopTime = util.now()
        self.buildProgress.stepFinished(self.name)
    def totalTime(self):
        # Elapsed wall-clock time; implicitly returns None unless the step
        # both started and finished.
        if self.startTime is not None and self.stopTime is not None:
            return self.stopTime - self.startTime
    def remaining(self):
        """Estimate the number of seconds left for this step, or None when
        there is not enough data to guess."""
        if self.startTime is None:
            return self.expectedTime
        if self.stopTime is not None:
            return 0 # already finished
        # TODO: replace this with cleverness that graphs each metric vs.
        # time, then finds the inverse function. Will probably need to save
        # a timestamp with each setProgress update, when finished, go back
        # and find the 2% transition points, then save those 50 values in a
        # list. On the next build, do linear interpolation between the two
        # closest samples to come up with a percentage represented by that
        # metric.
        # TODO: If no other metrics are available, just go with elapsed
        # time. Given the non-time-uniformity of text output from most
        # steps, this would probably be better than the text-percentage
        # scheme currently implemented.
        percentages = []
        for metric, value in self.progress.items():
            expectation = self.expectations[metric]
            if value is not None and expectation is not None:
                p = 1.0 * value / expectation
                percentages.append(p)
        if percentages:
            # Average completion ratio across all metrics, clamped to [0, 1].
            avg = reduce(lambda x, y: x + y, percentages) / len(percentages)
            if avg > 1.0:
                # overdue
                avg = 1.0
            if avg < 0.0:
                avg = 0.0
        if percentages and self.expectedTime is not None:
            return self.expectedTime - (avg * self.expectedTime)
        if self.expectedTime is not None:
            # fall back to pure time
            return self.expectedTime - (util.now() - self.startTime)
        return None # no idea
class WatcherState:
    """Per-subscriber bookkeeping for BuildProgress update throttling.

    Holds the minimum interval between updates, the outstanding delayed
    call (if any), and a flag recording that an update became due while
    the rate-limit timer was still running.
    """

    def __init__(self, interval):
        # Minimum number of seconds between two updates to this watcher.
        self.interval = interval
        # Pending reactor.callLater handle, or None when no timer is active.
        self.timer = None
        # Truthy when an update is queued for delivery at timer expiry.
        self.needUpdate = 0
class BuildProgress(pb.Referenceable):
    """I keep track of overall build progress. I hold a list of StepProgress
    objects.

    I am remotely referenceable (twisted.spread): clients subscribe via
    remote_subscribe() and receive rate-limited 'progress' callbacks.
    """
    def __init__(self, stepProgresses):
        # Map of step name -> StepProgress; each step gets a back-reference
        # so it can push updates to us.
        self.steps = {}
        for s in stepProgresses:
            self.steps[s.name] = s
            s.setBuildProgress(self)
        self.finishedSteps = []
        # Map of remote reference -> WatcherState (per-subscriber throttle).
        self.watchers = {}
        self.debug = 0
    def setExpectationsFrom(self, exp):
        """Set our expectations from the builder's Expectations object."""
        for name, metrics in exp.steps.items():
            s = self.steps.get(name)
            if s:
                s.setExpectedTime(exp.times[name])
                s.setExpectations(exp.steps[name])
    def newExpectations(self):
        """Call this when one of the steps has changed its expectations.
        This should trigger us to update our ETA value and notify any
        subscribers."""
        pass # subscribers are not implemented: they just poll
    def stepFinished(self, stepname):
        # Called by a StepProgress when its step completes; once every step
        # has reported in, push a final update to all watchers.
        assert(stepname not in self.finishedSteps)
        self.finishedSteps.append(stepname)
        if len(self.finishedSteps) == len(self.steps.keys()):
            self.sendLastUpdates()
    def newProgress(self):
        # Called by a StepProgress on each setProgress(); fan out an update
        # only when we actually have an ETA.
        r = self.remaining()
        if self.debug:
            print " remaining:", r
        if r is not None:
            self.sendAllUpdates()
    def remaining(self):
        """Total estimated seconds left, or None if any step is unsure."""
        # sum eta of all steps
        sum = 0
        for name, step in self.steps.items():
            rem = step.remaining()
            if rem is None:
                return None # not sure
            sum += rem
        return sum
    def eta(self):
        """Absolute completion timestamp, or None if unknown."""
        left = self.remaining()
        if left is None:
            return None # not sure
        done = util.now() + left
        return done
    def remote_subscribe(self, remote, interval=5):
        """Register a remote watcher to receive 'progress' callbacks,
        throttled to at most one update per `interval` seconds."""
        # [interval, timer, needUpdate]
        # don't send an update more than once per interval
        self.watchers[remote] = WatcherState(interval)
        remote.notifyOnDisconnect(self.removeWatcher)
        self.updateWatcher(remote)
        self.startTimer(remote)
        log.msg("BuildProgress.remote_subscribe(%s)" % remote)
    def remote_unsubscribe(self, remote):
        # TODO: this doesn't work. I think 'remote' will always be different
        # than the object that appeared in _subscribe.
        log.msg("BuildProgress.remote_unsubscribe(%s)" % remote)
        self.removeWatcher(remote)
        # remote.dontNotifyOnDisconnect(self.removeWatcher)
    def removeWatcher(self, remote):
        """Drop a watcher, cancelling any pending rate-limit timer."""
        #log.msg("removeWatcher(%s)" % remote)
        try:
            timer = self.watchers[remote].timer
            if timer:
                timer.cancel()
            del self.watchers[remote]
        except KeyError:
            log.msg("Weird, removeWatcher on non-existent subscriber:",
                    remote)
    def sendAllUpdates(self):
        # Offer an update to every subscriber (each one is throttled
        # individually by updateWatcher).
        for r in self.watchers.keys():
            self.updateWatcher(r)
    def updateWatcher(self, remote):
        # an update wants to go to this watcher. Send it if we can, otherwise
        # queue it for later
        w = self.watchers[remote]
        if not w.timer:
            # no timer, so send update now and start the timer
            self.sendUpdate(remote)
            self.startTimer(remote)
        else:
            # timer is running, just mark as needing an update
            w.needUpdate = 1
    def startTimer(self, remote):
        # Arm the per-watcher rate-limit timer.
        w = self.watchers[remote]
        timer = reactor.callLater(w.interval, self.watcherTimeout, remote)
        w.timer = timer
    def sendUpdate(self, remote, last=0):
        """Push the current ETA to one watcher; with last=1 also send the
        'finished' notification. Failures drop the watcher."""
        self.watchers[remote].needUpdate = 0
        # text = self.asText() # TODO: not text, duh
        try:
            remote.callRemote("progress", self.remaining())
            if last:
                remote.callRemote("finished", self)
        except:
            log.deferr()
            self.removeWatcher(remote)
    def watcherTimeout(self, remote):
        # Rate-limit timer expired: deliver a queued update, if any.
        w = self.watchers.get(remote, None)
        if not w:
            return # went away
        w.timer = None
        if w.needUpdate:
            self.sendUpdate(remote)
            self.startTimer(remote)
    def sendLastUpdates(self):
        # Build is done: send the final update to everyone and clean up.
        for remote in self.watchers.keys():
            self.sendUpdate(remote, 1)
            self.removeWatcher(remote)
class Expectations:
    """Persistent per-builder history of step durations and metric values,
    blended across builds with an exponentially-weighted average."""
    debug = False
    # decay=1.0 ignores all but the last build
    # 0.9 is short time constant. 0.1 is very long time constant
    # TODO: let decay be specified per-metric
    decay = 0.5
    def __init__(self, buildprogress):
        """Create us from a successful build. We will expect each step to
        take as long as it did in that build."""
        # .steps maps stepname to dict2
        # dict2 maps metricname to final end-of-step value
        self.steps = defaultdict(dict)
        # .times maps stepname to per-step elapsed time
        self.times = {}
        for name, step in buildprogress.steps.items():
            self.steps[name] = {}
            for metric, value in step.progress.items():
                self.steps[name][metric] = value
            self.times[name] = None
            if step.startTime is not None and step.stopTime is not None:
                self.times[name] = step.stopTime - step.startTime
    def wavg(self, old, current):
        """Exponentially-weighted average of `old` and `current`; either
        side may be None, in which case the other one wins outright."""
        if old is None:
            return current
        if current is None:
            return old
        else:
            return (current * self.decay) + (old * (1 - self.decay))
    def update(self, buildprogress):
        """Fold the timings and metric values of a finished build into the
        running averages."""
        for name, stepprogress in buildprogress.steps.items():
            old = self.times.get(name)
            current = stepprogress.totalTime()
            if current is None:
                # Step never ran (or never finished) in this build.
                log.msg("Expectations.update: current[%s] was None!" % name)
                continue
            new = self.wavg(old, current)
            self.times[name] = new
            if self.debug:
                print "new expected time[%s] = %s, old %s, cur %s" % \
                      (name, new, old, current)
            for metric, current in stepprogress.progress.items():
                old = self.steps[name].get(metric)
                new = self.wavg(old, current)
                if self.debug:
                    print "new expectation[%s][%s] = %s, old %s, cur %s" % \
                          (name, metric, new, old, current)
                self.steps[name][metric] = new
    def expectedBuildTime(self):
        """Sum of the expected step times, or None if any step's time is
        still unknown."""
        if None in self.times.values():
            return None
        # return sum(self.times.values())
        # python-2.2 doesn't have 'sum'. TODO: drop python-2.2 support
        s = 0
        for v in self.times.values():
            s += v
        return s
| gpl-2.0 |
Jgarcia-IAS/ReporsitorioVacioOdoo | openerp/loglevels.py | 380 | 3930 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
LOG_NOTSET = 'notset'
LOG_DEBUG = 'debug'
LOG_INFO = 'info'
LOG_WARNING = 'warn'
LOG_ERROR = 'error'
LOG_CRITICAL = 'critical'
# TODO get_encodings, ustr and exception_to_unicode were originally from tools.misc.
# There are here until we refactor tools so that this module doesn't depends on tools.
def get_encodings(hint_encoding='utf-8'):
    """Yield candidate encodings to try when decoding a byte string.

    Order: the hint itself, then a richer alias of the hint (e.g.
    latin1 -> latin9), then the generic defaults 'utf8' and 'latin1'
    (skipping a duplicate of the hint), and finally the locale's
    preferred encoding plus its alias when it is not already utf-8.
    """
    fallbacks = {
        'latin1': 'latin9',
        'iso-8859-1': 'iso8859-15',
        'cp1252': '1252',
    }
    if hint_encoding:
        yield hint_encoding
        alias = fallbacks.get(hint_encoding.lower())
        if alias is not None:
            yield alias
    # Generic defaults, which also take care of pure ASCII input.
    for charset in ('utf8', 'latin1'):
        if not hint_encoding or charset != hint_encoding.lower():
            yield charset
    from locale import getpreferredencoding
    prefenc = getpreferredencoding()
    if prefenc and prefenc.lower() != 'utf-8':
        yield prefenc
        alias = fallbacks.get(prefenc.lower())
        if alias:
            yield alias
def ustr(value, hint_encoding='utf-8', errors='strict'):
    """Coerce `value` to a unicode string, like the builtin `unicode`,
    except that several encodings are tried (starting with
    `hint_encoding`) when `value` is a byte string.

    :param value: the value to convert
    :param hint_encoding: optional encoding detected upstream, tried
        first when decoding a byte string.
    :param str errors: 'strict', 'ignore' or 'replace', forwarded to the
        unicode constructor. Anything other than 'strict' means the very
        first encoding tried will be used, even if it is not the correct
        one, so be careful! Ignored when value is not a string.
    :raise UnicodeError: if value cannot be coerced to unicode
    :return: unicode string representing the given value
    """
    if isinstance(value, Exception):
        return exception_to_unicode(value)
    if isinstance(value, unicode):
        return value
    if isinstance(value, basestring):
        # Byte string: try the hinted encoding first, then the fallbacks.
        for encoding in get_encodings(hint_encoding):
            try:
                return unicode(value, encoding, errors=errors)
            except Exception:
                pass
    else:
        # Arbitrary object: rely on its own unicode conversion.
        try:
            return unicode(value)
        except Exception:
            pass
    raise UnicodeError('unable to convert %r' % (value,))
def exception_to_unicode(e):
    """Return a best-effort unicode rendering of exception ``e``."""
    # Pythons before 2.6 exposed the text through the `message` attribute.
    if hasattr(e, 'message') and sys.version_info[:2] < (2, 6):
        return ustr(e.message)
    if hasattr(e, 'args'):
        parts = (ustr(arg) for arg in e.args)
        return "\n".join(parts)
    try:
        return unicode(e)
    except Exception:
        return u"Unknown message"
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
goodwinnk/intellij-community | python/helpers/pydev/_pydev_bundle/pydev_ipython_console.py | 3 | 3684 | import traceback
import sys
from _pydev_bundle.pydev_console_utils import BaseInterpreterInterface
from _pydev_bundle.pydev_ipython_console_011 import get_pydev_frontend
from _pydevd_bundle.pydevd_constants import dict_iter_items
# Uncomment to force PyDev standard shell.
# raise ImportError()
#=======================================================================================================================
# InterpreterInterface
#=======================================================================================================================
class InterpreterInterface(BaseInterpreterInterface):
    '''
    The methods in this class should be registered in the xml-rpc server.
    '''

    def __init__(self, main_thread, show_banner=True, connect_status_queue=None, rpc_client=None):
        BaseInterpreterInterface.__init__(self, main_thread, connect_status_queue, rpc_client)
        self.interpreter = get_pydev_frontend(rpc_client)
        self._input_error_printed = False
        # Bookkeeping for notify_about_magic(): keep retrying the client
        # notification until it succeeds, but at most notification_max_tries
        # times.
        self.notification_succeeded = False
        self.notification_tries = 0
        self.notification_max_tries = 3
        self.show_banner = show_banner

    def get_greeting_msg(self):
        # Optionally show the IPython banner before returning the greeting.
        if self.show_banner:
            self.interpreter.show_banner()
        return self.interpreter.get_greeting_msg()

    def do_add_exec(self, code_fragment):
        """Execute `code_fragment` in the IPython frontend and return the
        bool produced by the underlying interpreter's add_exec."""
        self.notify_about_magic()
        # A trailing '??' asks IPython for detailed introspection output;
        # bracket it with markers so the client can recognize the block,
        # even when add_exec raises.
        if code_fragment.text.rstrip().endswith('??'):
            print('IPython-->')
        try:
            res = bool(self.interpreter.add_exec(code_fragment.text))
        finally:
            if code_fragment.text.rstrip().endswith('??'):
                print('<--IPython')
        return res

    def get_namespace(self):
        return self.interpreter.get_namespace()

    def close(self):
        sys.exit(0)

    def notify_about_magic(self):
        """Send the available magic commands to the client (best effort).

        Retried on subsequent calls until it succeeds, giving up after
        notification_max_tries attempts.
        """
        if not self.notification_succeeded:
            self.notification_tries += 1
            if self.notification_tries > self.notification_max_tries:
                return
            completions = self.do_get_completions("%", "%")
            magic_commands = [x[0] for x in completions]
            server = self.get_server()
            if server is not None:
                try:
                    server.notifyAboutMagic(magic_commands, self.interpreter.is_automagic())
                    self.notification_succeeded = True
                except Exception:
                    # Fixed from a bare `except:` so SystemExit and
                    # KeyboardInterrupt still propagate; any ordinary
                    # failure just schedules another try.
                    self.notification_succeeded = False

    def get_ipython_hidden_vars_dict(self):
        """Return the dict of variables IPython hides from the user
        namespace, or None if they cannot be determined."""
        try:
            if hasattr(self.interpreter, 'ipython') and hasattr(self.interpreter.ipython, 'user_ns_hidden'):
                user_ns_hidden = self.interpreter.ipython.user_ns_hidden
                if isinstance(user_ns_hidden, dict):
                    # Since IPython 2 dict `user_ns_hidden` contains hidden variables and values
                    user_hidden_dict = user_ns_hidden.copy()
                else:
                    # In IPython 1.x `user_ns_hidden` used to be a set with names of hidden variables
                    user_hidden_dict = dict([(key, val) for key, val in dict_iter_items(self.interpreter.ipython.user_ns)
                                             if key in user_ns_hidden])
                # while `_`, `__` and `___` were not initialized, they are not presented in `user_ns_hidden`
                user_hidden_dict.setdefault('_', '')
                user_hidden_dict.setdefault('__', '')
                user_hidden_dict.setdefault('___', '')
                return user_hidden_dict
        except Exception:
            # Fixed from a bare `except:` (would have swallowed
            # SystemExit/KeyboardInterrupt too).
            # Getting IPython variables shouldn't break loading frame variables
            traceback.print_exc()
| apache-2.0 |
c-amr/camr | stanfordnlp/unidecode/x016.py | 252 | 4140 | data = (
'kka', # 0x00
'kk', # 0x01
'nu', # 0x02
'no', # 0x03
'ne', # 0x04
'nee', # 0x05
'ni', # 0x06
'na', # 0x07
'mu', # 0x08
'mo', # 0x09
'me', # 0x0a
'mee', # 0x0b
'mi', # 0x0c
'ma', # 0x0d
'yu', # 0x0e
'yo', # 0x0f
'ye', # 0x10
'yee', # 0x11
'yi', # 0x12
'ya', # 0x13
'ju', # 0x14
'ju', # 0x15
'jo', # 0x16
'je', # 0x17
'jee', # 0x18
'ji', # 0x19
'ji', # 0x1a
'ja', # 0x1b
'jju', # 0x1c
'jjo', # 0x1d
'jje', # 0x1e
'jjee', # 0x1f
'jji', # 0x20
'jja', # 0x21
'lu', # 0x22
'lo', # 0x23
'le', # 0x24
'lee', # 0x25
'li', # 0x26
'la', # 0x27
'dlu', # 0x28
'dlo', # 0x29
'dle', # 0x2a
'dlee', # 0x2b
'dli', # 0x2c
'dla', # 0x2d
'lhu', # 0x2e
'lho', # 0x2f
'lhe', # 0x30
'lhee', # 0x31
'lhi', # 0x32
'lha', # 0x33
'tlhu', # 0x34
'tlho', # 0x35
'tlhe', # 0x36
'tlhee', # 0x37
'tlhi', # 0x38
'tlha', # 0x39
'tlu', # 0x3a
'tlo', # 0x3b
'tle', # 0x3c
'tlee', # 0x3d
'tli', # 0x3e
'tla', # 0x3f
'zu', # 0x40
'zo', # 0x41
'ze', # 0x42
'zee', # 0x43
'zi', # 0x44
'za', # 0x45
'z', # 0x46
'z', # 0x47
'dzu', # 0x48
'dzo', # 0x49
'dze', # 0x4a
'dzee', # 0x4b
'dzi', # 0x4c
'dza', # 0x4d
'su', # 0x4e
'so', # 0x4f
'se', # 0x50
'see', # 0x51
'si', # 0x52
'sa', # 0x53
'shu', # 0x54
'sho', # 0x55
'she', # 0x56
'shee', # 0x57
'shi', # 0x58
'sha', # 0x59
'sh', # 0x5a
'tsu', # 0x5b
'tso', # 0x5c
'tse', # 0x5d
'tsee', # 0x5e
'tsi', # 0x5f
'tsa', # 0x60
'chu', # 0x61
'cho', # 0x62
'che', # 0x63
'chee', # 0x64
'chi', # 0x65
'cha', # 0x66
'ttsu', # 0x67
'ttso', # 0x68
'ttse', # 0x69
'ttsee', # 0x6a
'ttsi', # 0x6b
'ttsa', # 0x6c
'X', # 0x6d
'.', # 0x6e
'qai', # 0x6f
'ngai', # 0x70
'nngi', # 0x71
'nngii', # 0x72
'nngo', # 0x73
'nngoo', # 0x74
'nnga', # 0x75
'nngaa', # 0x76
'[?]', # 0x77
'[?]', # 0x78
'[?]', # 0x79
'[?]', # 0x7a
'[?]', # 0x7b
'[?]', # 0x7c
'[?]', # 0x7d
'[?]', # 0x7e
'[?]', # 0x7f
' ', # 0x80
'b', # 0x81
'l', # 0x82
'f', # 0x83
's', # 0x84
'n', # 0x85
'h', # 0x86
'd', # 0x87
't', # 0x88
'c', # 0x89
'q', # 0x8a
'm', # 0x8b
'g', # 0x8c
'ng', # 0x8d
'z', # 0x8e
'r', # 0x8f
'a', # 0x90
'o', # 0x91
'u', # 0x92
'e', # 0x93
'i', # 0x94
'ch', # 0x95
'th', # 0x96
'ph', # 0x97
'p', # 0x98
'x', # 0x99
'p', # 0x9a
'<', # 0x9b
'>', # 0x9c
'[?]', # 0x9d
'[?]', # 0x9e
'[?]', # 0x9f
'f', # 0xa0
'v', # 0xa1
'u', # 0xa2
'yr', # 0xa3
'y', # 0xa4
'w', # 0xa5
'th', # 0xa6
'th', # 0xa7
'a', # 0xa8
'o', # 0xa9
'ac', # 0xaa
'ae', # 0xab
'o', # 0xac
'o', # 0xad
'o', # 0xae
'oe', # 0xaf
'on', # 0xb0
'r', # 0xb1
'k', # 0xb2
'c', # 0xb3
'k', # 0xb4
'g', # 0xb5
'ng', # 0xb6
'g', # 0xb7
'g', # 0xb8
'w', # 0xb9
'h', # 0xba
'h', # 0xbb
'h', # 0xbc
'h', # 0xbd
'n', # 0xbe
'n', # 0xbf
'n', # 0xc0
'i', # 0xc1
'e', # 0xc2
'j', # 0xc3
'g', # 0xc4
'ae', # 0xc5
'a', # 0xc6
'eo', # 0xc7
'p', # 0xc8
'z', # 0xc9
's', # 0xca
's', # 0xcb
's', # 0xcc
'c', # 0xcd
'z', # 0xce
't', # 0xcf
't', # 0xd0
'd', # 0xd1
'b', # 0xd2
'b', # 0xd3
'p', # 0xd4
'p', # 0xd5
'e', # 0xd6
'm', # 0xd7
'm', # 0xd8
'm', # 0xd9
'l', # 0xda
'l', # 0xdb
'ng', # 0xdc
'ng', # 0xdd
'd', # 0xde
'o', # 0xdf
'ear', # 0xe0
'ior', # 0xe1
'qu', # 0xe2
'qu', # 0xe3
'qu', # 0xe4
's', # 0xe5
'yr', # 0xe6
'yr', # 0xe7
'yr', # 0xe8
'q', # 0xe9
'x', # 0xea
'.', # 0xeb
':', # 0xec
'+', # 0xed
'17', # 0xee
'18', # 0xef
'19', # 0xf0
'[?]', # 0xf1
'[?]', # 0xf2
'[?]', # 0xf3
'[?]', # 0xf4
'[?]', # 0xf5
'[?]', # 0xf6
'[?]', # 0xf7
'[?]', # 0xf8
'[?]', # 0xf9
'[?]', # 0xfa
'[?]', # 0xfb
'[?]', # 0xfc
'[?]', # 0xfd
'[?]', # 0xfe
)
| gpl-2.0 |
tadashi-aikawa/gemini | jumeaux/addons/judgement/same.py | 1 | 1698 | # -*- coding:utf-8 -*-
from owlmixin import OwlMixin, TList, TOption
from jumeaux.addons.judgement import JudgementExecutor
from jumeaux.addons.utils import when_filter
from jumeaux.logger import Logger
from jumeaux.models import JudgementAddOnPayload, JudgementAddOnReference
logger: Logger = Logger(__name__)
LOG_PREFIX = "[judgement/same]"
class Config(OwlMixin):
    # Condition expressions evaluated by `when_filter`; a response pair is
    # regarded as "same" when ANY of them matches.
    when_any: TList[str]
    @classmethod
    def validate(cls, config):
        """Abort the process (logger.error with exit=True) when the raw
        config dict is missing the mandatory `when_any` key."""
        if not config or 'when_any' not in config:
            logger.error(f'{LOG_PREFIX} `config.when_any` is required !!', exit=True)
class Executor(JudgementExecutor):
    # Parsed add-on configuration (a Config instance).
    config: Config
    def __init__(self, config: dict) -> None:
        # Validate first (exits on bad config), then deserialize.
        Config.validate(config)
        self.config: Config = Config.from_dict(config)
    def exec(self, payload: JudgementAddOnPayload, reference: JudgementAddOnReference) -> JudgementAddOnPayload:
        """Regard the response pair as "same" when any configured
        `when_any` expression matches the request/response context.
        Payloads already marked as same pass through unchanged.
        """
        if payload.regard_as_same:
            return payload
        # First matching expression, as a TOption (empty when none match).
        same: TOption[str] = self.config.when_any.find(lambda x: when_filter(x, {
            "req": {
                "name": reference.name,
                "path": reference.path,
                "qs": reference.qs,
                "headers": reference.headers,
            },
            "res_one": reference.res_one,
            "res_other": reference.res_other,
            "dict_one": reference.dict_one,
            "dict_other": reference.dict_other,
        }))
        if same.get():
            logger.info_lv3(f"{LOG_PREFIX} Regard as same by `{same.get()}`.")
        return JudgementAddOnPayload.from_dict({
            "diffs_by_cognition": payload.diffs_by_cognition,
            "regard_as_same": not same.is_none(),
        })
| mit |
lisael/pg-django | django/test/simple.py | 1 | 16207 | import unittest as real_unittest
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.db.models import get_app, get_apps
from django.db.models.loading import unregister_app
from django.test import _doctest as doctest
from django.test.utils import setup_test_environment, teardown_test_environment
from django.test.testcases import OutputChecker, DocTestRunner, TestCase
from django.utils import unittest
from django.utils.importlib import import_module
from django.utils.module_loading import module_has_submodule
__all__ = ('DjangoTestRunner', 'DjangoTestSuiteRunner')
# The module name for tests outside models.py
TEST_MODULE = 'tests'
doctestOutputChecker = OutputChecker()
class DjangoTestRunner(unittest.TextTestRunner):
    # Deprecated shim: behaves exactly like unittest.TextTestRunner and
    # only exists to emit a DeprecationWarning for legacy callers.
    def __init__(self, *args, **kwargs):
        import warnings
        warnings.warn(
            "DjangoTestRunner is deprecated; it's functionality is "
            "indistinguishable from TextTestRunner",
            DeprecationWarning
        )
        super(DjangoTestRunner, self).__init__(*args, **kwargs)
def get_tests(app_module):
    """Return the app's tests module, or None when no such module exists.

    `app_module` is the application's models module. The tests module
    (named by TEST_MODULE) is looked up beside it. A genuinely missing
    tests module yields None, while a tests module that exists but fails
    to import re-raises the original ImportError so the error surfaces.
    """
    parts = app_module.__name__.split('.')
    prefix, last = parts[:-1], parts[-1]
    try:
        test_module = import_module('.'.join(prefix + [TEST_MODULE]))
    except ImportError:
        # Couldn't import tests.py. Was it due to a missing file, or
        # due to an import error in a tests.py that actually exists?
        # app_module either points to a models.py file, or models/__init__.py
        # Tests are therefore either in same directory, or one level up
        if last == 'models':
            app_root = import_module('.'.join(prefix))
        else:
            app_root = app_module
        if not module_has_submodule(app_root, TEST_MODULE):
            test_module = None
        else:
            # The module exists, so there must be an import error in the test
            # module itself.
            raise
    return test_module
def build_suite(app_module):
    """
    Create a complete Django test suite for the provided application module.

    Collects unit tests and doctests from both the models module and the
    sibling tests module (when present). An app whose TEST_SKIP_* database
    feature requirements are not met (see skip_app) is unregistered and an
    empty suite is returned for it.
    """
    suite = unittest.TestSuite()
    if app_module.__name__.split('.')[-2] and skip_app(app_module):
        unregister_app(app_module.__name__.split('.')[-2])
        settings.INSTALLED_APPS.remove('.'.join(app_module.__name__.split('.')[:-1]))
        return suite
    # Load unit and doctests in the models.py module. If module has
    # a suite() method, use it. Otherwise build the test suite ourselves.
    if hasattr(app_module, 'suite'):
        suite.addTest(app_module.suite())
    else:
        suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
            app_module))
        try:
            suite.addTest(doctest.DocTestSuite(app_module,
                                               checker=doctestOutputChecker,
                                               runner=DocTestRunner))
        except ValueError:
            # No doc tests in models.py
            pass
    # Check to see if a separate 'tests' module exists parallel to the
    # models module
    test_module = get_tests(app_module)
    if test_module:
        # Load unit and doctests in the tests.py module. If module has
        # a suite() method, use it. Otherwise build the test suite ourselves.
        if hasattr(test_module, 'suite'):
            suite.addTest(test_module.suite())
        else:
            suite.addTest(unittest.defaultTestLoader.loadTestsFromModule(
                test_module))
            try:
                suite.addTest(doctest.DocTestSuite(
                    test_module, checker=doctestOutputChecker,
                    runner=DocTestRunner))
            except ValueError:
                # No doc tests in tests.py
                pass
    return suite
def skip_app(app_module):
    """Return True if the app must be skipped, according to its
    TEST_SKIP_UNLESS_DB_FEATURES and TEST_SKIP_IF_DB_FEATURES attributes.

    The app is skipped when any configured database connection lacks one
    of the required features, or exposes one of the forbidden ones.
    """
    required_features = getattr(app_module, 'TEST_SKIP_UNLESS_DB_FEATURES', [])
    forbidden_features = getattr(app_module, 'TEST_SKIP_IF_DB_FEATURES', [])
    if not (required_features or forbidden_features):
        return False
    # Imported lazily so apps without these attributes never touch the
    # database configuration.
    from django.db import connections
    for alias in connections:
        features = connections[alias].features
        for feature in required_features:
            if not getattr(features, feature, False):
                return True
        for feature in forbidden_features:
            if getattr(features, feature, False):
                return True
    return False
def build_test(label):
    """
    Construct a test case with the specified label. Label should be of the
    form model.TestClass or model.TestClass.test_method. Returns an
    instantiated test or test suite corresponding to the label provided.
    """
    parts = label.split('.')
    if len(parts) < 2 or len(parts) > 3:
        raise ValueError("Test label '%s' should be of the form app.TestCase "
                         "or app.TestCase.test_method" % label)

    #
    # First, look for TestCase instances with a name that matches
    #
    app_module = get_app(parts[0])
    # If the app's declared database-feature requirements are not met,
    # unregister it and return an empty suite instead of failing.
    if skip_app(app_module):
        unregister_app(app_module.__name__.split('.')[-2])
        settings.INSTALLED_APPS.remove('.'.join(app_module.__name__.split('.')[:-1]))
        return unittest.TestSuite()
    test_module = get_tests(app_module)
    TestClass = getattr(app_module, parts[1], None)

    # Couldn't find the test class in models.py; look in tests.py
    if TestClass is None:
        if test_module:
            TestClass = getattr(test_module, parts[1], None)

    try:
        # NOTE: issubclass raises TypeError when TestClass is still None or
        # not a class at all; that is caught below to fall through to the
        # doctest lookup.
        if issubclass(TestClass, (unittest.TestCase, real_unittest.TestCase)):
            if len(parts) == 2:  # label is app.TestClass
                try:
                    return unittest.TestLoader().loadTestsFromTestCase(
                        TestClass)
                except TypeError:
                    raise ValueError(
                        "Test label '%s' does not refer to a test class"
                        % label)
            else:  # label is app.TestClass.test_method
                return TestClass(parts[2])
    except TypeError:
        # TestClass isn't a TestClass - it must be a method or normal class
        pass

    #
    # If there isn't a TestCase, look for a doctest that matches
    #
    tests = []
    for module in app_module, test_module:
        try:
            doctests = doctest.DocTestSuite(module,
                                            checker=doctestOutputChecker,
                                            runner=DocTestRunner)
            # Now iterate over the suite, looking for doctests whose name
            # matches the pattern that was given
            for test in doctests:
                if test._dt_test.name in (
                        '%s.%s' % (module.__name__, '.'.join(parts[1:])),
                        '%s.__test__.%s' % (
                            module.__name__, '.'.join(parts[1:]))):
                    tests.append(test)
        except ValueError:
            # No doctests found.
            pass

    # If no tests were found, then we were given a bad test label.
    if not tests:
        raise ValueError("Test label '%s' does not refer to a test" % label)

    # Construct a suite out of the tests that matched.
    return unittest.TestSuite(tests)
def partition_suite(suite, classes, bins):
    """
    Partition a test suite by test type.

    ``classes`` is a sequence of types and ``bins`` a sequence of
    TestSuites, one more than ``classes``. Each test of type
    ``classes[i]`` is added to ``bins[i]``; tests matching none of the
    classes are placed in ``bins[-1]``. Nested suites are flattened
    recursively so only leaf tests are binned.
    """
    for test in suite:
        if isinstance(test, unittest.TestSuite):
            partition_suite(test, classes, bins)
            continue
        for index, cls in enumerate(classes):
            if isinstance(test, cls):
                bins[index].addTest(test)
                break
        else:
            # No class matched: the catch-all bin gets the test.
            bins[-1].addTest(test)
def reorder_suite(suite, classes):
    """
    Reorder a test suite by test type.

    ``classes`` is a sequence of types. All tests of type ``classes[0]``
    are placed first, then tests of type ``classes[1]``, etc. Tests with
    no match in ``classes`` are placed last.
    """
    # One bin per class plus a trailing catch-all bin.
    bins = [unittest.TestSuite() for _ in range(len(classes) + 1)]
    partition_suite(suite, classes, bins)
    ordered = bins[0]
    for trailing in bins[1:]:
        ordered.addTests(trailing)
    return ordered
def dependency_ordered(test_databases, dependencies):
    """Reorder test_databases into an order that honors the dependencies
    described in TEST_DEPENDENCIES.

    Repeatedly sweeps the remaining databases, emitting those whose
    dependency aliases are already resolved; raises ImproperlyConfigured
    when a whole sweep makes no progress (a dependency cycle).
    """
    ordered = []
    resolved = set()
    remaining = test_databases
    while remaining:
        progressed = False
        deferred = []
        while remaining:
            signature, (db_name, aliases) = remaining.pop()
            satisfied = True
            for alias in aliases:
                if alias not in dependencies:
                    # No declared prerequisites: resolved immediately.
                    resolved.add(alias)
                elif all(dep in resolved for dep in dependencies[alias]):
                    # Every prerequisite of this alias is already set up.
                    dependencies.pop(alias)
                    resolved.add(alias)
                else:
                    satisfied = False
            if satisfied:
                ordered.append((signature, (db_name, aliases)))
                progressed = True
            else:
                deferred.append((signature, (db_name, aliases)))
        if not progressed:
            raise ImproperlyConfigured(
                "Circular dependency in TEST_DEPENDENCIES")
        remaining = deferred
    return ordered
class DjangoTestSuiteRunner(object):
    """Orchestrates a full test run: environment setup, suite building,
    test-database creation, execution, and teardown."""

    def __init__(self, verbosity=1, interactive=True, failfast=True, **kwargs):
        # verbosity: forwarded to the unittest text runner.
        # interactive: when False, existing test databases are destroyed
        #   without prompting (autoclobber).
        # failfast: abort the run on the first failure or error.
        self.verbosity = verbosity
        self.interactive = interactive
        self.failfast = failfast

    def setup_test_environment(self, **kwargs):
        # Patch the global environment for testing, force DEBUG off, and
        # install the SIGINT handler so an interrupted run cleans up.
        setup_test_environment()
        settings.DEBUG = False
        unittest.installHandler()

    def build_suite(self, test_labels, extra_tests=None, **kwargs):
        """Build the suite for the given labels ('app', 'app.TestCase' or
        'app.TestCase.test_method'); with no labels, the tests of every
        installed app are collected. extra_tests are appended verbatim."""
        suite = unittest.TestSuite()
        if test_labels:
            for label in test_labels:
                if '.' in label:
                    # Dotted label: a specific TestCase or test method.
                    suite.addTest(build_test(label))
                else:
                    # Bare app label: the app's whole suite.
                    app = get_app(label)
                    suite.addTest(build_suite(app))
        else:
            for app in get_apps():
                suite.addTest(build_suite(app))
        if extra_tests:
            for test in extra_tests:
                suite.addTest(test)
        # Run TestCase subclasses first.
        return reorder_suite(suite, (TestCase,))

    def setup_databases(self, **kwargs):
        """Create the test databases, honoring TEST_MIRROR aliases and
        TEST_DEPENDENCIES ordering. Returns the (old_names, mirrors)
        state consumed by teardown_databases()."""
        from django.db import connections, DEFAULT_DB_ALIAS

        # First pass -- work out which databases actually need to be created,
        # and which ones are test mirrors or duplicate entries in DATABASES
        mirrored_aliases = {}
        test_databases = {}
        dependencies = {}
        for alias in connections:
            connection = connections[alias]
            if connection.settings_dict['TEST_MIRROR']:
                # If the database is marked as a test mirror, save
                # the alias.
                mirrored_aliases[alias] = (
                    connection.settings_dict['TEST_MIRROR'])
            else:
                # Store a tuple with DB parameters that uniquely identify it.
                # If we have two aliases with the same values for that tuple,
                # we only need to create the test database once.
                item = test_databases.setdefault(
                    connection.creation.test_db_signature(),
                    (connection.settings_dict['NAME'], [])
                )
                item[1].append(alias)

                if 'TEST_DEPENDENCIES' in connection.settings_dict:
                    dependencies[alias] = (
                        connection.settings_dict['TEST_DEPENDENCIES'])
                else:
                    # Non-default databases implicitly depend on default.
                    if alias != DEFAULT_DB_ALIAS:
                        dependencies[alias] = connection.settings_dict.get(
                            'TEST_DEPENDENCIES', [DEFAULT_DB_ALIAS])

        # Second pass -- actually create the databases.
        old_names = []
        mirrors = []
        for signature, (db_name, aliases) in dependency_ordered(
                test_databases.items(), dependencies):
            # Actually create the database for the first connection
            connection = connections[aliases[0]]
            old_names.append((connection, db_name, True))
            test_db_name = connection.creation.create_test_db(
                self.verbosity, autoclobber=not self.interactive)
            for alias in aliases[1:]:
                connection = connections[alias]
                if db_name:
                    # Duplicate entry: point this alias at the database
                    # created above instead of creating another one.
                    old_names.append((connection, db_name, False))
                    connection.settings_dict['NAME'] = test_db_name
                else:
                    # If settings_dict['NAME'] isn't defined, we have a backend
                    # where the name isn't important -- e.g., SQLite, which
                    # uses :memory:. Force create the database instead of
                    # assuming it's a duplicate.
                    old_names.append((connection, db_name, True))
                    connection.creation.create_test_db(
                        self.verbosity, autoclobber=not self.interactive)

        for alias, mirror_alias in mirrored_aliases.items():
            # Point each mirror at its source's test database and share
            # the source's feature set.
            mirrors.append((alias, connections[alias].settings_dict['NAME']))
            connections[alias].settings_dict['NAME'] = (
                connections[mirror_alias].settings_dict['NAME'])
            connections[alias].features = connections[mirror_alias].features

        return old_names, mirrors

    def run_suite(self, suite, **kwargs):
        # Execute the suite with the standard text runner.
        return unittest.TextTestRunner(
            verbosity=self.verbosity, failfast=self.failfast).run(suite)

    def teardown_databases(self, old_config, **kwargs):
        """
        Destroys all the non-mirror databases.
        """
        old_names, mirrors = old_config
        for connection, old_name, destroy in old_names:
            if destroy:
                connection.creation.destroy_test_db(old_name, self.verbosity)

    def teardown_test_environment(self, **kwargs):
        # Undo setup_test_environment() in reverse order.
        unittest.removeHandler()
        teardown_test_environment()

    def suite_result(self, suite, result, **kwargs):
        # Exit status: total number of failed/errored tests.
        return len(result.failures) + len(result.errors)

    def run_tests(self, test_labels, extra_tests=None, **kwargs):
        """
        Run the unit tests for all the test labels in the provided list.
        Labels must be of the form:
         - app.TestClass.test_method
             Run a single specific test method
         - app.TestClass
             Run all the test methods in a given class
         - app
             Search for doctests and unittests in the named application.
        When looking for tests, the test runner will look in the models and
        tests modules for the application.
        A list of 'extra' tests may also be provided; these tests
        will be added to the test suite.
        Returns the number of tests that failed.
        """
        self.setup_test_environment()
        suite = self.build_suite(test_labels, extra_tests)
        # Unregister any app whose database feature requirements are unmet
        # so it does not participate in database setup.
        for app in get_apps():
            if skip_app(app):
                unregister_app(app.__name__.split('.')[-2])
                settings.INSTALLED_APPS.remove(app.__name__)
        old_config = self.setup_databases()
        result = self.run_suite(suite)
        self.teardown_databases(old_config)
        self.teardown_test_environment()
        return self.suite_result(suite, result)
| bsd-3-clause |
t0in4/django | django/conf/locale/de/formats.py | 504 | 1100 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
# German (de) display formats, Django date-format syntax.
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y H:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y', '%d.%m.%y',  # '25.10.2006', '25.10.06'
    # '%d. %B %Y', '%d. %b. %Y',  # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y %H:%M:%S',  # '25.10.2006 14:30:59'
    '%d.%m.%Y %H:%M:%S.%f',  # '25.10.2006 14:30:59.000200'
    '%d.%m.%Y %H:%M',  # '25.10.2006 14:30'
    '%d.%m.%Y',  # '25.10.2006'
]
# German number formatting: decimal comma, dot as thousands separator,
# digits grouped in threes (e.g. 1.234.567,89).
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
edgarRd/incubator-airflow | tests/contrib/hooks/test_jira_hook.py | 15 | 1863 | # -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
import unittest
from mock import Mock
from mock import patch
from airflow import configuration
from airflow.contrib.hooks.jira_hook import JiraHook
from airflow import models
from airflow.utils import db
jira_client_mock = Mock(
name="jira_client"
)
class TestJiraHook(unittest.TestCase):
    """Verify that JiraHook builds its client from the stored connection."""

    def setUp(self):
        configuration.load_test_config()
        # Register the default Jira connection the hook will look up.
        db.merge_conn(
            models.Connection(
                conn_id='jira_default', conn_type='jira',
                host='https://localhost/jira/', port=443,
                extra='{"verify": "False", "project": "AIRFLOW"}'))

    @patch("airflow.contrib.hooks.jira_hook.JIRA", autospec=True,
           return_value=jira_client_mock)
    def test_jira_client_connection(self, jira_mock):
        # Instantiating the hook must construct the (patched) JIRA client.
        jira_hook = JiraHook()
        self.assertTrue(jira_mock.called)
        self.assertIsInstance(jira_hook.client, Mock)
        self.assertEqual(jira_hook.client.name, jira_mock.return_value.name)
if __name__ == '__main__':
unittest.main()
| apache-2.0 |
hecchi777/S3-SlaacSecuritySolution | impacket-0.9.11/impacket/testcases/dot11/test-RadioTap.py | 4 | 25693 | #!/usr/bin/env python
# sorry, this is very ugly, but I'm in python 2.5
import sys
sys.path.insert(0,"../..")
from dot11 import Dot11,Dot11Types,Dot11DataFrame,RadioTap
from ImpactPacket import Data
from binascii import hexlify
import unittest
class TestRadioTap(unittest.TestCase):
    def setUp(self):
        # Two captured RadioTap frames are used as fixtures throughout
        # the class; rt1 has a 24-byte header, rt2 a 32-byte header.
        # Radio Tap(Flags,Rate,Channel,Antenna,DBMAntSignal,_
        # FCSinHeader)+802.11 Data Frame+LLC SNAP+ARP Reply
        self.frame_orig_1='\x00\x00\x18\x00\x0e\x58\x00\x00\x10\x6c\x6c\x09\x80\x04\x00\x1e\x00\x00\x00\x00\x00\x00\x00\x00\x08\x02\x2c\x00\x00\x1f\xe1\x19\xe4\xe4\x00\x1b\x9e\xce\x54\x09\x00\x1b\x9e\xce\x54\x09\xe0\xac\xaa\xaa\x03\x00\x00\x00\x08\x06\x00\x01\x08\x00\x06\x04\x00\x02\x00\x1b\x9e\xce\x54\x09\xc0\xa8\x01\x01\x00\x1f\xe1\x19\xe4\xe4\xc0\xa8\x01\x70\x01\x70\xe0\x00\x00\xfb\x94\x04\x00\x00\x16\x00\x00\x00\xe0\x00\x00\xfb\x17\x5c\xa6\xca'
        self.rt1 = RadioTap(self.frame_orig_1)
        # RadioTap(TSTF,Flags,Rate,DBMAntSignal,DBMAntNoise,_
        # Antenna,XChannel)+802.11 Data Frame+LLC SNAP+ARP Request
        self.frame_orig_2='\x00\x00\x20\x00\x67\x08\x04\x00\x30\x03\x1a\x25\x00\x00\x00\x00\x22\x0c\xd9\xa0\x02\x00\x00\x00\x40\x01\x00\x00\x3c\x14\x24\x11\x08\x02\x00\x00\xff\xff\xff\xff\xff\xff\x06\x03\x7f\x07\xa0\x16\x00\x19\xe3\xd3\x53\x52\x90\x7f\xaa\xaa\x03\x00\x00\x00\x08\x06\x00\x01\x08\x00\x06\x04\x00\x01\x00\x19\xe3\xd3\x53\x52\xa9\xfe\xf7\x00\x00\x00\x00\x00\x00\x00\x43\x08\x0e\x36'
        self.rt2 = RadioTap(self.frame_orig_2)
def test_01_sizes(self):
'Test RadioTap frame sizes'
self.assertEqual(self.rt1.get_size(), len(self.frame_orig_1))
self.assertEqual(self.rt1.get_header_size(), 24)
self.assertEqual(self.rt1.get_body_size(), len(self.frame_orig_1)-24)
self.assertEqual(self.rt1.get_tail_size(), 0)
self.assertEqual(self.rt2.get_size(), len(self.frame_orig_2))
self.assertEqual(self.rt2.get_header_size(), 32)
self.assertEqual(self.rt2.get_body_size(), len(self.frame_orig_2)-32)
self.assertEqual(self.rt2.get_tail_size(), 0)
def test_02_version(self):
'Test RadioTap version getter/setter'
self.assertEqual(self.rt1.get_version(), 0x00)
self.rt1.set_version(1)
self.assertEqual(self.rt1.get_version(), 0x01)
self.assertEqual(self.rt2.get_version(), 0x00)
self.rt2.set_version(1)
self.assertEqual(self.rt2.get_version(), 0x01)
def test_03_present(self):
'Test RadioTap present getter'
self.assertEqual(self.rt1.get_present(), 0x0000580e)
self.assertEqual(self.rt2.get_present(), 0x00040867)
def test_04_present_bits(self):
'Test RadioTap present bits tester'
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_TSFT), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_FLAGS), True)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_RATE), True)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_CHANNEL), True)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_FHSS), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DBM_ANTSIGNAL), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DBM_ANTNOISE), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_LOCK_QUALITY), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_TX_ATTENUATION), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DB_TX_ATTENUATION), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DBM_TX_POWER), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_ANTENNA), True)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DB_ANTSIGNAL), True)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DB_ANTNOISE), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_FCS_IN_HEADER), True)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_TX_FLAGS), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_RTS_RETRIES), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_DATA_RETRIES), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_XCHANNEL), False)
self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_EXT), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_TSFT), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_FLAGS), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_RATE), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_CHANNEL), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_FHSS), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DBM_ANTSIGNAL), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DBM_ANTNOISE), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_LOCK_QUALITY), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_TX_ATTENUATION), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DB_TX_ATTENUATION), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DBM_TX_POWER), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_ANTENNA), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DB_ANTSIGNAL), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DB_ANTNOISE), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_FCS_IN_HEADER), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_TX_FLAGS), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_RTS_RETRIES), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_DATA_RETRIES), False)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_XCHANNEL), True)
self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_EXT), False)
def test_05_tsft(self):
'Test RadioTap tstf getter'
self.assertEqual(self.rt1.get_tsft(), None)
self.assertEqual(self.rt2.get_tsft(), 622461744)
    def test_06_tsft(self):
        'Test RadioTap TSFT getter/setter'
        # When the field is new: setting TSFT adds the 8-byte field and
        # grows the header accordingly.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.rt1.set_tsft(0x0102030405060708)
        self.assertEqual(self.rt1.get_tsft(),0x0102030405060708)
        self.assertEqual(self.rt1.get_header_size(),24+8)
        # When the field exists: a second set overwrites in place.
        self.rt1.set_tsft(0x0807060504030201)
        self.assertEqual(self.rt1.get_tsft(),0x0807060504030201)
        self.assertEqual(self.rt1.get_header_size(),24+8)
    def test_07_unset_fields(self):
        'Test RadioTap unset field'
        # Dropping a present field shrinks the frame/header by the field
        # width and clears its present bit: FLAGS is 1 byte, TSFT is 8.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_FLAGS), True)
        self.rt1.unset_field(RadioTap.RTF_FLAGS)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1)-1)
        self.assertEqual(self.rt1.get_header_size(),24-1)
        self.assertEqual(self.rt1.get_present_bit(RadioTap.RTF_FLAGS), False)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_TSFT), True)
        self.rt2.unset_field(RadioTap.RTF_TSFT)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)-8)
        self.assertEqual(self.rt2.get_header_size(),32-8)
        self.assertEqual(self.rt2.get_present_bit(RadioTap.RTF_TSFT), False)
    def test_08_flags_field(self):
        'Test RadioTap flags getter/setter'
        # When the field exists: flags is one byte; sizes stay constant.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_flags(),0x10)
        self.rt1.set_flags(0xAB)
        self.assertEqual(self.rt1.get_flags(),0xAB)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_flags(),0x22)
        self.rt2.set_flags(0xAB)
        self.assertEqual(self.rt2.get_flags(),0xAB)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        # TODO: Test the size when the field is new
    def test_09_rate_field(self):
        'Test RadioTap rate getter/setter'
        # When the field exists: rate is one byte; sizes stay constant.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_rate(),0x6c)
        self.rt1.set_rate(0xAB)
        self.assertEqual(self.rt1.get_rate(),0xAB)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_rate(),0x0c)
        self.rt2.set_rate(0xAB)
        self.assertEqual(self.rt2.get_rate(),0xAB)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        # TODO: Test the size when the field is new
    def test_10_channel_field(self):
        'Test RadioTap channel getter/setter'
        # When the field exists: channel is (freq, flags), 4 bytes total.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_channel(),(2412,0x0480))
        self.rt1.set_channel( freq=1234, flags=0x5678 )
        self.assertEqual(self.rt1.get_channel(),(1234,0x5678))
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        # When the field is new: adding it grows the header by 4 bytes.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_channel(),None)
        self.rt2.set_channel( freq=1234, flags=0x5678 )
        self.assertEqual(self.rt2.get_channel(),(1234,0x5678))
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+4)
        self.assertEqual(self.rt2.get_header_size(),32+4)
    def test_11_fhss_field(self):
        'Test RadioTap FHSS getter/setter'
        # TODO: When exist the field
        # When the field is new: FHSS is (hop_set, hop_pattern), 2 bytes.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_FHSS(),None)
        self.rt2.set_FHSS( hop_set=0xAB, hop_pattern=0xCD )
        self.assertEqual(self.rt2.get_FHSS(),(0xAB,0xCD))
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
        self.assertEqual(self.rt2.get_header_size(),32+2)
    def test_12_dBm_ant_signal_field(self):
        'Test RadioTap dBm Antenna Signal getter/setter'
        # When the field exists (frame 2): overwrite in place, size constant.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_dBm_ant_signal(),0xd9)
        self.rt2.set_dBm_ant_signal( signal=0xF1 )
        self.assertEqual(self.rt2.get_dBm_ant_signal(),0xF1)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        # When the field is new (frame 1): header grows by one byte.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_dBm_ant_signal(),None)
        self.rt1.set_dBm_ant_signal( signal=0xF1 )
        self.assertEqual(self.rt1.get_dBm_ant_signal(),0xF1)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1)+1)
        self.assertEqual(self.rt1.get_header_size(),24+1)
    def test_13_dBm_ant_noise_field(self):
        'Test RadioTap dBm Antenna Noise getter/setter'
        # When the field exists (frame 2): overwrite in place, size constant.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_dBm_ant_noise(),0xa0)
        self.rt2.set_dBm_ant_noise( signal=0xF1 )
        self.assertEqual(self.rt2.get_dBm_ant_noise(),0xF1)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        # When the field is new (frame 1): header grows by one byte.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_dBm_ant_noise(),None)
        self.rt1.set_dBm_ant_noise( signal=0xF1 )
        self.assertEqual(self.rt1.get_dBm_ant_noise(),0xF1)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1)+1)
        self.assertEqual(self.rt1.get_header_size(),24+1)
    def test_14_lock_quality_field(self):
        'Test RadioTap Lock Quality getter/setter'
        # TODO: When exist the field
        # When the field is new: lock quality is a 16-bit value (+2 bytes).
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_lock_quality(),None)
        self.rt2.set_lock_quality(quality=0xABBA )
        self.assertEqual(self.rt2.get_lock_quality(),0xABBA)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
        self.assertEqual(self.rt2.get_header_size(),32+2)
    def test_15_tx_attenuation_field(self):
        'Test RadioTap Tx Attenuation getter/setter'
        # TODO: When exist the field
        # When the field is new: 16-bit value (+2 bytes).
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_tx_attenuation(),None)
        self.rt2.set_tx_attenuation(power=0xABBA )
        self.assertEqual(self.rt2.get_tx_attenuation(),0xABBA)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
        self.assertEqual(self.rt2.get_header_size(),32+2)
    def test_16_dB_tx_attenuation_field(self):
        'Test RadioTap dB Tx Attenuation getter/setter'
        # TODO: When exist the field
        # When the field is new: 16-bit value (+2 bytes).
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_dB_tx_attenuation(),None)
        self.rt2.set_dB_tx_attenuation(power=0xABBA )
        self.assertEqual(self.rt2.get_dB_tx_attenuation(),0xABBA)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
        self.assertEqual(self.rt2.get_header_size(),32+2)
    def test_17_dBm_tx_power_field(self):
        'Test RadioTap dBm Tx Power getter/setter'
        # TODO: When exist the field
        # When the field is new: signed single byte (+1); negatives allowed.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_dBm_tx_power(),None)
        self.rt2.set_dBm_tx_power(power=-8)
        self.assertEqual(self.rt2.get_dBm_tx_power(),-8)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+1)
        self.assertEqual(self.rt2.get_header_size(),32+1)
    def test_18_antenna_field(self):
        'Test RadioTap Antenna getter/setter'
        # When the field exists: both fixture frames carry it; overwriting
        # leaves the sizes unchanged.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_antenna(),0x02)
        self.rt2.set_antenna( antenna_index=0xF1 )
        self.assertEqual(self.rt2.get_antenna(),0xF1)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_antenna(),0x00)
        self.rt1.set_antenna( antenna_index=0xF1 )
        self.assertEqual(self.rt1.get_antenna(),0xF1)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        # TODO: When the field is new
    def test_19_dB_ant_signal_field(self):
        'Test RadioTap dB Antenna Signal getter/setter'
        # When the field exists (frame 1): overwrite in place, size constant.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_dB_ant_signal(),0x1e)
        self.rt1.set_dB_ant_signal( signal=0xF1 )
        self.assertEqual(self.rt1.get_dB_ant_signal(),0xF1)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        # When the field is new (frame 2): header grows by one byte.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_dB_ant_signal(),None)
        self.rt2.set_dB_ant_signal( signal=0xF1 )
        self.assertEqual(self.rt2.get_dB_ant_signal(),0xF1)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+1)
        self.assertEqual(self.rt2.get_header_size(),32+1)
    def test_20_dB_ant_noise_field(self):
        'Test RadioTap dB Antenna Noise getter/setter'
        # When the field is new: neither fixture frame carries this field,
        # so setting it grows each header by one byte.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_dB_ant_noise(),None)
        self.rt2.set_dB_ant_noise( signal=0xF1 )
        self.assertEqual(self.rt2.get_dB_ant_noise(),0xF1)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+1)
        self.assertEqual(self.rt2.get_header_size(),32+1)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_dB_ant_noise(),None)
        self.rt1.set_dB_ant_noise( signal=0xF1 )
        self.assertEqual(self.rt1.get_dB_ant_noise(),0xF1)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1)+1)
        self.assertEqual(self.rt1.get_header_size(),24+1)
        # TODO: When exist the field
## def test_21_rx_flags_field(self):
## 'Test RadioTap RX Flags getter/setter'
##
## # When the field is new
## self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
## self.assertEqual(self.rt2.get_header_size(),32)
## self.assertEqual(self.rt2.get_rx_flags(),None)
## self.rt2.set_rx_flags( signal=0xABBA )
## self.assertEqual(self.rt2.get_rx_flags(),0xABBA)
## self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
## self.assertEqual(self.rt2.get_header_size(),32+2)
##
## # TODO: When exist the field
    def test_22_FCS_in_header_field(self):
        'Test RadioTap FCS in header getter/setter'
        # When the field exists (frame 1): 32-bit FCS, size unchanged.
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        self.assertEqual(self.rt1.get_FCS_in_header(),0x00000000)
        self.rt1.set_FCS_in_header( fcs=0x89ABCDEF )
        self.assertEqual(self.rt1.get_FCS_in_header(),0x89ABCDEF)
        self.assertEqual(self.rt1.get_size(),len(self.frame_orig_1))
        self.assertEqual(self.rt1.get_header_size(),24)
        # When the field is new (frame 2): header grows by 4 bytes.
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_FCS_in_header(),None)
        self.rt2.set_FCS_in_header( fcs=0x89ABCDEF )
        self.assertEqual(self.rt2.get_FCS_in_header(),0x89ABCDEF)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+4)
        self.assertEqual(self.rt2.get_header_size(),32+4)
## def test_23_rssi_field(self):
## 'Test RadioTap RSSI getter/setter'
##
## # When the field is new
## self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
## self.assertEqual(self.rt2.get_header_size(),32)
## self.assertEqual(self.rt2.get_RSSI(),None)
## self.rt2.set_RSSI( rssi=0xBA, max_rssi=0xAB )
## self.assertEqual(self.rt2.get_RSSI(),( 0xBA, 0xAB))
## self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
## self.assertEqual(self.rt2.get_header_size(),32+2)
##
## # TODO: When exist the field
    def test_24_RTS_retries_field(self):
        'Test RadioTap RTS retries getter/setter'
        # When the field is new: one-byte retry counter (+1).
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_RTS_retries(),None)
        self.rt2.set_RTS_retries( retries=0xBA )
        self.assertEqual(self.rt2.get_RTS_retries(), 0xBA)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+1)
        self.assertEqual(self.rt2.get_header_size(),32+1)
        # TODO: When exist the field
    def test_25_tx_flags_field(self):
        'Test RadioTap TX flags getter/setter'
        # When the field is new: 16-bit flags word (+2 bytes).
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
        self.assertEqual(self.rt2.get_header_size(),32)
        self.assertEqual(self.rt2.get_tx_flags(),None)
        self.rt2.set_tx_flags( flags=0xABBA )
        self.assertEqual(self.rt2.get_tx_flags(),0xABBA)
        self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+2)
        self.assertEqual(self.rt2.get_header_size(),32+2)
        # TODO: When exist the field
    def test_26_xchannel_field(self):
        'Test RadioTap xchannel getter/setter'
        # Frame 2 carries an XChannel field: (flags, freq, channel, maxpower).
        (ch_type,ch_freq,ch_num,ch_maxpower)=self.rt2.get_xchannel()
        self.assertEqual(ch_type,0x00000140)
        self.assertEqual(ch_freq,5180)
        self.assertEqual(ch_num,36)
        self.assertEqual(ch_maxpower,0x11)
        # Round-trip: set new values and read them back.
        (ch_type,ch_freq,ch_num,ch_maxpower)=(0x12345678, 1234, 12, 34)
        self.rt2.set_xchannel(flags=ch_type, freq=ch_freq, channel=ch_num, maxpower=ch_maxpower)
        (nch_type,nch_freq,nch_num,nch_maxpower)=self.rt2.get_xchannel()
        self.assertEqual(ch_type,nch_type)
        self.assertEqual(ch_freq,nch_freq)
        self.assertEqual(ch_num,nch_num)
        self.assertEqual(ch_maxpower,nch_maxpower)
def test_27_data_retries_field(self):
'Test RadioTap Data retries getter/setter'
# When the field is new
self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
self.assertEqual(self.rt2.get_header_size(),32)
self.assertEqual(self.rt2.get_data_retries(),None)
self.rt2.set_data_retries( retries=0xAB )
self.assertEqual(self.rt2.get_data_retries(),0xAB)
self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+1)
self.assertEqual(self.rt2.get_header_size(),32+1)
# TODO: When exist the field
## def test_28_hardware_queue_field(self):
## 'Test RadioTap Hardware Queue getter/setter'
##
## # When the field is new
## self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2))
## self.assertEqual(self.rt2.get_header_size(),32)
## self.assertEqual(self.rt2.get_hardware_queue(),None)
## self.rt2.set_hardware_queue( queue=0xAB )
## self.assertEqual(self.rt2.get_hardware_queue(),0xAB)
## self.assertEqual(self.rt2.get_size(),len(self.frame_orig_2)+1)
## self.assertEqual(self.rt2.get_header_size(),32+1)
##
## # TODO: When exist the field
def test_29_radiotap_length_field(self):
'Test RadioTap header length field'
# RadioTap from scratch calling get_length() and then get_packet()
rt = RadioTap()
# 0x08 bytes is the minimal headers size:
# 1 byte Revision
# 1 byte pad
# 2 bytes header length
# 4 bytes present flags
self.assertEqual(rt.get_header_length(), 0x08)
raw_packet = rt.get_packet()
self.assertEqual(raw_packet, "\x00\x00\x08\x00\x00\x00\x00\x00")
# RadioTap from scratch without call to get_length()
raw_packet = RadioTap().get_packet()
self.assertEqual(raw_packet, "\x00\x00\x08\x00\x00\x00\x00\x00")
def test_30_radiotap_length_filed_with_payload(self):
'Test RadioTap header length field with payload'
# RadioTap from scratch calling get_length() and then get_packet()
rt = RadioTap()
self.assertEqual(rt.get_header_length(), 0x08)
data = Data("aa")
rt.contains(data)
self.assertEqual(rt.get_header_length(), 0x08) # The header length is the same
raw_packet = rt.get_packet()
self.assertEqual(raw_packet, "\x00\x00\x08\x00\x00\x00\x00\x00aa")
# Collect every TestRadioTap test and run it with verbose per-test output.
unittest.TextTestRunner(verbosity=2).run(
    unittest.TestLoader().loadTestsFromTestCase(TestRadioTap))
| apache-2.0 |
bop/rango | lib/python2.7/site-packages/django/contrib/auth/tests/signals.py | 227 | 3278 | from django.contrib.auth import signals
from django.contrib.auth.models import User
from django.contrib.auth.tests.utils import skipIfCustomUser
from django.test import TestCase
from django.test.client import RequestFactory
from django.test.utils import override_settings
@skipIfCustomUser
@override_settings(USE_TZ=False, PASSWORD_HASHERS=('django.contrib.auth.hashers.SHA1PasswordHasher',))
class SignalTestCase(TestCase):
    """Verify that the auth signals (user_logged_in, user_logged_out,
    user_login_failed) fire at the right times with the right payloads."""
    urls = 'django.contrib.auth.tests.urls'
    fixtures = ['authtestdata.json']
    def listener_login(self, user, **kwargs):
        """Record each user reported by the user_logged_in signal."""
        self.logged_in.append(user)
    def listener_logout(self, user, **kwargs):
        """Record each user reported by the user_logged_out signal."""
        self.logged_out.append(user)
    def listener_login_failed(self, sender, credentials, **kwargs):
        """Record the credentials reported by the user_login_failed signal."""
        self.login_failed.append(credentials)
    def setUp(self):
        """Set up the listeners and reset the logged in/logged out counters"""
        self.logged_in = []
        self.logged_out = []
        self.login_failed = []
        signals.user_logged_in.connect(self.listener_login)
        signals.user_logged_out.connect(self.listener_logout)
        signals.user_login_failed.connect(self.listener_login_failed)
    def tearDown(self):
        """Disconnect the listeners"""
        signals.user_logged_in.disconnect(self.listener_login)
        signals.user_logged_out.disconnect(self.listener_logout)
        signals.user_login_failed.disconnect(self.listener_login_failed)
    def test_login(self):
        """A bad password fires user_login_failed (with the password
        cleansed); a good password fires user_logged_in exactly once."""
        # Only a successful login will trigger the success signal.
        self.client.login(username='testclient', password='bad')
        self.assertEqual(len(self.logged_in), 0)
        self.assertEqual(len(self.login_failed), 1)
        self.assertEqual(self.login_failed[0]['username'], 'testclient')
        # verify the password is cleansed
        self.assertTrue('***' in self.login_failed[0]['password'])
        # Like this:
        self.client.login(username='testclient', password='password')
        self.assertEqual(len(self.logged_in), 1)
        self.assertEqual(self.logged_in[0].username, 'testclient')
        # Ensure there were no more failures.
        self.assertEqual(len(self.login_failed), 1)
    def test_logout_anonymous(self):
        # The log_out function will still trigger the signal for anonymous
        # users.
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        # No authenticated user, so the signal reports None as the user.
        self.assertEqual(self.logged_out[0], None)
    def test_logout(self):
        """Logging out an authenticated user fires user_logged_out with
        that user."""
        self.client.login(username='testclient', password='password')
        self.client.get('/logout/next_page/')
        self.assertEqual(len(self.logged_out), 1)
        self.assertEqual(self.logged_out[0].username, 'testclient')
    def test_update_last_login(self):
        """Ensure that only `last_login` is updated in `update_last_login`"""
        user = User.objects.get(pk=3)
        old_last_login = user.last_login
        user.username = "This username shouldn't get saved"
        request = RequestFactory().get('/login')
        signals.user_logged_in.send(sender=user.__class__, request=request,
                                    user=user)
        # Reload from the DB: the in-memory username change must not have
        # been persisted, but last_login must have been refreshed.
        user = User.objects.get(pk=3)
        self.assertEqual(user.username, 'staff')
        self.assertNotEqual(user.last_login, old_last_login)
| gpl-2.0 |
jallohm/django | tests/model_meta/results.py | 254 | 27061 | from .models import AbstractPerson, BasePerson, Person, Relating, Relation
TEST_RESULTS = {
'get_all_field_names': {
Person: [
'baseperson_ptr',
'baseperson_ptr_id',
'content_type_abstract',
'content_type_abstract_id',
'content_type_base',
'content_type_base_id',
'content_type_concrete',
'content_type_concrete_id',
'data_abstract',
'data_base',
'data_inherited',
'data_not_concrete_abstract',
'data_not_concrete_base',
'data_not_concrete_inherited',
'fk_abstract',
'fk_abstract_id',
'fk_base',
'fk_base_id',
'fk_inherited',
'fk_inherited_id',
'followers_abstract',
'followers_base',
'followers_concrete',
'following_abstract',
'following_base',
'following_inherited',
'friends_abstract',
'friends_base',
'friends_inherited',
'generic_relation_abstract',
'generic_relation_base',
'generic_relation_concrete',
'id',
'm2m_abstract',
'm2m_base',
'm2m_inherited',
'object_id_abstract',
'object_id_base',
'object_id_concrete',
'relating_basepeople',
'relating_baseperson',
'relating_people',
'relating_person',
],
BasePerson: [
'content_type_abstract',
'content_type_abstract_id',
'content_type_base',
'content_type_base_id',
'data_abstract',
'data_base',
'data_not_concrete_abstract',
'data_not_concrete_base',
'fk_abstract',
'fk_abstract_id',
'fk_base',
'fk_base_id',
'followers_abstract',
'followers_base',
'following_abstract',
'following_base',
'friends_abstract',
'friends_base',
'generic_relation_abstract',
'generic_relation_base',
'id',
'm2m_abstract',
'm2m_base',
'object_id_abstract',
'object_id_base',
'person',
'relating_basepeople',
'relating_baseperson'
],
AbstractPerson: [
'content_type_abstract',
'content_type_abstract_id',
'data_abstract',
'data_not_concrete_abstract',
'fk_abstract',
'fk_abstract_id',
'following_abstract',
'friends_abstract',
'generic_relation_abstract',
'm2m_abstract',
'object_id_abstract',
],
Relating: [
'basepeople',
'basepeople_hidden',
'baseperson',
'baseperson_hidden',
'baseperson_hidden_id',
'baseperson_id',
'id',
'people',
'people_hidden',
'person',
'person_hidden',
'person_hidden_id',
'person_id',
'proxyperson',
'proxyperson_hidden',
'proxyperson_hidden_id',
'proxyperson_id',
],
},
'fields': {
Person: [
'id',
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'data_not_concrete_base',
'content_type_base_id',
'object_id_base',
'baseperson_ptr_id',
'data_inherited',
'fk_inherited_id',
'data_not_concrete_inherited',
'content_type_concrete_id',
'object_id_concrete',
],
BasePerson: [
'id',
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'data_not_concrete_base',
'content_type_base_id',
'object_id_base',
],
AbstractPerson: [
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
],
Relating: [
'id',
'baseperson_id',
'baseperson_hidden_id',
'person_id',
'person_hidden_id',
'proxyperson_id',
'proxyperson_hidden_id',
],
},
'local_fields': {
Person: [
'baseperson_ptr_id',
'data_inherited',
'fk_inherited_id',
'data_not_concrete_inherited',
'content_type_concrete_id',
'object_id_concrete',
],
BasePerson: [
'id',
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'data_not_concrete_base',
'content_type_base_id',
'object_id_base',
],
AbstractPerson: [
'data_abstract',
'fk_abstract_id',
'data_not_concrete_abstract',
'content_type_abstract_id',
'object_id_abstract',
],
Relating: [
'id',
'baseperson_id',
'baseperson_hidden_id',
'person_id',
'person_hidden_id',
'proxyperson_id',
'proxyperson_hidden_id',
],
},
'local_concrete_fields': {
Person: [
'baseperson_ptr_id',
'data_inherited',
'fk_inherited_id',
'content_type_concrete_id',
'object_id_concrete',
],
BasePerson: [
'id',
'data_abstract',
'fk_abstract_id',
'content_type_abstract_id',
'object_id_abstract',
'data_base',
'fk_base_id',
'content_type_base_id',
'object_id_base',
],
AbstractPerson: [
'data_abstract',
'fk_abstract_id',
'content_type_abstract_id',
'object_id_abstract',
],
Relating: [
'id',
'baseperson_id',
'baseperson_hidden_id',
'person_id',
'person_hidden_id',
'proxyperson_id',
'proxyperson_hidden_id',
],
},
'many_to_many': {
Person: [
'm2m_abstract',
'friends_abstract',
'following_abstract',
'm2m_base',
'friends_base',
'following_base',
'm2m_inherited',
'friends_inherited',
'following_inherited',
],
BasePerson: [
'm2m_abstract',
'friends_abstract',
'following_abstract',
'm2m_base',
'friends_base',
'following_base',
],
AbstractPerson: [
'm2m_abstract',
'friends_abstract',
'following_abstract',
],
Relating: [
'basepeople',
'basepeople_hidden',
'people',
'people_hidden',
],
},
'many_to_many_with_model': {
Person: [
BasePerson,
BasePerson,
BasePerson,
BasePerson,
BasePerson,
BasePerson,
None,
None,
None,
],
BasePerson: [
None,
None,
None,
None,
None,
None,
],
AbstractPerson: [
None,
None,
None,
],
Relating: [
None,
None,
None,
None,
],
},
'get_all_related_objects_with_model_legacy': {
Person: (
('relating_baseperson', BasePerson),
('relating_person', None),
),
BasePerson: (
('person', None),
('relating_baseperson', None),
),
Relation: (
('fk_abstract_rel', None),
('fo_abstract_rel', None),
('fk_base_rel', None),
('fo_base_rel', None),
('fk_concrete_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_hidden_local': {
Person: (
('+', None),
('_relating_people_hidden_+', None),
('Person_following_inherited+', None),
('Person_following_inherited+', None),
('Person_friends_inherited+', None),
('Person_friends_inherited+', None),
('Person_m2m_inherited+', None),
('Relating_people+', None),
('Relating_people_hidden+', None),
('followers_concrete', None),
('friends_inherited_rel_+', None),
('relating_people', None),
('relating_person', None),
),
BasePerson: (
('+', None),
('_relating_basepeople_hidden_+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_base+', None),
('BasePerson_following_base+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_base+', None),
('BasePerson_friends_base+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Relating_basepeople+', None),
('Relating_basepeople_hidden+', None),
('followers_abstract', None),
('followers_base', None),
('friends_abstract_rel_+', None),
('friends_base_rel_+', None),
('person', None),
('relating_basepeople', None),
('relating_baseperson', None),
),
Relation: (
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Person_m2m_inherited+', None),
('fk_abstract_rel', None),
('fk_base_rel', None),
('fk_concrete_rel', None),
('fo_abstract_rel', None),
('fo_base_rel', None),
('fo_concrete_rel', None),
('m2m_abstract_rel', None),
('m2m_base_rel', None),
('m2m_concrete_rel', None),
),
},
'get_all_related_objects_with_model_hidden': {
Person: (
('+', BasePerson),
('+', None),
('_relating_basepeople_hidden_+', BasePerson),
('_relating_people_hidden_+', None),
('BasePerson_following_abstract+', BasePerson),
('BasePerson_following_abstract+', BasePerson),
('BasePerson_following_base+', BasePerson),
('BasePerson_following_base+', BasePerson),
('BasePerson_friends_abstract+', BasePerson),
('BasePerson_friends_abstract+', BasePerson),
('BasePerson_friends_base+', BasePerson),
('BasePerson_friends_base+', BasePerson),
('BasePerson_m2m_abstract+', BasePerson),
('BasePerson_m2m_base+', BasePerson),
('Person_following_inherited+', None),
('Person_following_inherited+', None),
('Person_friends_inherited+', None),
('Person_friends_inherited+', None),
('Person_m2m_inherited+', None),
('Relating_basepeople+', BasePerson),
('Relating_basepeople_hidden+', BasePerson),
('Relating_people+', None),
('Relating_people_hidden+', None),
('followers_abstract', BasePerson),
('followers_base', BasePerson),
('followers_concrete', None),
('friends_abstract_rel_+', BasePerson),
('friends_base_rel_+', BasePerson),
('friends_inherited_rel_+', None),
('relating_basepeople', BasePerson),
('relating_baseperson', BasePerson),
('relating_people', None),
('relating_person', None),
),
BasePerson: (
('+', None),
('_relating_basepeople_hidden_+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_base+', None),
('BasePerson_following_base+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_base+', None),
('BasePerson_friends_base+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Relating_basepeople+', None),
('Relating_basepeople_hidden+', None),
('followers_abstract', None),
('followers_base', None),
('friends_abstract_rel_+', None),
('friends_base_rel_+', None),
('person', None),
('relating_basepeople', None),
('relating_baseperson', None),
),
Relation: (
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Person_m2m_inherited+', None),
('fk_abstract_rel', None),
('fk_base_rel', None),
('fk_concrete_rel', None),
('fo_abstract_rel', None),
('fo_base_rel', None),
('fo_concrete_rel', None),
('m2m_abstract_rel', None),
('m2m_base_rel', None),
('m2m_concrete_rel', None),
),
},
'get_all_related_objects_with_model_local': {
Person: (
('followers_concrete', None),
('relating_person', None),
('relating_people', None),
),
BasePerson: (
('followers_abstract', None),
('followers_base', None),
('person', None),
('relating_baseperson', None),
('relating_basepeople', None),
),
Relation: (
('fk_abstract_rel', None),
('fo_abstract_rel', None),
('fk_base_rel', None),
('fo_base_rel', None),
('m2m_abstract_rel', None),
('m2m_base_rel', None),
('fk_concrete_rel', None),
('fo_concrete_rel', None),
('m2m_concrete_rel', None),
),
},
'get_all_related_objects_with_model': {
Person: (
('followers_abstract', BasePerson),
('followers_base', BasePerson),
('relating_baseperson', BasePerson),
('relating_basepeople', BasePerson),
('followers_concrete', None),
('relating_person', None),
('relating_people', None),
),
BasePerson: (
('followers_abstract', None),
('followers_base', None),
('person', None),
('relating_baseperson', None),
('relating_basepeople', None),
),
Relation: (
('fk_abstract_rel', None),
('fo_abstract_rel', None),
('fk_base_rel', None),
('fo_base_rel', None),
('m2m_abstract_rel', None),
('m2m_base_rel', None),
('fk_concrete_rel', None),
('fo_concrete_rel', None),
('m2m_concrete_rel', None),
),
},
'get_all_related_objects_with_model_local_legacy': {
Person: (
('relating_person', None),
),
BasePerson: (
('person', None),
('relating_baseperson', None)
),
Relation: (
('fk_abstract_rel', None),
('fo_abstract_rel', None),
('fk_base_rel', None),
('fo_base_rel', None),
('fk_concrete_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_hidden_legacy': {
BasePerson: (
('+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_base+', None),
('BasePerson_following_base+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_base+', None),
('BasePerson_friends_base+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Relating_basepeople+', None),
('Relating_basepeople_hidden+', None),
('person', None),
('relating_baseperson', None),
),
Person: (
('+', BasePerson),
('+', None),
('BasePerson_following_abstract+', BasePerson),
('BasePerson_following_abstract+', BasePerson),
('BasePerson_following_base+', BasePerson),
('BasePerson_following_base+', BasePerson),
('BasePerson_friends_abstract+', BasePerson),
('BasePerson_friends_abstract+', BasePerson),
('BasePerson_friends_base+', BasePerson),
('BasePerson_friends_base+', BasePerson),
('BasePerson_m2m_abstract+', BasePerson),
('BasePerson_m2m_base+', BasePerson),
('Person_following_inherited+', None),
('Person_following_inherited+', None),
('Person_friends_inherited+', None),
('Person_friends_inherited+', None),
('Person_m2m_inherited+', None),
('Relating_basepeople+', BasePerson),
('Relating_basepeople_hidden+', BasePerson),
('Relating_people+', None),
('Relating_people_hidden+', None),
('relating_baseperson', BasePerson),
('relating_person', None),
),
Relation: (
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Person_m2m_inherited+', None),
('fk_abstract_rel', None),
('fk_base_rel', None),
('fk_concrete_rel', None),
('fo_abstract_rel', None),
('fo_base_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_hidden_local_legacy': {
BasePerson: (
('+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_base+', None),
('BasePerson_following_base+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_base+', None),
('BasePerson_friends_base+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Relating_basepeople+', None),
('Relating_basepeople_hidden+', None),
('person', None),
('relating_baseperson', None),
),
Person: (
('+', None),
('Person_following_inherited+', None),
('Person_following_inherited+', None),
('Person_friends_inherited+', None),
('Person_friends_inherited+', None),
('Person_m2m_inherited+', None),
('Relating_people+', None),
('Relating_people_hidden+', None),
('relating_person', None),
),
Relation: (
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Person_m2m_inherited+', None),
('fk_abstract_rel', None),
('fk_base_rel', None),
('fk_concrete_rel', None),
('fo_abstract_rel', None),
('fo_base_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_proxy_legacy': {
BasePerson: (
('person', None),
('relating_baseperson', None),
),
Person: (
('relating_baseperson', BasePerson),
('relating_person', None), ('relating_proxyperson', None),
),
Relation: (
('fk_abstract_rel', None), ('fo_abstract_rel', None),
('fk_base_rel', None), ('fo_base_rel', None),
('fk_concrete_rel', None), ('fo_concrete_rel', None),
),
},
'get_all_related_objects_with_model_proxy_hidden_legacy': {
BasePerson: (
('+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_abstract+', None),
('BasePerson_following_base+', None),
('BasePerson_following_base+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_abstract+', None),
('BasePerson_friends_base+', None),
('BasePerson_friends_base+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Relating_basepeople+', None),
('Relating_basepeople_hidden+', None),
('person', None),
('relating_baseperson', None),
),
Person: (
('+', BasePerson),
('+', None),
('+', None),
('BasePerson_following_abstract+', BasePerson),
('BasePerson_following_abstract+', BasePerson),
('BasePerson_following_base+', BasePerson),
('BasePerson_following_base+', BasePerson),
('BasePerson_friends_abstract+', BasePerson),
('BasePerson_friends_abstract+', BasePerson),
('BasePerson_friends_base+', BasePerson),
('BasePerson_friends_base+', BasePerson),
('BasePerson_m2m_abstract+', BasePerson),
('BasePerson_m2m_base+', BasePerson),
('Person_following_inherited+', None),
('Person_following_inherited+', None),
('Person_friends_inherited+', None),
('Person_friends_inherited+', None),
('Person_m2m_inherited+', None),
('Relating_basepeople+', BasePerson),
('Relating_basepeople_hidden+', BasePerson),
('Relating_people+', None),
('Relating_people_hidden+', None),
('relating_baseperson', BasePerson),
('relating_person', None),
('relating_proxyperson', None),
),
Relation: (
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('+', None),
('BasePerson_m2m_abstract+', None),
('BasePerson_m2m_base+', None),
('Person_m2m_inherited+', None),
('fk_abstract_rel', None),
('fk_base_rel', None),
('fk_concrete_rel', None),
('fo_abstract_rel', None),
('fo_base_rel', None),
('fo_concrete_rel', None),
),
},
'get_all_related_many_to_many_with_model_legacy': {
BasePerson: (
('friends_abstract_rel_+', None),
('followers_abstract', None),
('friends_base_rel_+', None),
('followers_base', None),
('relating_basepeople', None),
('_relating_basepeople_hidden_+', None),
),
Person: (
('friends_abstract_rel_+', BasePerson),
('followers_abstract', BasePerson),
('friends_base_rel_+', BasePerson),
('followers_base', BasePerson),
('relating_basepeople', BasePerson),
('_relating_basepeople_hidden_+', BasePerson),
('friends_inherited_rel_+', None),
('followers_concrete', None),
('relating_people', None),
('_relating_people_hidden_+', None),
),
Relation: (
('m2m_abstract_rel', None),
('m2m_base_rel', None),
('m2m_concrete_rel', None),
),
},
'get_all_related_many_to_many_local_legacy': {
BasePerson: [
'friends_abstract_rel_+',
'followers_abstract',
'friends_base_rel_+',
'followers_base',
'relating_basepeople',
'_relating_basepeople_hidden_+',
],
Person: [
'friends_inherited_rel_+',
'followers_concrete',
'relating_people',
'_relating_people_hidden_+',
],
Relation: [
'm2m_abstract_rel',
'm2m_base_rel',
'm2m_concrete_rel',
],
},
'virtual_fields': {
AbstractPerson: [
'generic_relation_abstract',
'content_object_abstract',
],
BasePerson: [
'generic_relation_base',
'content_object_base',
'generic_relation_abstract',
'content_object_abstract',
],
Person: [
'content_object_concrete',
'generic_relation_concrete',
'generic_relation_base',
'content_object_base',
'generic_relation_abstract',
'content_object_abstract',
],
},
'labels': {
AbstractPerson: 'model_meta.AbstractPerson',
BasePerson: 'model_meta.BasePerson',
Person: 'model_meta.Person',
Relating: 'model_meta.Relating',
},
'lower_labels': {
AbstractPerson: 'model_meta.abstractperson',
BasePerson: 'model_meta.baseperson',
Person: 'model_meta.person',
Relating: 'model_meta.relating',
},
}
| bsd-3-clause |
spacewalkproject/spacewalk | backend/server/test/TestProxy.py | 14 | 1090 | #
# Copyright (c) 2008--2015 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import TestServer
import server.redhat_xmlrpc.downloads
class TestProxy(TestServer.TestServer):
    """Test server variant that also exposes the Red Hat XML-RPC downloads
    handler."""
    def __init__(self):
        # Old-style base class, so the parent initializer is called
        # explicitly rather than via super().
        TestServer.TestServer.__init__(self)
        self._init_redhat_xmlrpc_downloads()
    def _init_redhat_xmlrpc_downloads(self):
        # One shared handler instance for the downloads XML-RPC namespace.
        self.downloads = server.redhat_xmlrpc.downloads.Downloads()
    def getDownloads(self):
        """Return the XML-RPC downloads handler created in __init__."""
        return self.downloads
if __name__ == "__main__":
server = TestProxy()
downloads = server.getDownloads()
| gpl-2.0 |
Ishiihara/kafka | tests/kafkatest/services/replica_verification_tool.py | 15 | 4103 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.services.background_thread import BackgroundThreadService
from kafkatest.directory_layout.kafka_path import KafkaPathResolverMixin
from kafkatest.services.security.security_config import SecurityConfig
import re
class ReplicaVerificationTool(KafkaPathResolverMixin, BackgroundThreadService):
    """Background service that runs kafka.tools.ReplicaVerificationTool on a
    node and scrapes its output to track the most recent maximum replica lag
    reported for each topic-partition."""

    logs = {
        "producer_log": {
            "path": "/mnt/replica_verification_tool.log",
            "collect_default": False}
    }

    # Compiled once instead of on every output line.  Captures the lag value
    # and the "<topic>-<partition>" name from lines such as
    # "... max lag is 5 for partition my-topic-0 at ...".
    _MAX_LAG_PATTERN = re.compile('.*max lag is (.+?) for partition ([a-zA-Z0-9._-]+-[0-9]+) at')

    def __init__(self, context, num_nodes, kafka, topic, report_interval_ms, security_protocol="PLAINTEXT", stop_timeout_sec=30):
        """
        :param kafka: KafkaService whose replicas are verified.
        :param topic: topic whitelist passed to the tool.
        :param report_interval_ms: reporting interval for the tool, in ms.
        :param security_protocol: protocol used to reach the brokers.
        :param stop_timeout_sec: how long stop_node waits for clean shutdown.
        """
        super(ReplicaVerificationTool, self).__init__(context, num_nodes)
        self.kafka = kafka
        self.topic = topic
        self.report_interval_ms = report_interval_ms
        self.security_protocol = security_protocol
        self.security_config = SecurityConfig(self.context, security_protocol)
        # Maps "<topic>-<partition>" to the most recently parsed max lag.
        self.partition_lag = {}
        self.stop_timeout_sec = stop_timeout_sec

    def _worker(self, idx, node):
        # Launch the tool on the node and parse its output as it streams in.
        cmd = self.start_cmd(node)
        self.logger.debug("ReplicaVerificationTool %d command: %s" % (idx, cmd))
        self.security_config.setup_node(node)
        for line in node.account.ssh_capture(cmd):
            self.logger.debug("Parsing line:{}".format(line))
            parsed = self._MAX_LAG_PATTERN.search(line)
            if parsed:
                lag = int(parsed.group(1))
                topic_partition = parsed.group(2)
                self.logger.debug("Setting max lag for {} as {}".format(topic_partition, lag))
                self.partition_lag[topic_partition] = lag

    def get_lag_for_partition(self, topic, partition):
        """
        Get latest lag for given topic-partition, or -1 if no lag has been
        reported for it yet.

        Args:
            topic:      a topic
            partition:  a partition of the topic
        """
        topic_partition = topic + '-' + str(partition)
        lag = self.partition_lag.get(topic_partition, -1)
        self.logger.debug("Returning lag for {} as {}".format(topic_partition, lag))
        return lag

    def start_cmd(self, node):
        """Build the shell command that launches the tool on the given node."""
        cmd = self.path.script("kafka-run-class.sh", node)
        cmd += " %s" % self.java_class_name()
        cmd += " --broker-list %s --topic-white-list %s --time -2 --report-interval-ms %s" % (self.kafka.bootstrap_servers(self.security_protocol), self.topic, self.report_interval_ms)
        # Mirror stderr/stdout into the log file and background the process.
        cmd += " 2>> /mnt/replica_verification_tool.log | tee -a /mnt/replica_verification_tool.log &"
        return cmd

    def stop_node(self, node):
        # Request a clean JVM shutdown, then wait up to stop_timeout_sec.
        node.account.kill_java_processes(self.java_class_name(), clean_shutdown=True,
                                         allow_fail=True)
        stopped = self.wait_node(node, timeout_sec=self.stop_timeout_sec)
        assert stopped, "Node %s: did not stop within the specified timeout of %s seconds" % \
                        (str(node.account), str(self.stop_timeout_sec))

    def clean_node(self, node):
        # Hard-kill anything left over and remove the log file.
        node.account.kill_java_processes(self.java_class_name(), clean_shutdown=False,
                                         allow_fail=True)
        node.account.ssh("rm -rf /mnt/replica_verification_tool.log", allow_fail=False)

    def java_class_name(self):
        return "kafka.tools.ReplicaVerificationTool"
| apache-2.0 |
thunderhoser/GewitterGefahr | gewittergefahr/scripts/run_bwo_for_swirlnet.py | 1 | 11719 | """Runs backwards optimization on Swirlnet model."""
import os
os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import argparse
import numpy
from keras import backend as K
from keras.models import load_model as load_keras_model
from gewittergefahr.deep_learning import model_interpretation
from gewittergefahr.deep_learning import backwards_optimization as backwards_opt
# random.seed(6695)
# numpy.random.seed(6695)
# Single-threaded TF session with soft device placement disabled; together
# with CUDA_VISIBLE_DEVICES='-1' above this pins execution to one CPU thread
# (presumably for reproducibility — note the disabled seed lines above).
K.set_session(K.tf.Session(config=K.tf.ConfigProto(
    intra_op_parallelism_threads=1, inter_op_parallelism_threads=1,
    allow_soft_placement=False
)))
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
SWIRLNET_FIELD_MEANS = numpy.array([20.745745, -0.718525, 1.929636])
SWIRLNET_FIELD_STANDARD_DEVIATIONS = numpy.array([
17.947071, 4.343980, 4.969537
])
CLASS_COMPONENT_TYPE_STRING = model_interpretation.CLASS_COMPONENT_TYPE_STRING
NEURON_COMPONENT_TYPE_STRING = model_interpretation.NEURON_COMPONENT_TYPE_STRING
CHANNEL_COMPONENT_TYPE_STRING = (
model_interpretation.CHANNEL_COMPONENT_TYPE_STRING)
MODEL_FILE_ARG_NAME = 'input_model_file_name'
INIT_FUNCTION_ARG_NAME = 'init_function_name'
COMPONENT_TYPE_ARG_NAME = 'component_type_string'
TARGET_CLASS_ARG_NAME = 'target_class'
LAYER_NAME_ARG_NAME = 'layer_name'
NEURON_INDICES_ARG_NAME = 'neuron_indices'
CHANNEL_INDEX_ARG_NAME = 'channel_index'
IDEAL_ACTIVATION_ARG_NAME = 'ideal_activation'
NUM_ITERATIONS_ARG_NAME = 'num_iterations'
LEARNING_RATE_ARG_NAME = 'learning_rate'
OUTPUT_FILE_ARG_NAME = 'output_file_name'
MODEL_FILE_HELP_STRING = (
'Path to file with trained Swirlnet model. Will be read by '
'`keras.models.load_model`.')
INIT_FUNCTION_HELP_STRING = (
'Initialization function (used to create initial input matrices for '
'gradient descent). Must be accepted by '
'`backwards_opt.check_init_function`.')
COMPONENT_HELP_STRING = (
'Determines model component for which activation will be maximized. See '
'`model_interpretation.check_component_metadata` for details.')
IDEAL_ACTIVATION_HELP_STRING = (
'[used only if {0:s} = "{1:s}" or "{2:s}"] See '
'`backwards_opt.optimize_input_for_neuron` or '
'`backwards_opt.optimize_input_for_channel` for details.'
).format(COMPONENT_TYPE_ARG_NAME, NEURON_COMPONENT_TYPE_STRING,
CLASS_COMPONENT_TYPE_STRING)
NUM_ITERATIONS_HELP_STRING = 'Number of iterations for backwards optimization.'
LEARNING_RATE_HELP_STRING = 'Learning rate for backwards optimization.'
OUTPUT_FILE_HELP_STRING = (
'Path to output file (will be written by '
'`backwards_opt.write_standard_file`).')
INPUT_ARG_PARSER = argparse.ArgumentParser()
INPUT_ARG_PARSER.add_argument(
'--' + MODEL_FILE_ARG_NAME, type=str, required=True,
help=MODEL_FILE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + INIT_FUNCTION_ARG_NAME, type=str, required=False,
default=backwards_opt.CONSTANT_INIT_FUNCTION_NAME,
help=INIT_FUNCTION_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + COMPONENT_TYPE_ARG_NAME, type=str, required=True,
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + TARGET_CLASS_ARG_NAME, type=int, required=False, default=-1,
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LAYER_NAME_ARG_NAME, type=str, required=False, default='',
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NEURON_INDICES_ARG_NAME, type=int, nargs='+', required=False,
default=[-1], help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + CHANNEL_INDEX_ARG_NAME, type=int, required=False, default=-1,
help=COMPONENT_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + IDEAL_ACTIVATION_ARG_NAME, type=float, required=False,
default=backwards_opt.DEFAULT_IDEAL_ACTIVATION,
help=IDEAL_ACTIVATION_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + NUM_ITERATIONS_ARG_NAME, type=int, required=False,
default=backwards_opt.DEFAULT_NUM_ITERATIONS,
help=NUM_ITERATIONS_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + LEARNING_RATE_ARG_NAME, type=float, required=False,
default=backwards_opt.DEFAULT_LEARNING_RATE,
help=LEARNING_RATE_HELP_STRING)
INPUT_ARG_PARSER.add_argument(
'--' + OUTPUT_FILE_ARG_NAME, type=str, required=True,
help=OUTPUT_FILE_HELP_STRING)
def _brier_score_keras(observation_tensor, class_probability_tensor):
    """Computes Brier score (mean squared probability error).

    E = number of examples
    K = number of target classes

    :param observation_tensor: E-by-K tensor of observed classes.  If
        observation_tensor[i, k] = 1, the [i]th example belongs to the [k]th
        class.
    :param class_probability_tensor: E-by-K tensor of forecast probabilities.
        class_probability_tensor[i, k] = forecast probability that the [i]th
        example belongs to the [k]th class.
    :return: brier_score: Brier score.
    """
    squared_error_tensor = (class_probability_tensor - observation_tensor) ** 2
    return K.mean(squared_error_tensor)
def _brier_skill_score_keras(observation_tensor, class_probability_tensor):
    """Computes Brier skill score: 1 - (Brier score) / uncertainty.

    Uncertainty is the variance of the observations.

    :param observation_tensor: See doc for `_brier_score_keras`.
    :param class_probability_tensor: Same.
    :return: brier_skill_score: Brier skill score.
    """
    mean_observation = K.mean(observation_tensor)
    uncertainty_tensor = K.mean((observation_tensor - mean_observation) ** 2)
    this_brier_score = _brier_score_keras(
        observation_tensor, class_probability_tensor)
    return 1. - this_brier_score / uncertainty_tensor
def _denormalize_data(input_matrix):
    """Denormalizes Swirlnet input data (in place).

    E = number of examples
    M = number of rows in grid
    N = number of columns in grid
    F = number of radar fields

    :param input_matrix: E-by-M-by-N-by-F numpy array (normalized).
    :return: input_matrix: Same array, denormalized in place.
    """
    num_fields = input_matrix.shape[-1]
    # Undo z-score normalization one field at a time: x -> mean + x * stdev.
    for field_index in range(num_fields):
        input_matrix[..., field_index] = (
            SWIRLNET_FIELD_MEANS[field_index] +
            SWIRLNET_FIELD_STANDARD_DEVIATIONS[field_index] *
            input_matrix[..., field_index]
        )
    return input_matrix
def _create_initializer(init_function_name):
    """Creates the function that initializes the image to be optimized.

    :param init_function_name: See documentation at top of file.  Anything
        other than the constant or uniform names falls through to Gaussian.
    :return: init_function: Initializer function.
    """
    if init_function_name == backwards_opt.CONSTANT_INIT_FUNCTION_NAME:
        init_function = backwards_opt.create_constant_initializer(0.)
    elif init_function_name == backwards_opt.UNIFORM_INIT_FUNCTION_NAME:
        init_function = backwards_opt.create_uniform_random_initializer(
            min_value=-1., max_value=1.)
    else:
        init_function = backwards_opt.create_gaussian_initializer(
            mean=0., standard_deviation=1.)
    return init_function
def _run(model_file_name, init_function_name, component_type_string,
         target_class, layer_name, neuron_indices, channel_index,
         ideal_activation, num_iterations, learning_rate, output_file_name):
    """Runs backwards optimization on a trained CNN.

    This is effectively the main method.

    :param model_file_name: See documentation at top of file.
    :param init_function_name: Same.
    :param component_type_string: Same.
    :param target_class: Same.
    :param layer_name: Same.
    :param neuron_indices: Same.
    :param channel_index: Same.
    :param ideal_activation: Same.
    :param num_iterations: Same.
    :param learning_rate: Same.
    :param output_file_name: Same.
    """
    # Validate the component type up front, before the (slow) model load.
    model_interpretation.check_component_type(component_type_string)
    # A non-positive value is the CLI sentinel for "no ideal activation".
    if ideal_activation <= 0:
        ideal_activation = None
    print('Reading model from: "{0:s}"...'.format(model_file_name))
    # custom_objects lets Keras resolve the custom BSS metric the model was
    # presumably compiled with -- confirm against the training script.
    model_object = load_keras_model(
        model_file_name,
        custom_objects={'brier_skill_score_keras': _brier_skill_score_keras}
    )
    init_function = _create_initializer(init_function_name)
    print(SEPARATOR_STRING)
    # Dispatch on component type: class probability, single neuron, or channel.
    if component_type_string == CLASS_COMPONENT_TYPE_STRING:
        print('Optimizing image for target class {0:d}...'.format(target_class))
        result_dict = backwards_opt.optimize_input_for_class(
            model_object=model_object, target_class=target_class,
            init_function_or_matrices=init_function,
            num_iterations=num_iterations, learning_rate=learning_rate)
    elif component_type_string == NEURON_COMPONENT_TYPE_STRING:
        print('Optimizing image for neuron {0:s} in layer "{1:s}"...'.format(
            str(neuron_indices), layer_name
        ))
        result_dict = backwards_opt.optimize_input_for_neuron(
            model_object=model_object, layer_name=layer_name,
            neuron_indices=neuron_indices,
            init_function_or_matrices=init_function,
            num_iterations=num_iterations, learning_rate=learning_rate,
            ideal_activation=ideal_activation)
    else:
        print('Optimizing image for channel {0:d} in layer "{1:s}"...'.format(
            channel_index, layer_name))
        result_dict = backwards_opt.optimize_input_for_channel(
            model_object=model_object, layer_name=layer_name,
            channel_index=channel_index,
            init_function_or_matrices=init_function,
            stat_function_for_neuron_activations=K.max,
            num_iterations=num_iterations, learning_rate=learning_rate,
            ideal_activation=ideal_activation)
    print(SEPARATOR_STRING)
    # Wrap scalar activations in 1-element arrays, as expected by the writer.
    initial_activations = numpy.array([
        result_dict[backwards_opt.INITIAL_ACTIVATION_KEY]
    ])
    final_activations = numpy.array([
        result_dict[backwards_opt.FINAL_ACTIVATION_KEY]
    ])
    print('Denormalizing input and output (optimized) example...')
    denorm_input_matrix = _denormalize_data(
        result_dict[backwards_opt.NORM_INPUT_MATRICES_KEY]
    )
    denorm_output_matrix = _denormalize_data(
        result_dict[backwards_opt.NORM_OUTPUT_MATRICES_KEY]
    )
    print('Writing results to: "{0:s}"...'.format(output_file_name))
    # check_metadata both validates and packages the run configuration.
    bwo_metadata_dict = backwards_opt.check_metadata(
        component_type_string=component_type_string,
        num_iterations=num_iterations, learning_rate=learning_rate,
        target_class=target_class, layer_name=layer_name,
        ideal_activation=ideal_activation, neuron_indices=neuron_indices,
        channel_index=channel_index, l2_weight=None,
        radar_constraint_weight=None,
        minmax_constraint_weight=None)
    backwards_opt.write_standard_file(
        pickle_file_name=output_file_name,
        denorm_input_matrices=[denorm_input_matrix],
        denorm_output_matrices=[denorm_output_matrix],
        initial_activations=initial_activations,
        final_activations=final_activations, model_file_name=model_file_name,
        metadata_dict=bwo_metadata_dict, full_storm_id_strings=None,
        storm_times_unix_sec=None, sounding_pressure_matrix_pa=None)
if __name__ == '__main__':
    # Parse the command line and forward every flag to _run().
    INPUT_ARG_OBJECT = INPUT_ARG_PARSER.parse_args()
    _run(
        model_file_name=getattr(INPUT_ARG_OBJECT, MODEL_FILE_ARG_NAME),
        init_function_name=getattr(INPUT_ARG_OBJECT, INIT_FUNCTION_ARG_NAME),
        component_type_string=getattr(
            INPUT_ARG_OBJECT, COMPONENT_TYPE_ARG_NAME),
        target_class=getattr(INPUT_ARG_OBJECT, TARGET_CLASS_ARG_NAME),
        layer_name=getattr(INPUT_ARG_OBJECT, LAYER_NAME_ARG_NAME),
        # nargs='+' yields a Python list; downstream code expects numpy ints.
        neuron_indices=numpy.array(
            getattr(INPUT_ARG_OBJECT, NEURON_INDICES_ARG_NAME), dtype=int),
        channel_index=getattr(INPUT_ARG_OBJECT, CHANNEL_INDEX_ARG_NAME),
        ideal_activation=getattr(INPUT_ARG_OBJECT, IDEAL_ACTIVATION_ARG_NAME),
        num_iterations=getattr(INPUT_ARG_OBJECT, NUM_ITERATIONS_ARG_NAME),
        learning_rate=getattr(INPUT_ARG_OBJECT, LEARNING_RATE_ARG_NAME),
        output_file_name=getattr(INPUT_ARG_OBJECT, OUTPUT_FILE_ARG_NAME)
    )
| mit |
shubhamgupta123/erpnext | erpnext/stock/doctype/item/item_dashboard.py | 4 | 1149 | from __future__ import unicode_literals
from frappe import _
def get_data():
	"""Return the dashboard configuration for the Item doctype.

	The heatmap is driven by stock movement; related documents are grouped
	by activity (sell, buy, traceability, etc.).
	"""
	heatmap_message = _('This is based on stock movement. See {0} for details')\
		.format('<a href="#query-report/Stock Ledger">' + _('Stock Ledger') + '</a>')

	# Doctypes whose link field to Item is not the default 'item_code'.
	non_standard_fieldnames = {
		'Work Order': 'production_item',
		'Product Bundle': 'new_item_code',
		'BOM': 'item',
		'Batch': 'item'
	}

	transactions = [
		{'label': _('Groups'),
			'items': ['BOM', 'Product Bundle', 'Item Alternative']},
		{'label': _('Pricing'),
			'items': ['Item Price', 'Pricing Rule']},
		{'label': _('Sell'),
			'items': ['Quotation', 'Sales Order', 'Delivery Note', 'Sales Invoice']},
		{'label': _('Buy'),
			'items': ['Material Request', 'Supplier Quotation', 'Request for Quotation',
				'Purchase Order', 'Purchase Receipt', 'Purchase Invoice']},
		{'label': _('Traceability'),
			'items': ['Serial No', 'Batch']},
		{'label': _('Move'),
			'items': ['Stock Entry']},
		{'label': _('Manufacture'),
			'items': ['Work Order']}
	]

	return {
		'heatmap': True,
		'heatmap_message': heatmap_message,
		'fieldname': 'item_code',
		'non_standard_fieldnames': non_standard_fieldnames,
		'transactions': transactions
	}
stefanv/scipy3 | scipy/sparse/sparsetools/coo.py | 2 | 8185 | # This file was automatically generated by SWIG (http://www.swig.org).
# Version 1.3.36
#
# Don't modify this file, modify the SWIG interface instead.
# This file is compatible with both classic and new-style classes.
import _coo
import new
new_instancemethod = new.instancemethod
try:
_swig_property = property
except NameError:
pass # Python < 2.2 doesn't have 'property'.
def _swig_setattr_nondynamic(self,class_type,name,value,static=1):
    # SWIG boilerplate: route attribute writes through the proxy class's
    # __swig_setmethods__ table.  With static=1, refuse to create attributes
    # that do not already exist on the proxy.
    if (name == "thisown"): return self.this.own(value)
    if (name == "this"):
        if type(value).__name__ == 'PySwigObject':
            self.__dict__[name] = value
            return
    method = class_type.__swig_setmethods__.get(name,None)
    if method: return method(self,value)
    if (not static) or hasattr(self,name):
        self.__dict__[name] = value
    else:
        raise AttributeError("You cannot add attributes to %s" % self)
def _swig_setattr(self,class_type,name,value):
    # Non-static variant: permits adding brand-new attributes to the proxy.
    return _swig_setattr_nondynamic(self,class_type,name,value,0)
def _swig_getattr(self,class_type,name):
    # SWIG boilerplate: attribute reads go through __swig_getmethods__.
    # NOTE: 'raise AttributeError,name' below is Python-2-only syntax; this
    # generated module predates Python 3 and must not be run under it.
    if (name == "thisown"): return self.this.own()
    method = class_type.__swig_getmethods__.get(name,None)
    if method: return method(self)
    raise AttributeError,name
def _swig_repr(self):
    # Repr for SWIG proxy objects; falls back to an empty string when the
    # underlying C object cannot produce a repr.
    try: strthis = "proxy of " + self.this.__repr__()
    except: strthis = ""
    return "<%s.%s; %s >" % (self.__class__.__module__, self.__class__.__name__, strthis,)
# Compatibility shim (see header comment: "compatible with both classic and
# new-style classes"): use `object` as the proxy base where available,
# otherwise fall back to a classic class on very old Pythons.
import types
try:
    _object = types.ObjectType
    _newclass = 1
except AttributeError:
    class _object : pass
    _newclass = 0
del types
def coo_count_diagonals(*args):
    """coo_count_diagonals(int nnz, int Ai, int Aj) -> int"""
    # Thin dispatch wrapper over the compiled SWIG extension module.
    return _coo.coo_count_diagonals(*args)
def coo_tocsr(*args):
    """coo_tocsr(int n_row, int n_col, int nnz, int Ai, int Aj, T Ax,
                 int Bp, int Bj, T Bx)

    Converts the COO matrix (Ai, Aj, Ax) into caller-allocated CSR arrays
    (Bp, Bj, Bx).  SWIG-overloaded on the value type T of Ax/Bx:
    signed/unsigned char, short, int, long long; float, double, long double;
    npy_cfloat_wrapper, npy_cdouble_wrapper, npy_clongdouble_wrapper.
    """
    # Thin dispatch wrapper; overload resolution happens in the C++ layer.
    return _coo.coo_tocsr(*args)
def coo_tocsc(*args):
    """coo_tocsc(int n_row, int n_col, int nnz, int Ai, int Aj, T Ax,
                 int Bp, int Bi, T Bx)

    Converts the COO matrix (Ai, Aj, Ax) into caller-allocated CSC arrays
    (Bp, Bi, Bx).  SWIG-overloaded on the value type T of Ax/Bx:
    signed/unsigned char, short, int, long long; float, double, long double;
    npy_cfloat_wrapper, npy_cdouble_wrapper, npy_clongdouble_wrapper.
    """
    # Thin dispatch wrapper; overload resolution happens in the C++ layer.
    return _coo.coo_tocsc(*args)
def coo_todense(*args):
    """coo_todense(int n_row, int n_col, int nnz, int Ai, int Aj, T Ax, T Bx)

    Writes the COO entries (Ai, Aj, Ax) into the caller-allocated dense
    output array Bx.  SWIG-overloaded on the value type T of Ax/Bx:
    signed/unsigned char, short, int, long long; float, double, long double;
    npy_cfloat_wrapper, npy_cdouble_wrapper, npy_clongdouble_wrapper.
    """
    # Thin dispatch wrapper; overload resolution happens in the C++ layer.
    return _coo.coo_todense(*args)
def coo_matvec(*args):
    """coo_matvec(int nnz, int Ai, int Aj, T Ax, T Xx, T Yx)

    Sparse matrix-vector product for a COO matrix (Ai, Aj, Ax) with input
    vector Xx and caller-allocated output vector Yx.  SWIG-overloaded on the
    value type T: signed/unsigned char, short, int, long long; float, double,
    long double; npy_cfloat_wrapper, npy_cdouble_wrapper,
    npy_clongdouble_wrapper.
    """
    # Thin dispatch wrapper; overload resolution happens in the C++ layer.
    return _coo.coo_matvec(*args)
| bsd-3-clause |
akosyakov/intellij-community | python/helpers/pydev/pydev_ipython/matplotlibtools.py | 52 | 5401 |
import sys
# Map from pydev/IPython GUI names to matplotlib backend names.
backends = {'tk': 'TkAgg',
            'gtk': 'GTKAgg',
            'wx': 'WXAgg',
            'qt': 'Qt4Agg', # qt3 not supported
            'qt4': 'Qt4Agg',
            'osx': 'MacOSX'}
# We also need a reverse backends2guis mapping that will properly choose which
# GUI support to activate based on the desired matplotlib backend. For the
# most part it's just a reverse of the above dict, but we also need to add a
# few others that map to the same GUI manually:
backend2gui = dict(zip(backends.values(), backends.keys()))
# 'Qt4Agg' appears twice above ('qt' and 'qt4'); pin the reverse entry to 'qt'.
backend2gui['Qt4Agg'] = 'qt'
# In the reverse mapping, there are a few extra valid matplotlib backends that
# map to the same GUI support
backend2gui['GTK'] = backend2gui['GTKCairo'] = 'gtk'
backend2gui['WX'] = 'wx'
backend2gui['CocoaAgg'] = 'osx'
def do_enable_gui(guiname):
    """Try to enable GUI event-loop integration for *guiname*.

    Always returns *guiname* unchanged; failures are reported on stderr only.
    """
    from pydev_versioncheck import versionok_for_gui

    if not versionok_for_gui():
        # Only warn when the request would actually have done something.
        if guiname not in ['none', '', None]:
            sys.stderr.write("Debug console: Python version does not support GUI event loop integration for '%s'\n" % guiname)
        return guiname

    try:
        from pydev_ipython.inputhook import enable_gui
        enable_gui(guiname)
    except:
        sys.stderr.write("Failed to enable GUI event loop integration for '%s'\n" % guiname)
        import traceback
        traceback.print_exc()
    return guiname
def find_gui_and_backend():
    """Return (gui, backend) for the already-imported matplotlib module."""
    mpl = sys.modules['matplotlib']
    # WARNING: this assumes matplotlib 1.1 or newer, where the active backend
    # is exposed via rcParams.
    active_backend = mpl.rcParams['backend']
    # Map the backend back to the GUI toolkit whose inputhook we must enable.
    return backend2gui.get(active_backend, None), active_backend
def is_interactive_backend(backend):
    """Tell whether *backend* is an interactive matplotlib backend."""
    mpl = sys.modules['matplotlib']
    from matplotlib.rcsetup import interactive_bk, non_interactive_bk

    if backend in interactive_bk:
        return True
    if backend in non_interactive_bk:
        return False
    # Unknown backend: fall back to matplotlib's current interactive state.
    return mpl.is_interactive()
def patch_use(enable_gui_function):
    """Wrap matplotlib.use() so selecting a backend also enables its GUI loop.

    The original function is preserved as matplotlib.real_use.
    """
    mpl = sys.modules['matplotlib']

    def use_with_gui_hook(*args, **kwargs):
        mpl.real_use(*args, **kwargs)
        gui, _backend = find_gui_and_backend()
        enable_gui_function(gui)

    # Stash the original under 'real_use' first, then install the wrapper.
    mpl.real_use = getattr(mpl, "use")
    mpl.use = use_with_gui_hook
def patch_is_interactive():
    """Make matplotlib.is_interactive() report rcParams['interactive'] directly.

    The original function is preserved as matplotlib.real_is_interactive.
    """
    mpl = sys.modules['matplotlib']

    def rcparams_is_interactive():
        return mpl.rcParams['interactive']

    # Stash the original first, then install the replacement.
    mpl.real_is_interactive = getattr(mpl, "is_interactive")
    mpl.is_interactive = rcparams_is_interactive
def activate_matplotlib(enable_gui_function):
    """Align matplotlib's interactive mode with its backend and patch it.

    enable_gui_function - Function which enables gui, should be run in the
    main thread.
    """
    mpl = sys.modules['matplotlib']
    gui, backend = find_gui_and_backend()

    if is_interactive_backend(backend):
        enable_gui_function(gui)
        if not mpl.is_interactive():
            sys.stdout.write("Backend %s is interactive backend. Turning interactive mode on.\n" % backend)
        mpl.interactive(True)
    else:
        if mpl.is_interactive():
            sys.stdout.write("Backend %s is non-interactive backend. Turning interactive mode off.\n" % backend)
        mpl.interactive(False)

    # Install the hooks that keep GUI integration in sync afterwards.
    patch_use(enable_gui_function)
    patch_is_interactive()
def flag_calls(func):
    """Wrap *func* so call attempts can be detected via a 'called' attribute.

    wrapper.called starts False, is reset to False immediately before each
    call, and becomes True only after the wrapped function returns without
    raising.  Functions that already carry a 'called' attribute are returned
    unchanged (no double wrapping).
    """
    if hasattr(func, 'called'):
        return func

    def tracking_wrapper(*args, **kw):
        tracking_wrapper.called = False
        result = func(*args, **kw)
        tracking_wrapper.called = True
        return result

    tracking_wrapper.called = False
    tracking_wrapper.__doc__ = func.__doc__
    return tracking_wrapper
def activate_pylab():
    """Prepare the already-imported pylab module for interactive use."""
    mod = sys.modules['pylab']
    mod.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # flag_calls wraps draw_if_interactive with a 'called' flag for that.
    mod.draw_if_interactive = flag_calls(mod.draw_if_interactive)
def activate_pyplot():
    """Prepare the already-imported matplotlib.pyplot for interactive use."""
    mod = sys.modules['matplotlib.pyplot']
    mod.show._needmain = False
    # We need to detect at runtime whether show() is called by the user.
    # flag_calls wraps draw_if_interactive with a 'called' flag for that.
    mod.draw_if_interactive = flag_calls(mod.draw_if_interactive)
| apache-2.0 |
jhaux/tensorflow | tensorflow/python/util/protobuf/compare_test.py | 165 | 19171 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for python.util.protobuf.compare."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import re
import textwrap
import six
from google.protobuf import text_format
from tensorflow.python.platform import googletest
from tensorflow.python.util.protobuf import compare
from tensorflow.python.util.protobuf import compare_test_pb2
def LargePbs(*args):
  """Parses each ASCII-format string in *args* into a Large proto message."""

  def _parse(text):
    pb = compare_test_pb2.Large()
    text_format.Merge(text, pb)
    return pb

  return [_parse(arg) for arg in args]
class ProtoEqTest(googletest.TestCase):
  """Tests compare.ProtoEq() on pairs of ASCII-format Large protos.

  The inherited assertEquals/assertNotEquals are deliberately overridden to
  take two proto text strings and assert on ProtoEq's verdict.
  """

  def assertNotEquals(self, a, b):
    """Asserts that ProtoEq says a != b."""
    a, b = LargePbs(a, b)
    googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), False)

  def assertEquals(self, a, b):
    """Asserts that ProtoEq says a == b."""
    a, b = LargePbs(a, b)
    googletest.TestCase.assertEquals(self, compare.ProtoEq(a, b), True)

  def testPrimitives(self):
    # Non-proto arguments fall back to ordinary equality.
    googletest.TestCase.assertEqual(self, True, compare.ProtoEq('a', 'a'))
    googletest.TestCase.assertEqual(self, False, compare.ProtoEq('b', 'a'))

  def testEmpty(self):
    self.assertEquals('', '')

  def testPrimitiveFields(self):
    self.assertNotEquals('string_: "a"', '')
    self.assertEquals('string_: "a"', 'string_: "a"')
    self.assertNotEquals('string_: "b"', 'string_: "a"')
    self.assertNotEquals('string_: "ab"', 'string_: "aa"')
    self.assertNotEquals('int64_: 0', '')
    self.assertEquals('int64_: 0', 'int64_: 0')
    self.assertNotEquals('int64_: -1', '')
    self.assertNotEquals('int64_: 1', 'int64_: 0')
    self.assertNotEquals('int64_: 0', 'int64_: -1')
    self.assertNotEquals('float_: 0.0', '')
    self.assertEquals('float_: 0.0', 'float_: 0.0')
    self.assertNotEquals('float_: -0.1', '')
    self.assertNotEquals('float_: 3.14', 'float_: 0')
    self.assertNotEquals('float_: 0', 'float_: -0.1')
    self.assertEquals('float_: -0.1', 'float_: -0.1')
    self.assertNotEquals('bool_: true', '')
    self.assertNotEquals('bool_: false', '')
    self.assertNotEquals('bool_: true', 'bool_: false')
    self.assertEquals('bool_: false', 'bool_: false')
    self.assertEquals('bool_: true', 'bool_: true')
    self.assertNotEquals('enum_: A', '')
    self.assertNotEquals('enum_: B', 'enum_: A')
    self.assertNotEquals('enum_: C', 'enum_: B')
    self.assertEquals('enum_: C', 'enum_: C')

  def testRepeatedPrimitives(self):
    self.assertNotEquals('int64s: 0', '')
    self.assertEquals('int64s: 0', 'int64s: 0')
    self.assertNotEquals('int64s: 1', 'int64s: 0')
    self.assertNotEquals('int64s: 0 int64s: 0', '')
    self.assertNotEquals('int64s: 0 int64s: 0', 'int64s: 0')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0')
    self.assertNotEquals('int64s: 0 int64s: 1', 'int64s: 0')
    self.assertNotEquals('int64s: 1', 'int64s: 0 int64s: 2')
    self.assertNotEquals('int64s: 2 int64s: 0', 'int64s: 1')
    self.assertEquals('int64s: 0 int64s: 0', 'int64s: 0 int64s: 0')
    self.assertEquals('int64s: 0 int64s: 1', 'int64s: 0 int64s: 1')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 0')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 1')
    self.assertNotEquals('int64s: 1 int64s: 0', 'int64s: 0 int64s: 2')
    self.assertNotEquals('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0')
    self.assertNotEquals('int64s: 1 int64s: 1', 'int64s: 1 int64s: 0 int64s: 2')

  def testMessage(self):
    self.assertNotEquals('small <>', '')
    self.assertEquals('small <>', 'small <>')
    self.assertNotEquals('small < strings: "a" >', '')
    self.assertNotEquals('small < strings: "a" >', 'small <>')
    self.assertEquals('small < strings: "a" >', 'small < strings: "a" >')
    self.assertNotEquals('small < strings: "b" >', 'small < strings: "a" >')
    self.assertNotEquals('small < strings: "a" strings: "b" >',
                         'small < strings: "a" >')
    self.assertNotEquals('string_: "a"', 'small <>')
    self.assertNotEquals('string_: "a"', 'small < strings: "b" >')
    self.assertNotEquals('string_: "a"', 'small < strings: "b" strings: "c" >')
    self.assertNotEquals('string_: "a" small <>', 'small <>')
    self.assertNotEquals('string_: "a" small <>', 'small < strings: "b" >')
    self.assertEquals('string_: "a" small <>', 'string_: "a" small <>')
    self.assertNotEquals('string_: "a" small < strings: "a" >',
                         'string_: "a" small <>')
    self.assertEquals('string_: "a" small < strings: "a" >',
                      'string_: "a" small < strings: "a" >')
    self.assertNotEquals('string_: "a" small < strings: "a" >',
                         'int64_: 1 small < strings: "a" >')
    self.assertNotEquals('string_: "a" small < strings: "a" >', 'int64_: 1')
    self.assertNotEquals('string_: "a"', 'int64_: 1 small < strings: "a" >')
    self.assertNotEquals('string_: "a" int64_: 0 small < strings: "a" >',
                         'int64_: 1 small < strings: "a" >')
    self.assertNotEquals('string_: "a" int64_: 1 small < strings: "a" >',
                         'string_: "a" int64_: 0 small < strings: "a" >')
    self.assertEquals('string_: "a" int64_: 0 small < strings: "a" >',
                      'string_: "a" int64_: 0 small < strings: "a" >')

  def testNestedMessage(self):
    self.assertNotEquals('medium <>', '')
    self.assertEquals('medium <>', 'medium <>')
    self.assertNotEquals('medium < smalls <> >', 'medium <>')
    self.assertEquals('medium < smalls <> >', 'medium < smalls <> >')
    self.assertNotEquals('medium < smalls <> smalls <> >',
                         'medium < smalls <> >')
    self.assertEquals('medium < smalls <> smalls <> >',
                      'medium < smalls <> smalls <> >')
    self.assertNotEquals('medium < int32s: 0 >', 'medium < smalls <> >')
    self.assertNotEquals('medium < smalls < strings: "a"> >',
                         'medium < smalls <> >')

  def testTagOrder(self):
    """Tests that different fields are ordered by tag number.

    For reference, here are the relevant tag numbers from compare_test.proto:
      optional string string_ = 1;
      optional int64 int64_ = 2;
      optional float float_ = 3;
      optional Small small = 8;
      optional Medium medium = 7;
      optional Small small = 8;
    """
    self.assertNotEquals('string_: "a" ',
                         '             int64_: 1 ')
    self.assertNotEquals('string_: "a" int64_: 2 ',
                         '             int64_: 1 ')
    self.assertNotEquals('string_: "b" int64_: 1 ',
                         'string_: "a" int64_: 2 ')
    self.assertEquals('string_: "a" int64_: 1 ',
                      'string_: "a" int64_: 1 ')
    self.assertNotEquals('string_: "a" int64_: 1 float_: 0.0',
                         'string_: "a" int64_: 1 ')
    self.assertEquals('string_: "a" int64_: 1 float_: 0.0',
                      'string_: "a" int64_: 1 float_: 0.0')
    self.assertNotEquals('string_: "a" int64_: 1 float_: 0.1',
                         'string_: "a" int64_: 1 float_: 0.0')
    self.assertNotEquals('string_: "a" int64_: 2 float_: 0.0',
                         'string_: "a" int64_: 1 float_: 0.1')
    self.assertNotEquals('string_: "a" ',
                         '             int64_: 1 float_: 0.1')
    self.assertNotEquals('string_: "a" float_: 0.0',
                         '             int64_: 1 ')
    self.assertNotEquals('string_: "b" float_: 0.0',
                         'string_: "a" int64_: 1 ')
    self.assertNotEquals('string_: "a"', 'small < strings: "a" >')
    self.assertNotEquals('string_: "a" small < strings: "a" >',
                         'small < strings: "b" >')
    self.assertNotEquals('string_: "a" small < strings: "b" >',
                         'string_: "a" small < strings: "a" >')
    self.assertEquals('string_: "a" small < strings: "a" >',
                      'string_: "a" small < strings: "a" >')
    self.assertNotEquals('string_: "a" medium <>',
                         'string_: "a" small < strings: "a" >')
    self.assertNotEquals('string_: "a" medium < smalls <> >',
                         'string_: "a" small < strings: "a" >')
    self.assertNotEquals('medium <>', 'small < strings: "a" >')
    self.assertNotEquals('medium <> small <>', 'small < strings: "a" >')
    self.assertNotEquals('medium < smalls <> >', 'small < strings: "a" >')
    self.assertNotEquals('medium < smalls < strings: "a" > >',
                         'small < strings: "b" >')
class NormalizeNumbersTest(googletest.TestCase):
  """Tests for NormalizeNumberFields()."""

  def testNormalizesInts(self):
    pb = compare_test_pb2.Large()
    pb.int64_ = 4
    compare.NormalizeNumberFields(pb)
    self.assertTrue(isinstance(pb.int64_, six.integer_types))
    pb.int64_ = 4
    compare.NormalizeNumberFields(pb)
    self.assertTrue(isinstance(pb.int64_, six.integer_types))
    # Large value: must stay an integer type (long on Python 2).
    pb.int64_ = 9999999999999999
    compare.NormalizeNumberFields(pb)
    self.assertTrue(isinstance(pb.int64_, six.integer_types))

  def testNormalizesRepeatedInts(self):
    pb = compare_test_pb2.Large()
    pb.int64s.extend([1, 400, 999999999999999])
    compare.NormalizeNumberFields(pb)
    self.assertTrue(isinstance(pb.int64s[0], six.integer_types))
    self.assertTrue(isinstance(pb.int64s[1], six.integer_types))
    self.assertTrue(isinstance(pb.int64s[2], six.integer_types))

  def testNormalizesFloats(self):
    # Two floats differing only beyond the normalized precision must compare
    # equal after normalization.
    pb1 = compare_test_pb2.Large()
    pb1.float_ = 1.2314352351231
    pb2 = compare_test_pb2.Large()
    pb2.float_ = 1.231435
    self.assertNotEqual(pb1.float_, pb2.float_)
    compare.NormalizeNumberFields(pb1)
    compare.NormalizeNumberFields(pb2)
    self.assertEqual(pb1.float_, pb2.float_)

  def testNormalizesRepeatedFloats(self):
    pb = compare_test_pb2.Large()
    pb.medium.floats.extend([0.111111111, 0.111111])
    compare.NormalizeNumberFields(pb)
    for value in pb.medium.floats:
      self.assertAlmostEqual(0.111111, value)

  def testNormalizesDoubles(self):
    pb1 = compare_test_pb2.Large()
    pb1.double_ = 1.2314352351231
    pb2 = compare_test_pb2.Large()
    pb2.double_ = 1.2314352
    self.assertNotEqual(pb1.double_, pb2.double_)
    compare.NormalizeNumberFields(pb1)
    compare.NormalizeNumberFields(pb2)
    self.assertEqual(pb1.double_, pb2.double_)

  def testNormalizesMaps(self):
    # Smoke test: normalization must traverse map fields without raising.
    pb = compare_test_pb2.WithMap()
    pb.value_message[4].strings.extend(['a', 'b', 'c'])
    pb.value_string['d'] = 'e'
    compare.NormalizeNumberFields(pb)
class AssertTest(googletest.TestCase):
"""Tests assertProtoEqual()."""
def assertProtoEqual(self, a, b, **kwargs):
if isinstance(a, six.string_types) and isinstance(b, six.string_types):
a, b = LargePbs(a, b)
compare.assertProtoEqual(self, a, b, **kwargs)
def assertAll(self, a, **kwargs):
"""Checks that all possible asserts pass."""
self.assertProtoEqual(a, a, **kwargs)
def assertSameNotEqual(self, a, b):
"""Checks that assertProtoEqual() fails."""
self.assertRaises(AssertionError, self.assertProtoEqual, a, b)
def assertNone(self, a, b, message, **kwargs):
"""Checks that all possible asserts fail with the given message."""
message = re.escape(textwrap.dedent(message))
self.assertRaisesRegexp(AssertionError, message, self.assertProtoEqual, a,
b, **kwargs)
def testCheckInitialized(self):
# neither is initialized
a = compare_test_pb2.Labeled()
a.optional = 1
self.assertNone(a, a, 'Initialization errors: ', check_initialized=True)
self.assertAll(a, check_initialized=False)
# a is initialized, b isn't
b = copy.deepcopy(a)
a.required = 2
self.assertNone(a, b, 'Initialization errors: ', check_initialized=True)
self.assertNone(
a,
b,
"""
- required: 2
optional: 1
""",
check_initialized=False)
# both are initialized
a = compare_test_pb2.Labeled()
a.required = 2
self.assertAll(a, check_initialized=True)
self.assertAll(a, check_initialized=False)
b = copy.deepcopy(a)
b.required = 3
message = """
- required: 2
? ^
+ required: 3
? ^
"""
self.assertNone(a, b, message, check_initialized=True)
self.assertNone(a, b, message, check_initialized=False)
def testAssertEqualWithStringArg(self):
pb = compare_test_pb2.Large()
pb.string_ = 'abc'
pb.float_ = 1.234
compare.assertProtoEqual(self, """
string_: 'abc'
float_: 1.234
""", pb)
def testNormalizesNumbers(self):
pb1 = compare_test_pb2.Large()
pb1.int64_ = 4
pb2 = compare_test_pb2.Large()
pb2.int64_ = 4
compare.assertProtoEqual(self, pb1, pb2)
def testNormalizesFloat(self):
pb1 = compare_test_pb2.Large()
pb1.double_ = 4.0
pb2 = compare_test_pb2.Large()
pb2.double_ = 4
compare.assertProtoEqual(self, pb1, pb2, normalize_numbers=True)
def testPrimitives(self):
  """Scalar string fields: equal values pass, differing values diff."""
  self.assertAll('string_: "x"')
  self.assertNone('string_: "x"', 'string_: "y"', """
- string_: "x"
? ^
+ string_: "y"
? ^
""")
def testRepeatedPrimitives(self):
  """Repeated scalars: reordering or changing multiplicity is a mismatch."""
  self.assertAll('int64s: 0 int64s: 1')

  # Order of repeated values is significant.
  self.assertSameNotEqual('int64s: 0 int64s: 1', 'int64s: 1 int64s: 0')
  self.assertSameNotEqual('int64s: 0 int64s: 1 int64s: 2',
                          'int64s: 2 int64s: 1 int64s: 0')

  # So is the number of occurrences of each value.
  self.assertSameNotEqual('int64s: 0', 'int64s: 0 int64s: 0')
  self.assertSameNotEqual('int64s: 0 int64s: 1',
                          'int64s: 1 int64s: 0 int64s: 1')

  self.assertNone('int64s: 0', 'int64s: 0 int64s: 2', """
int64s: 0
+ int64s: 2
""")
  self.assertNone('int64s: 0 int64s: 1', 'int64s: 0 int64s: 2', """
int64s: 0
- int64s: 1
? ^
+ int64s: 2
? ^
""")
def testMessage(self):
  """Nested (sub)messages: equal structures pass, differences are diffed."""
  self.assertAll('medium: {}')
  self.assertAll('medium: { smalls: {} }')
  self.assertAll('medium: { int32s: 1 smalls: {} }')
  self.assertAll('medium: { smalls: { strings: "x" } }')
  self.assertAll(
      'medium: { smalls: { strings: "x" } } small: { strings: "y" }')

  # Ordering matters inside repeated fields of nested messages too.
  self.assertSameNotEqual('medium: { smalls: { strings: "x" strings: "y" } }',
                          'medium: { smalls: { strings: "y" strings: "x" } }')
  self.assertSameNotEqual(
      'medium: { smalls: { strings: "x" } smalls: { strings: "y" } }',
      'medium: { smalls: { strings: "y" } smalls: { strings: "x" } }')
  self.assertSameNotEqual(
      'medium: { smalls: { strings: "x" strings: "y" strings: "x" } }',
      'medium: { smalls: { strings: "y" strings: "x" } }')
  self.assertSameNotEqual(
      'medium: { smalls: { strings: "x" } int32s: 0 }',
      'medium: { int32s: 0 smalls: { strings: "x" } int32s: 0 }')

  # Added submessage content shows up as '+' lines in the diff.
  self.assertNone('medium: {}', 'medium: { smalls: { strings: "x" } }', """
medium {
+ smalls {
+ strings: "x"
+ }
}
""")
  # Removed content shows up as '-' lines.
  self.assertNone('medium: { smalls: { strings: "x" } }',
                  'medium: { smalls: {} }', """
medium {
smalls {
- strings: "x"
}
}
""")
  self.assertNone('medium: { int32s: 0 }', 'medium: { int32s: 1 }', """
medium {
- int32s: 0
? ^
+ int32s: 1
? ^
}
""")
def testMsgPassdown(self):
  """A custom msg= argument appears in the raised AssertionError."""
  self.assertRaisesRegexp(
      AssertionError,
      'test message passed down',
      self.assertProtoEqual,
      'medium: {}',
      'medium: { smalls: { strings: "x" } }',
      msg='test message passed down')
def testRepeatedMessage(self):
  """Repeated submessages: order and multiplicity are significant."""
  self.assertAll('medium: { smalls: {} smalls: {} }')
  self.assertAll('medium: { smalls: { strings: "x" } } medium: {}')
  self.assertAll('medium: { smalls: { strings: "x" } } medium: { int32s: 0 }')
  self.assertAll('medium: { smalls: {} smalls: { strings: "x" } } small: {}')

  # Reordered repeated submessages do not compare equal.
  self.assertSameNotEqual('medium: { smalls: { strings: "x" } smalls: {} }',
                          'medium: { smalls: {} smalls: { strings: "x" } }')
  # Nor do differing element counts.
  self.assertSameNotEqual('medium: { smalls: {} }',
                          'medium: { smalls: {} smalls: {} }')
  self.assertSameNotEqual('medium: { smalls: {} smalls: {} } medium: {}',
                          'medium: {} medium: {} medium: { smalls: {} }')
  self.assertSameNotEqual(
      'medium: { smalls: { strings: "x" } smalls: {} }',
      'medium: { smalls: {} smalls: { strings: "x" } smalls: {} }')

  self.assertNone('medium: {}', 'medium: {} medium { smalls: {} }', """
medium {
+ smalls {
+ }
}
""")
  self.assertNone('medium: { smalls: {} smalls: { strings: "x" } }',
                  'medium: { smalls: {} smalls: { strings: "y" } }', """
medium {
smalls {
}
smalls {
- strings: "x"
? ^
+ strings: "y"
? ^
}
}
""")
class MixinTests(compare.ProtoAssertions, googletest.TestCase):
  """Exercises assertProtoEqual via the ProtoAssertions mixin interface."""

  def testAssertEqualWithStringArg(self):
    pb = compare_test_pb2.Large()
    pb.string_ = 'abc'
    pb.float_ = 1.234
    # First argument is a text-format string, compared against the proto.
    self.assertProtoEqual("""
string_: 'abc'
float_: 1.234
""", pb)
# Standard test-runner entry point.
if __name__ == '__main__':
  googletest.main()
| apache-2.0 |
tragiclifestories/django | django/forms/formsets.py | 362 | 17988 | from __future__ import unicode_literals
from django.core.exceptions import ValidationError
from django.forms import Form
from django.forms.fields import BooleanField, IntegerField
from django.forms.utils import ErrorList
from django.forms.widgets import HiddenInput
from django.utils import six
from django.utils.encoding import python_2_unicode_compatible
from django.utils.functional import cached_property
from django.utils.html import html_safe
from django.utils.safestring import mark_safe
from django.utils.six.moves import range
from django.utils.translation import ugettext as _, ungettext
__all__ = ('BaseFormSet', 'formset_factory', 'all_valid')

# special field names
# The first four are fields of the per-formset ManagementForm; ORDER and
# DELETE are per-form fields added by BaseFormSet.add_fields().
TOTAL_FORM_COUNT = 'TOTAL_FORMS'
INITIAL_FORM_COUNT = 'INITIAL_FORMS'
MIN_NUM_FORM_COUNT = 'MIN_NUM_FORMS'
MAX_NUM_FORM_COUNT = 'MAX_NUM_FORMS'
ORDERING_FIELD_NAME = 'ORDER'
DELETION_FIELD_NAME = 'DELETE'

# default minimum number of forms in a formset
DEFAULT_MIN_NUM = 0

# default maximum number of forms in a formset, to prevent memory exhaustion
DEFAULT_MAX_NUM = 1000
class ManagementForm(Form):
    """
    ``ManagementForm`` is used to keep track of how many form instances
    are displayed on the page. If adding new forms via javascript, you should
    increment the count field of this form as well.
    """
    def __init__(self, *args, **kwargs):
        # Fields are installed on base_fields before calling Form.__init__,
        # so they are picked up like ordinarily declared fields.
        self.base_fields[TOTAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        self.base_fields[INITIAL_FORM_COUNT] = IntegerField(widget=HiddenInput)
        # MIN_NUM_FORM_COUNT and MAX_NUM_FORM_COUNT are output with the rest of
        # the management form, but only for the convenience of client-side
        # code. The POST value of them returned from the client is not checked.
        self.base_fields[MIN_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        self.base_fields[MAX_NUM_FORM_COUNT] = IntegerField(required=False, widget=HiddenInput)
        super(ManagementForm, self).__init__(*args, **kwargs)
@html_safe
@python_2_unicode_compatible
class BaseFormSet(object):
    """
    A collection of instances of the same Form class.
    """
    def __init__(self, data=None, files=None, auto_id='id_%s', prefix=None,
                 initial=None, error_class=ErrorList, form_kwargs=None):
        # Bound means submitted data/files were supplied (even empty dicts
        # passed explicitly count as bound).
        self.is_bound = data is not None or files is not None
        self.prefix = prefix or self.get_default_prefix()
        self.auto_id = auto_id
        self.data = data or {}
        self.files = files or {}
        self.initial = initial
        self.form_kwargs = form_kwargs or {}
        self.error_class = error_class
        # Error caches; populated lazily by full_clean().
        self._errors = None
        self._non_form_errors = None

    def __str__(self):
        return self.as_table()

    def __iter__(self):
        """Yields the forms in the order they should be rendered"""
        return iter(self.forms)

    def __getitem__(self, index):
        """Returns the form at the given index, based on the rendering order"""
        return self.forms[index]

    def __len__(self):
        return len(self.forms)

    def __bool__(self):
        """All formsets have a management form which is not included in the length"""
        return True

    def __nonzero__(self):  # Python 2 compatibility
        return type(self).__bool__(self)

    @property
    def management_form(self):
        """Returns the ManagementForm instance for this FormSet."""
        if self.is_bound:
            form = ManagementForm(self.data, auto_id=self.auto_id, prefix=self.prefix)
            if not form.is_valid():
                raise ValidationError(
                    _('ManagementForm data is missing or has been tampered with'),
                    code='missing_management_form',
                )
        else:
            # Unbound: seed the management form with the counts this formset
            # will actually render.
            form = ManagementForm(auto_id=self.auto_id, prefix=self.prefix, initial={
                TOTAL_FORM_COUNT: self.total_form_count(),
                INITIAL_FORM_COUNT: self.initial_form_count(),
                MIN_NUM_FORM_COUNT: self.min_num,
                MAX_NUM_FORM_COUNT: self.max_num
            })
        return form

    def total_form_count(self):
        """Returns the total number of forms in this FormSet."""
        if self.is_bound:
            # return absolute_max if it is lower than the actual total form
            # count in the data; this is DoS protection to prevent clients
            # from forcing the server to instantiate arbitrary numbers of
            # forms
            return min(self.management_form.cleaned_data[TOTAL_FORM_COUNT], self.absolute_max)
        else:
            initial_forms = self.initial_form_count()
            total_forms = max(initial_forms, self.min_num) + self.extra
            # Allow all existing related objects/inlines to be displayed,
            # but don't allow extra beyond max_num.
            if initial_forms > self.max_num >= 0:
                total_forms = initial_forms
            elif total_forms > self.max_num >= 0:
                total_forms = self.max_num
        return total_forms

    def initial_form_count(self):
        """Returns the number of forms that are required in this FormSet."""
        if self.is_bound:
            return self.management_form.cleaned_data[INITIAL_FORM_COUNT]
        else:
            # Use the length of the initial data if it's there, 0 otherwise.
            initial_forms = len(self.initial) if self.initial else 0
        return initial_forms

    @cached_property
    def forms(self):
        """
        Instantiate forms at first property access.
        """
        # DoS protection is included in total_form_count()
        forms = [self._construct_form(i, **self.get_form_kwargs(i))
                 for i in range(self.total_form_count())]
        return forms

    def get_form_kwargs(self, index):
        """
        Return additional keyword arguments for each individual formset form.

        index will be None if the form being constructed is a new empty
        form.
        """
        # Copy so per-form mutation can't leak into other forms.
        return self.form_kwargs.copy()

    def _construct_form(self, i, **kwargs):
        """
        Instantiates and returns the i-th form instance in a formset.
        """
        defaults = {
            'auto_id': self.auto_id,
            'prefix': self.add_prefix(i),
            'error_class': self.error_class,
        }
        if self.is_bound:
            defaults['data'] = self.data
            defaults['files'] = self.files
        if self.initial and 'initial' not in kwargs:
            try:
                defaults['initial'] = self.initial[i]
            except IndexError:
                # Fewer initial dicts than forms: later forms get no initial.
                pass
        # Allow extra forms to be empty, unless they're part of
        # the minimum forms.
        if i >= self.initial_form_count() and i >= self.min_num:
            defaults['empty_permitted'] = True
        defaults.update(kwargs)
        form = self.form(**defaults)
        self.add_fields(form, i)
        return form

    @property
    def initial_forms(self):
        """Return a list of all the initial forms in this formset."""
        return self.forms[:self.initial_form_count()]

    @property
    def extra_forms(self):
        """Return a list of all the extra forms in this formset."""
        return self.forms[self.initial_form_count():]

    @property
    def empty_form(self):
        # Template form whose prefix contains the '__prefix__' placeholder,
        # intended for client-side duplication.
        form = self.form(
            auto_id=self.auto_id,
            prefix=self.add_prefix('__prefix__'),
            empty_permitted=True,
            **self.get_form_kwargs(None)
        )
        self.add_fields(form, None)
        return form

    @property
    def cleaned_data(self):
        """
        Returns a list of form.cleaned_data dicts for every form in self.forms.
        """
        if not self.is_valid():
            raise AttributeError("'%s' object has no attribute 'cleaned_data'" % self.__class__.__name__)
        return [form.cleaned_data for form in self.forms]

    @property
    def deleted_forms(self):
        """
        Returns a list of forms that have been marked for deletion.
        """
        if not self.is_valid() or not self.can_delete:
            return []
        # construct _deleted_form_indexes which is just a list of form indexes
        # that have had their deletion widget set to True
        if not hasattr(self, '_deleted_form_indexes'):
            self._deleted_form_indexes = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                if self._should_delete_form(form):
                    self._deleted_form_indexes.append(i)
        return [self.forms[i] for i in self._deleted_form_indexes]

    @property
    def ordered_forms(self):
        """
        Returns a list of form in the order specified by the incoming data.
        Raises an AttributeError if ordering is not allowed.
        """
        if not self.is_valid() or not self.can_order:
            raise AttributeError("'%s' object has no attribute 'ordered_forms'" % self.__class__.__name__)
        # Construct _ordering, which is a list of (form_index, order_field_value)
        # tuples. After constructing this list, we'll sort it by order_field_value
        # so we have a way to get to the form indexes in the order specified
        # by the form data.
        if not hasattr(self, '_ordering'):
            self._ordering = []
            for i in range(0, self.total_form_count()):
                form = self.forms[i]
                # if this is an extra form and hasn't changed, don't consider it
                if i >= self.initial_form_count() and not form.has_changed():
                    continue
                # don't add data marked for deletion to self.ordered_data
                if self.can_delete and self._should_delete_form(form):
                    continue
                self._ordering.append((i, form.cleaned_data[ORDERING_FIELD_NAME]))
            # After we're done populating self._ordering, sort it.
            # A sort function to order things numerically ascending, but
            # None should be sorted below anything else. Allowing None as
            # a comparison value makes it so we can leave ordering fields
            # blank.

            def compare_ordering_key(k):
                if k[1] is None:
                    return (1, 0)  # +infinity, larger than any number
                return (0, k[1])
            self._ordering.sort(key=compare_ordering_key)
        # Return a list of form.cleaned_data dicts in the order specified by
        # the form data.
        return [self.forms[i[0]] for i in self._ordering]

    @classmethod
    def get_default_prefix(cls):
        return 'form'

    def non_form_errors(self):
        """
        Returns an ErrorList of errors that aren't associated with a particular
        form -- i.e., from formset.clean(). Returns an empty ErrorList if there
        are none.
        """
        if self._non_form_errors is None:
            self.full_clean()
        return self._non_form_errors

    @property
    def errors(self):
        """
        Returns a list of form.errors for every form in self.forms.
        """
        if self._errors is None:
            self.full_clean()
        return self._errors

    def total_error_count(self):
        """
        Returns the number of errors across all forms in the formset.
        """
        return len(self.non_form_errors()) +\
            sum(len(form_errors) for form_errors in self.errors)

    def _should_delete_form(self, form):
        """
        Returns whether or not the form was marked for deletion.
        """
        return form.cleaned_data.get(DELETION_FIELD_NAME, False)

    def is_valid(self):
        """
        Returns True if every form in self.forms is valid.
        """
        if not self.is_bound:
            return False
        # We loop over every form.errors here rather than short circuiting on the
        # first failure to make sure validation gets triggered for every form.
        forms_valid = True
        # This triggers a full clean.
        self.errors
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            if self.can_delete:
                if self._should_delete_form(form):
                    # This form is going to be deleted so any of its errors
                    # should not cause the entire formset to be invalid.
                    continue
            forms_valid &= form.is_valid()
        return forms_valid and not self.non_form_errors()

    def full_clean(self):
        """
        Cleans all of self.data and populates self._errors and
        self._non_form_errors.
        """
        self._errors = []
        self._non_form_errors = self.error_class()
        if not self.is_bound:  # Stop further processing.
            return
        for i in range(0, self.total_form_count()):
            form = self.forms[i]
            self._errors.append(form.errors)
        try:
            # Deleted forms don't count towards the min/max totals.
            if (self.validate_max and
                    self.total_form_count() - len(self.deleted_forms) > self.max_num) or \
                    self.management_form.cleaned_data[TOTAL_FORM_COUNT] > self.absolute_max:
                raise ValidationError(ungettext(
                    "Please submit %d or fewer forms.",
                    "Please submit %d or fewer forms.", self.max_num) % self.max_num,
                    code='too_many_forms',
                )
            if (self.validate_min and
                    self.total_form_count() - len(self.deleted_forms) < self.min_num):
                raise ValidationError(ungettext(
                    "Please submit %d or more forms.",
                    "Please submit %d or more forms.", self.min_num) % self.min_num,
                    code='too_few_forms')
            # Give self.clean() a chance to do cross-form validation.
            self.clean()
        except ValidationError as e:
            self._non_form_errors = self.error_class(e.error_list)

    def clean(self):
        """
        Hook for doing any extra formset-wide cleaning after Form.clean() has
        been called on every form. Any ValidationError raised by this method
        will not be associated with a particular form; it will be accessible
        via formset.non_form_errors()
        """
        pass

    def has_changed(self):
        """
        Returns true if data in any form differs from initial.
        """
        return any(form.has_changed() for form in self)

    def add_fields(self, form, index):
        """A hook for adding extra fields on to each form instance."""
        if self.can_order:
            # Only pre-fill the ordering field for initial forms.
            if index is not None and index < self.initial_form_count():
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), initial=index + 1, required=False)
            else:
                form.fields[ORDERING_FIELD_NAME] = IntegerField(label=_('Order'), required=False)
        if self.can_delete:
            form.fields[DELETION_FIELD_NAME] = BooleanField(label=_('Delete'), required=False)

    def add_prefix(self, index):
        # Per-form HTML name prefix, e.g. 'form-0'.
        return '%s-%s' % (self.prefix, index)

    def is_multipart(self):
        """
        Returns True if the formset needs to be multipart, i.e. it
        has FileInput. Otherwise, False.
        """
        if self.forms:
            return self.forms[0].is_multipart()
        else:
            return self.empty_form.is_multipart()

    @property
    def media(self):
        # All the forms on a FormSet are the same, so you only need to
        # interrogate the first form for media.
        if self.forms:
            return self.forms[0].media
        else:
            return self.empty_form.media

    def as_table(self):
        "Returns this formset rendered as HTML <tr>s -- excluding the <table></table>."
        # XXX: there is no semantic division between forms here, there
        # probably should be. It might make sense to render each form as a
        # table row with each field as a td.
        forms = ' '.join(form.as_table() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))

    def as_p(self):
        "Returns this formset rendered as HTML <p>s."
        forms = ' '.join(form.as_p() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))

    def as_ul(self):
        "Returns this formset rendered as HTML <li>s."
        forms = ' '.join(form.as_ul() for form in self)
        return mark_safe('\n'.join([six.text_type(self.management_form), forms]))
def formset_factory(form, formset=BaseFormSet, extra=1, can_order=False,
                    can_delete=False, max_num=None, validate_max=False,
                    min_num=None, validate_min=False):
    """Return a FormSet for the given form class."""
    min_num = DEFAULT_MIN_NUM if min_num is None else min_num
    max_num = DEFAULT_MAX_NUM if max_num is None else max_num
    attrs = dict(
        form=form,
        extra=extra,
        can_order=can_order,
        can_delete=can_delete,
        min_num=min_num,
        max_num=max_num,
        # hard limit on forms instantiated, to prevent memory-exhaustion
        # attacks; simply max_num + DEFAULT_MAX_NUM (i.e. 2*DEFAULT_MAX_NUM
        # when max_num was not given).
        absolute_max=max_num + DEFAULT_MAX_NUM,
        validate_min=validate_min,
        validate_max=validate_max,
    )
    # Dynamically build the FormSet subclass, e.g. 'ArticleFormFormSet'.
    return type(form.__name__ + str('FormSet'), (formset,), attrs)
def all_valid(formsets):
    """Returns true if every formset in formsets is valid."""
    # Deliberately materialize the list first so that is_valid() runs on
    # EVERY formset (populating its errors) rather than short-circuiting
    # at the first invalid one.
    results = [formset.is_valid() for formset in formsets]
    return all(results)
| bsd-3-clause |
karpathy/simple-amt | launch_hits.py | 2 | 1876 | import argparse, json
from boto.mturk.price import Price
from boto.mturk.question import HTMLQuestion
from boto.mturk.connection import MTurkRequestError
import simpleamt
if __name__ == '__main__':
  # Command-line options extend the shared simpleamt parser (which
  # presumably supplies --config and --hit_ids_file; confirm in simpleamt).
  parser = argparse.ArgumentParser(parents=[simpleamt.get_parent_parser()])
  parser.add_argument('--hit_properties_file', type=argparse.FileType('r'))
  parser.add_argument('--html_template')
  parser.add_argument('--input_json_file', type=argparse.FileType('r'))
  args = parser.parse_args()

  # Connect to MTurk and load the shared HIT properties (title, reward, ...).
  mtc = simpleamt.get_mturk_connection_from_args(args)
  hit_properties = json.load(args.hit_properties_file)
  hit_properties['reward'] = Price(hit_properties['reward'])
  simpleamt.setup_qualifications(hit_properties)

  # frame_height is consumed by the HTMLQuestion wrapper, not by create_hit.
  frame_height = hit_properties.pop('frame_height')
  env = simpleamt.get_jinja_env(args.config)
  template = env.get_template(args.html_template)

  # One HIT per line of the input JSON-lines file.
  hit_ids = []
  for i, line in enumerate(args.input_json_file):
    hit_input = json.loads(line.strip())

    # In a previous version I removed all single quotes from the json dump.
    # TODO: double check to see if this is still necessary.
    template_params = { 'input': json.dumps(hit_input) }
    html = template.render(template_params)
    html_question = HTMLQuestion(html, frame_height)
    hit_properties['question'] = html_question

    # This error handling is kinda hacky: retry forever on any MTurk
    # request error.
    # TODO: Do something better here.
    launched = False
    while not launched:
      try:
        print 'Trying to launch HIT %d' % (i + 1)
        boto_hit = mtc.create_hit(**hit_properties)
        launched = True
      except MTurkRequestError as e:
        print e
    hit_id = boto_hit[0].HITId
    hit_ids.append(hit_id)

  # TODO: Should the hit ids file be mandatory?
  if args.hit_ids_file is not None:
    with open(args.hit_ids_file, 'w') as f:
      for hit_id in hit_ids:
        f.write('%s\n' % hit_id)
| mit |
caterzy/JackPlayer | TFTest/TFSaver_Meta.py | 1 | 1116 | # This Python file uses the following encoding: utf-8
__author__ = 'JackRao'
import tensorflow as tf
import os
from tensorflow.examples.tutorials.mnist import input_data

# Download/load MNIST with one-hot encoded labels.
mnist = input_data.read_data_sets("MNIST_data", one_hot=True)

# Softmax-regression model: y = softmax(x.W + b), 784 pixels -> 10 classes.
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)

# Placeholder for the one-hot ground-truth labels.
y_ = tf.placeholder(tf.float32, [None, 10])
# Cross-entropy loss, averaged over the batch.
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)

init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Register x and y in collections so a later import_meta_graph() caller can
# retrieve them by name without rebuilding the graph.
saver = tf.train.Saver()
tf.add_to_collection('x', x)
tf.add_to_collection('y', y)

# Train with mini-batches of 100 samples.
for i in range(10000):
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
print("Training OK")

# Persist the trained variables (and meta graph) to MNIST_data1/test.ckpt.
model_dir = "MNIST_data1"
model_name = "test.ckpt"
if not os.path.exists(model_dir):
    os.mkdir(model_dir)
saver.save(sess, os.path.join(model_dir, model_name))
print("Save OK")
| gpl-2.0 |
cohortfsllc/cohort-cocl2-sandbox | pnacl/unsupported/objdump-arm-canonicalizer.py | 12 | 1944 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""A simple tool for making objdump's disassemble dumps
for arm more canonical.

If two binaries have been generated with an almost identical code
generator, we expect the delta of the canoncalized dumps to be small
as well.
"""

import sys
import re

# keeps track of offset within a function
count = 0

# All ARM condition-code suffixes; '' covers the unconditional form.
PREDICATES = ["eq", "ne",
              "cs", "cc", "hs", "lo",  # cs == hs, cc == lo
              "mi", "pl",
              "vs", "vc",
              "hi", "ls",
              "ge", "lt",
              "gt", "le",
              "",
              ]

# Every predicated spelling of branch (b<cond>) and branch-and-link (bl<cond>).
BRANCHES = set(["b" + p for p in PREDICATES])
CALLS = set(["bl" + p for p in PREDICATES])

for line in sys.stdin:
  tokens = line.split()
  if re.search(r">:$", line):
    # we encountered a function beginning
    print "@@@@@@@@@@@@@@@", tokens[1]
    count = 0
  elif re.search(r"^ +[0-9a-f]+:", line):
    # we encountered an instruction, first strip the instruction address
    line = line[8:]
    opcode = tokens[2]
    if opcode in BRANCHES:
      # Rewrite:
      #   20104:	3a00000a 	bcc	20134 <recurse+0x74>
      # 44:	3a00000a 	bcc	<recurse+0x74>
      fr = r"(\s+" + opcode + r"\s+)[0-9a-f]+"
      to = r"\1"
      line = re.sub(fr, to, line)
    elif opcode in CALLS:
      # Rewrite:
      #   2001c:	eb00527f 	bl	34a20 <__register_frame_info>
      # 1c:	bl	<__register_frame_info>
      fr = r"[0-9a-f]+(\s+" + opcode + r"\s+)[0-9a-f]+"
      to = r"        \1"
      line = re.sub(fr, to, line)
    # replace the address which was stripped out above by an offset
    print "%8x" % count, line,
    count += 4
  else:
    # pass thru everything which is neither function beginning or instruction
    print line,
| bsd-3-clause |
smurfix/HomEvenT | irrigation/rainman/models/__init__.py | 1 | 2120 | # -*- coding: utf-8 -*-
## Copyright © 2012, Matthias Urlichs <matthias@urlichs.de>
##
## This program is free software: you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation, either version 3 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License (included; see the file LICENSE)
## for more details.
##
from __future__ import division,absolute_import
from django.db import models as m
from rainman.utils import now
class Model(m.Model):
    """Abstract base model for all rainman models: adds repr/refresh/update."""

    class Meta:
        abstract = True
        app_label = 'rainman'

    def __repr__(self):
        return u'‹%s %s›' % (self.__class__.__name__,unicode(self))

    def refresh(self):
        """Refreshes this instance from db"""
        from_db = self.__class__.objects.get(pk=self.pk)
        fields = self.__class__._meta.get_all_field_names()
        for field in fields:
            try:
                val = getattr(from_db, field)
            except AttributeError:
                # Field name with no matching attribute (e.g. reverse
                # accessors): skip it.
                continue
            # unfortunately these classes are not accessible externally
            # so we have to check by name
            if val.__class__.__name__ not in ("RelatedManager","ManyRelatedManager"):
                setattr(self, field, val)

    def sync(self):
        # No-op hook; subclasses may override.
        pass

    def shutdown(self):
        # No-op hook; subclasses may override.
        pass

    def update(self,**k):
        # Direct UPDATE via queryset; bypasses save() and does not refresh
        # this in-memory instance.
        self.__class__.objects.filter(id=self.id).update(**k)
from rainman.models.site import Site
from rainman.models.feed import Feed
from rainman.models.controller import Controller
from rainman.models.env import EnvGroup,EnvItem
from rainman.models.valve import Valve
from rainman.models.history import Level,History,Log
from rainman.models.day import Day,DayTime,DayRange
from rainman.models.group import Group
from rainman.models.override import GroupOverride,ValveOverride,GroupAdjust
from rainman.models.schedule import Schedule
from rainman.models.meter import RainMeter,TempMeter,WindMeter,SunMeter
from rainman.models.auth import UserForSite
| gpl-3.0 |
dfang/odoo | addons/procurement_jit/__manifest__.py | 40 | 1102 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
{
'name': 'Just In Time Scheduling',
'version': '1.0',
'category': 'Warehouse',
'description': """
This module will automatically reserve the picking from stock when a sale order is confirmed
============================================================================================
Upon confirmation of a sale order or when quantities are added,
the picking that reserves from stock will be reserved if the
necessary quantities are available.
In the simplest configurations, this is an easy way of working:
first come, first served. However, when not installed, you can
use manual reservation or run the schedulers where the system
will take into account the expected date and the priority.
If this automatic reservation would reserve too much, you can
still unreserve a picking.
""",
'website': 'https://www.odoo.com/page/manufacturing',
'depends': ['sale_stock'],
'data': [],
'demo': [],
'test': [],
'installable': True,
'auto_install': True,
}
| agpl-3.0 |
awkspace/ansible | test/units/modules/network/f5/test_bigip_gtm_topology_region.py | 17 | 4038 | # -*- coding: utf-8 -*-
#
# Copyright: (c) 2018, F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import os
import json
import pytest
import sys
if sys.version_info < (2, 7):
pytestmark = pytest.mark.skip("F5 Ansible modules require Python >= 2.7")
from ansible.module_utils.basic import AnsibleModule
try:
from library.modules.bigip_gtm_topology_region import ApiParameters
from library.modules.bigip_gtm_topology_region import ModuleParameters
from library.modules.bigip_gtm_topology_region import ModuleManager
from library.modules.bigip_gtm_topology_region import ArgumentSpec
# In Ansible 2.8, Ansible changed import paths.
from test.units.compat import unittest
from test.units.compat.mock import Mock
from test.units.compat.mock import patch
from test.units.modules.utils import set_module_args
except ImportError:
from ansible.modules.network.f5.bigip_gtm_topology_region import ApiParameters
from ansible.modules.network.f5.bigip_gtm_topology_region import ModuleParameters
from ansible.modules.network.f5.bigip_gtm_topology_region import ModuleManager
from ansible.modules.network.f5.bigip_gtm_topology_region import ArgumentSpec
# Ansible 2.8 imports
from units.compat import unittest
from units.compat.mock import Mock
from units.compat.mock import patch
from units.modules.utils import set_module_args
fixture_path = os.path.join(os.path.dirname(__file__), 'fixtures')
fixture_data = {}
def load_fixture(name):
    """Load a fixture file by name, with module-level caching.

    The file is looked up under ``fixture_path``. If its contents parse as
    JSON the decoded object is returned; otherwise the raw text is returned
    unchanged. Results are memoized in ``fixture_data`` keyed by full path.
    """
    path = os.path.join(fixture_path, name)

    if path in fixture_data:
        return fixture_data[path]

    with open(path) as f:
        data = f.read()

    try:
        data = json.loads(data)
    except ValueError:
        # Not JSON (json.JSONDecodeError subclasses ValueError): fall back
        # to the raw text. Previously this caught bare Exception, which
        # would also have hidden unrelated programming errors.
        pass

    fixture_data[path] = data
    return data
class TestParameters(unittest.TestCase):
    """Checks the module/API parameter adapters normalize region members."""

    def test_module_parameters(self):
        # Ansible-task style input: dicts with country/datacenter keys and an
        # optional negate flag.
        args = dict(
            name='foobar',
            region_members=[
                dict(
                    country='Poland',
                    negate=True
                ),
                dict(
                    datacenter='bazcenter'
                )
            ],
            partition='Common'
        )
        p = ModuleParameters(params=args)
        assert p.name == 'foobar'
        assert p.partition == 'Common'
        # Country names are mapped to ISO codes; datacenters get a full path.
        assert p.region_members == ['not country PL', 'datacenter /Common/bazcenter']

    def test_api_parameters(self):
        # BIG-IP REST style input: members already in device syntax.
        args = dict(
            name='foobar',
            region_members=[
                dict(
                    name='not country PL'
                ),
                dict(
                    name='datacenter /Common/bazcenter'
                )
            ],
            partition='Common'
        )
        p = ApiParameters(params=args)
        assert p.name == 'foobar'
        assert p.partition == 'Common'
        assert p.region_members == ['not country PL', 'datacenter /Common/bazcenter']
class TestManager(unittest.TestCase):
    """Exercises ModuleManager.exec_module with mocked device access."""

    def setUp(self):
        self.spec = ArgumentSpec()

    def test_create_topology_record(self, *args):
        set_module_args(dict(
            name='foobar',
            region_members=[
                dict(
                    country='Poland',
                    negate=True
                ),
                dict(
                    datacenter='bazcenter'
                )
            ],
            partition='Common'
        )
        )

        module = AnsibleModule(
            argument_spec=self.spec.argument_spec,
            supports_check_mode=self.spec.supports_check_mode
        )

        # Override methods in the specific type of manager
        # exists() first reports absent (triggers create), then present.
        mm = ModuleManager(module=module)
        mm.exists = Mock(side_effect=[False, True])
        mm.create_on_device = Mock(return_value=True)

        results = mm.exec_module()

        assert results['changed'] is True
| gpl-3.0 |
alexsmx/coto_frictionless | boilerplate/external/babel/messages/frontend.py | 67 | 51292 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2007 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at http://babel.edgewall.org/wiki/License.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at http://babel.edgewall.org/log/.
"""Frontends for the message extraction functionality."""
from ConfigParser import RawConfigParser
from datetime import datetime
from distutils import log
from distutils.cmd import Command
from distutils.errors import DistutilsOptionError, DistutilsSetupError
from locale import getpreferredencoding
import logging
from optparse import OptionParser
import os
import re
import shutil
from StringIO import StringIO
import sys
import tempfile
from babel import __version__ as VERSION
from babel import Locale, localedata
from babel.core import UnknownLocaleError
from babel.messages.catalog import Catalog
from babel.messages.extract import extract_from_dir, DEFAULT_KEYWORDS, \
DEFAULT_MAPPING
from babel.messages.mofile import write_mo
from babel.messages.pofile import read_po, write_po
from babel.messages.plurals import PLURALS
from babel.util import odict, LOCALTZ
__all__ = ['CommandLineInterface', 'compile_catalog', 'extract_messages',
'init_catalog', 'check_message_extractors', 'update_catalog']
__docformat__ = 'restructuredtext en'
class compile_catalog(Command):
    """Catalog compilation command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import compile_catalog
        setup(
            ...
            cmdclass = {'compile_catalog': compile_catalog}
        )
    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'compile message catalogs to binary MO files'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('directory=', 'd',
         'path to base directory containing the catalogs'),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('use-fuzzy', 'f',
         'also include fuzzy translations'),
        ('statistics', None,
         'print statistics about translations')
    ]
    boolean_options = ['use-fuzzy', 'statistics']

    def initialize_options(self):
        """Set the default values for all supported options."""
        self.domain = 'messages'
        self.directory = None
        self.input_file = None
        self.output_file = None
        self.locale = None
        self.use_fuzzy = False
        self.statistics = False

    def finalize_options(self):
        """Check that either explicit files or a base directory were given."""
        if not self.input_file and not self.directory:
            raise DistutilsOptionError('you must specify either the input file '
                                       'or the base directory')
        if not self.output_file and not self.directory:
            # Bug fix: this check concerns the *output* file; the message
            # previously repeated the input-file wording.
            raise DistutilsOptionError('you must specify either the output file '
                                       'or the base directory')

    def run(self):
        """Compile each selected PO catalog into the corresponding MO file."""
        po_files = []
        mo_files = []
        if not self.input_file:
            # No explicit input: discover catalogs under the base directory,
            # either for one requested locale or for every locale found.
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.directory, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))
            else:
                for locale in os.listdir(self.directory):
                    po_file = os.path.join(self.directory, locale,
                                           'LC_MESSAGES', self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(self.directory, locale,
                                                     'LC_MESSAGES',
                                                     self.domain + '.mo'))
        else:
            po_files.append((self.locale, self.input_file))
            if self.output_file:
                mo_files.append(self.output_file)
            else:
                mo_files.append(os.path.join(self.directory, self.locale,
                                             'LC_MESSAGES',
                                             self.domain + '.mo'))
        if not po_files:
            raise DistutilsOptionError('no message catalogs found')

        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()

            if self.statistics:
                translated = 0
                # Skip the first entry, which is the PO header message.
                for message in list(catalog)[1:]:
                    if message.string:
                        translated += 1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                log.info('%d of %d messages (%d%%) translated in %r',
                         translated, len(catalog), percentage, po_file)

            if catalog.fuzzy and not self.use_fuzzy:
                log.warn('catalog %r is marked as fuzzy, skipping', po_file)
                continue

            for message, errors in catalog.check():
                for error in errors:
                    log.error('error: %s:%d: %s', po_file, message.lineno,
                              error)

            log.info('compiling catalog %r to %r', po_file, mo_file)
            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=self.use_fuzzy)
            finally:
                outfile.close()
class extract_messages(Command):
    """Message extraction command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import extract_messages
        setup(
            ...
            cmdclass = {'extract_messages': extract_messages}
        )
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'extract localizable strings from the project code'
    user_options = [
        ('charset=', None,
         'charset to use in the output file'),
        ('keywords=', 'k',
         'space-separated list of keywords to look for in addition to the '
         'defaults'),
        ('no-default-keywords', None,
         'do not include the default keywords'),
        ('mapping-file=', 'F',
         'path to the mapping configuration file'),
        ('no-location', None,
         'do not include location comments with filename and line number'),
        ('omit-header', None,
         'do not include msgid "" entry in header'),
        ('output-file=', 'o',
         'name of the output file'),
        ('width=', 'w',
         'set output line width (default 76)'),
        ('no-wrap', None,
         'do not break long message lines, longer than the output line width, '
         'into several lines'),
        ('sort-output', None,
         'generate sorted output (default False)'),
        ('sort-by-file', None,
         'sort output by file location (default False)'),
        ('msgid-bugs-address=', None,
         'set report address for msgid'),
        ('copyright-holder=', None,
         'set copyright holder in output'),
        ('add-comments=', 'c',
         'place comment block with TAG (or those preceding keyword lines) in '
         'output file. Seperate multiple TAGs with commas(,)'),
        ('strip-comments', None,
         'strip the comment TAGs from the comments.'),
        ('input-dirs=', None,
         'directories that should be scanned for messages'),
    ]
    boolean_options = [
        'no-default-keywords', 'no-location', 'omit-header', 'no-wrap',
        'sort-output', 'sort-by-file', 'strip-comments'
    ]
    def initialize_options(self):
        # Defaults for all supported options.  The underscored twins
        # (_keywords, _add_comments) hold the parsed representations of the
        # corresponding raw option strings.
        self.charset = 'utf-8'
        self.keywords = ''
        self._keywords = DEFAULT_KEYWORDS.copy()
        self.no_default_keywords = False
        self.mapping_file = None
        self.no_location = False
        self.omit_header = False
        self.output_file = None
        self.input_dirs = None
        self.width = None
        self.no_wrap = False
        self.sort_output = False
        self.sort_by_file = False
        self.msgid_bugs_address = None
        self.copyright_holder = None
        self.add_comments = None
        self._add_comments = []
        self.strip_comments = False
    def finalize_options(self):
        # Validate mutually-exclusive option combinations and parse the raw
        # option strings into their internal representations.
        if self.no_default_keywords and not self.keywords:
            raise DistutilsOptionError('you must specify new keywords if you '
                                       'disable the default ones')
        if self.no_default_keywords:
            self._keywords = {}
        if self.keywords:
            self._keywords.update(parse_keywords(self.keywords.split()))
        if not self.output_file:
            raise DistutilsOptionError('no output file specified')
        if self.no_wrap and self.width:
            raise DistutilsOptionError("'--no-wrap' and '--width' are mutually "
                                       "exclusive")
        if not self.no_wrap and not self.width:
            self.width = 76
        elif self.width is not None:
            self.width = int(self.width)
        if self.sort_output and self.sort_by_file:
            raise DistutilsOptionError("'--sort-output' and '--sort-by-file' "
                                       "are mutually exclusive")
        if not self.input_dirs:
            # Default to the unique top-level packages declared in setup().
            self.input_dirs = dict.fromkeys([k.split('.',1)[0]
                for k in self.distribution.packages
            ]).keys()
        if self.add_comments:
            self._add_comments = self.add_comments.split(',')
    def run(self):
        # Extract messages from every mapped directory into one catalog and
        # write it out as a POT template file.
        mappings = self._get_mappings()
        outfile = open(self.output_file, 'w')
        try:
            catalog = Catalog(project=self.distribution.get_name(),
                              version=self.distribution.get_version(),
                              msgid_bugs_address=self.msgid_bugs_address,
                              copyright_holder=self.copyright_holder,
                              charset=self.charset)
            for dirname, (method_map, options_map) in mappings.items():
                def callback(filename, method, options):
                    # Log each file as the extractor visits it.
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    log.info('extracting messages from %s%s', filepath, optstr)
                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords=self._keywords,
                                             comment_tags=self._add_comments,
                                             callback=callback,
                                             strip_comment_tags=
                                                 self.strip_comments)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)
            log.info('writing PO template file to %s' % self.output_file)
            write_po(outfile, catalog, width=self.width,
                     no_location=self.no_location,
                     omit_header=self.omit_header,
                     sort_output=self.sort_output,
                     sort_by_file=self.sort_by_file)
        finally:
            outfile.close()
    def _get_mappings(self):
        # Build {dirname: (method_map, options_map)} from, in order of
        # precedence: an explicit mapping file, the distribution's
        # ``message_extractors`` setting, or the built-in default mapping.
        mappings = {}
        if self.mapping_file:
            fileobj = open(self.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
                for dirname in self.input_dirs:
                    mappings[dirname] = method_map, options_map
            finally:
                fileobj.close()
        elif getattr(self.distribution, 'message_extractors', None):
            message_extractors = self.distribution.message_extractors
            for dirname, mapping in message_extractors.items():
                if isinstance(mapping, basestring):
                    # A string value uses the mapping-file syntax.
                    method_map, options_map = parse_mapping(StringIO(mapping))
                else:
                    # Otherwise it is a list of (pattern, method, options).
                    method_map, options_map = [], {}
                    for pattern, method, options in mapping:
                        method_map.append((pattern, method))
                        options_map[pattern] = options or {}
                mappings[dirname] = method_map, options_map
        else:
            for dirname in self.input_dirs:
                mappings[dirname] = DEFAULT_MAPPING, {}
        return mappings
def check_message_extractors(dist, name, value):
    """Validate the ``message_extractors`` keyword argument to ``setup()``.
    Accepts any dictionary and rejects everything else.
    :param dist: the distutils/setuptools ``Distribution`` object
    :param name: the name of the keyword argument (should always be
                 "message_extractors")
    :param value: the value of the keyword argument
    :raise `DistutilsSetupError`: if the value is not valid
    :see: `Adding setup() arguments
          <http://peak.telecommunity.com/DevCenter/setuptools#adding-setup-arguments>`_
    """
    assert name == 'message_extractors'
    if isinstance(value, dict):
        return
    raise DistutilsSetupError('the value of the "message_extractors" '
                              'parameter must be a dictionary')
class init_catalog(Command):
    """New catalog initialization command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import init_catalog
        setup(
            ...
            cmdclass = {'init_catalog': init_catalog}
        )
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'create a new catalog based on a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to output directory'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale for the new localized catalog'),
    ]
    def initialize_options(self):
        # Defaults for all supported options.
        self.output_dir = None
        self.output_file = None
        self.input_file = None
        self.locale = None
        self.domain = 'messages'
    def finalize_options(self):
        # Validate required options, parse the locale, derive the output
        # file path and create its directory if necessary.
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.locale:
            raise DistutilsOptionError('you must provide a locale for the '
                                       'new catalog')
        try:
            self._locale = Locale.parse(self.locale)
        except UnknownLocaleError, e:
            raise DistutilsOptionError(e)
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output directory')
        if not self.output_file:
            self.output_file = os.path.join(self.output_dir, self.locale,
                                            'LC_MESSAGES', self.domain + '.po')
        if not os.path.exists(os.path.dirname(self.output_file)):
            os.makedirs(os.path.dirname(self.output_file))
    def run(self):
        # Read the POT template and write out a fresh, non-fuzzy PO catalog
        # for the requested locale.
        log.info('creating catalog %r based on %r', self.output_file,
                 self.input_file)
        infile = open(self.input_file, 'r')
        try:
            # Although reading from the catalog template, read_po must be fed
            # the locale in order to correctly calculate plurals
            catalog = read_po(infile, locale=self.locale)
        finally:
            infile.close()
        catalog.locale = self._locale
        catalog.fuzzy = False
        outfile = open(self.output_file, 'w')
        try:
            write_po(outfile, catalog)
        finally:
            outfile.close()
class update_catalog(Command):
    """Catalog merging command for use in ``setup.py`` scripts.
    If correctly installed, this command is available to Setuptools-using
    setup scripts automatically. For projects using plain old ``distutils``,
    the command needs to be registered explicitly in ``setup.py``::
        from babel.messages.frontend import update_catalog
        setup(
            ...
            cmdclass = {'update_catalog': update_catalog}
        )
    :since: version 0.9
    :see: `Integrating new distutils commands <http://docs.python.org/dist/node32.html>`_
    :see: `setuptools <http://peak.telecommunity.com/DevCenter/setuptools>`_
    """
    description = 'update message catalogs from a POT file'
    user_options = [
        ('domain=', 'D',
         "domain of PO file (default 'messages')"),
        ('input-file=', 'i',
         'name of the input file'),
        ('output-dir=', 'd',
         'path to base directory containing the catalogs'),
        ('output-file=', 'o',
         "name of the output file (default "
         "'<output_dir>/<locale>/LC_MESSAGES/<domain>.po')"),
        ('locale=', 'l',
         'locale of the catalog to compile'),
        ('ignore-obsolete=', None,
         'whether to omit obsolete messages from the output'),
        ('no-fuzzy-matching', 'N',
         'do not use fuzzy matching'),
        ('previous', None,
         'keep previous msgids of translated messages')
    ]
    # NOTE(review): unlike the other commands these entries use underscores
    # rather than the dashed option names, and 'ignore-obsolete=' is declared
    # above as taking a value while being listed here as boolean -- confirm
    # against distutils option handling.
    boolean_options = ['ignore_obsolete', 'no_fuzzy_matching', 'previous']
    def initialize_options(self):
        # Defaults for all supported options.
        self.domain = 'messages'
        self.input_file = None
        self.output_dir = None
        self.output_file = None
        self.locale = None
        self.ignore_obsolete = False
        self.no_fuzzy_matching = False
        self.previous = False
    def finalize_options(self):
        # Validate required options; --previous is meaningless without fuzzy
        # matching, so it is silently disabled in that case.
        if not self.input_file:
            raise DistutilsOptionError('you must specify the input file')
        if not self.output_file and not self.output_dir:
            raise DistutilsOptionError('you must specify the output file or '
                                       'directory')
        if self.output_file and not self.locale:
            raise DistutilsOptionError('you must specify the locale')
        if self.no_fuzzy_matching and self.previous:
            self.previous = False
    def run(self):
        # Collect the catalogs to update: either one explicit output file or
        # every catalog found below the output directory.
        po_files = []
        if not self.output_file:
            if self.locale:
                po_files.append((self.locale,
                                 os.path.join(self.output_dir, self.locale,
                                              'LC_MESSAGES',
                                              self.domain + '.po')))
            else:
                for locale in os.listdir(self.output_dir):
                    po_file = os.path.join(self.output_dir, locale,
                                           'LC_MESSAGES',
                                           self.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
        else:
            po_files.append((self.locale, self.output_file))
        domain = self.domain
        if not domain:
            domain = os.path.splitext(os.path.basename(self.input_file))[0]
        infile = open(self.input_file, 'U')
        try:
            template = read_po(infile)
        finally:
            infile.close()
        if not po_files:
            raise DistutilsOptionError('no message catalogs found')
        for locale, filename in po_files:
            log.info('updating catalog %r based on %r', filename,
                     self.input_file)
            infile = open(filename, 'U')
            try:
                catalog = read_po(infile, locale=locale, domain=domain)
            finally:
                infile.close()
            catalog.update(template, self.no_fuzzy_matching)
            # Write to a temporary file first so a failed write never
            # clobbers the existing catalog.
            tmpname = os.path.join(os.path.dirname(filename),
                                   tempfile.gettempprefix() +
                                   os.path.basename(filename))
            tmpfile = open(tmpname, 'w')
            try:
                try:
                    write_po(tmpfile, catalog,
                             ignore_obsolete=self.ignore_obsolete,
                             include_previous=self.previous)
                finally:
                    tmpfile.close()
            except:
                os.remove(tmpname)
                raise
            try:
                os.rename(tmpname, filename)
            except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python
                # If the error is in fact due to a permissions problem, that
                # same error is going to be raised from one of the following
                # operations
                os.remove(filename)
                shutil.copy(tmpname, filename)
                os.remove(tmpname)
class CommandLineInterface(object):
    """Command-line interface.
    This class provides a simple command-line interface to the message
    extraction and PO file generation functionality.
    """
    # '%%prog' survives the first %-formatting below so that optparse can
    # later substitute the program name.
    usage = '%%prog %s [options] %s'
    version = '%%prog %s' % VERSION
    # Subcommand name -> one-line description; used both for the help output
    # and for dispatching in run().
    commands = {
        'compile': 'compile message catalogs to MO files',
        'extract': 'extract messages from source files and generate a POT file',
        'init': 'create new message catalogs from a POT file',
        'update': 'update existing message catalogs from a POT file'
    }
    def run(self, argv=sys.argv):
        """Main entry point of the command-line interface.
        :param argv: list of arguments passed on the command-line
        """
        self.parser = OptionParser(usage=self.usage % ('command', '[args]'),
                                   version=self.version)
        self.parser.disable_interspersed_args()
        self.parser.print_help = self._help
        self.parser.add_option('--list-locales', dest='list_locales',
                               action='store_true',
                               help="print all known locales and exit")
        self.parser.add_option('-v', '--verbose', action='store_const',
                               dest='loglevel', const=logging.DEBUG,
                               help='print as much as possible')
        self.parser.add_option('-q', '--quiet', action='store_const',
                               dest='loglevel', const=logging.ERROR,
                               help='print as little as possible')
        self.parser.set_defaults(list_locales=False, loglevel=logging.INFO)
        options, args = self.parser.parse_args(argv[1:])
        self._configure_logging(options.loglevel)
        if options.list_locales:
            identifiers = localedata.list()
            # Pad the identifiers so the English names line up in columns.
            longest = max([len(identifier) for identifier in identifiers])
            identifiers.sort()
            format = u'%%-%ds %%s' % (longest + 1)
            for identifier in identifiers:
                locale = Locale.parse(identifier)
                output = format % (identifier, locale.english_name)
                print output.encode(sys.stdout.encoding or
                                    getpreferredencoding() or
                                    'ascii', 'replace')
            return 0
        if not args:
            self.parser.error('no valid command or option passed. '
                              'Try the -h/--help option for more information.')
        cmdname = args[0]
        if cmdname not in self.commands:
            self.parser.error('unknown command "%s"' % cmdname)
        # Dispatch to the method named after the subcommand.
        return getattr(self, cmdname)(args[1:])
    def _configure_logging(self, loglevel):
        """Configure the shared 'babel' logger with a single stream handler.
        :param loglevel: the level to apply to both logger and handler
        """
        self.log = logging.getLogger('babel')
        self.log.setLevel(loglevel)
        # Don't add a new handler for every instance initialization (#227), this
        # would cause duplicated output when the CommandLineInterface as an
        # normal Python class.
        if self.log.handlers:
            handler = self.log.handlers[0]
        else:
            handler = logging.StreamHandler()
            self.log.addHandler(handler)
        handler.setLevel(loglevel)
        formatter = logging.Formatter('%(message)s')
        handler.setFormatter(formatter)
    def _help(self):
        """Print the global usage message plus the list of subcommands."""
        print self.parser.format_help()
        print "commands:"
        longest = max([len(command) for command in self.commands])
        format = " %%-%ds %%s" % max(8, longest + 1)
        commands = self.commands.items()
        commands.sort()
        for name, description in commands:
            print format % (name, description)
    def compile(self, argv):
        """Subcommand for compiling a message catalog to a MO file.
        :param argv: the command arguments
        :since: version 0.9
        """
        parser = OptionParser(usage=self.usage % ('compile', ''),
                              description=self.commands['compile'])
        parser.add_option('--domain', '-D', dest='domain',
                          help="domain of MO and PO files (default '%default')")
        parser.add_option('--directory', '-d', dest='directory',
                          metavar='DIR', help='base directory of catalog files')
        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
                          help='locale of the catalog')
        parser.add_option('--input-file', '-i', dest='input_file',
                          metavar='FILE', help='name of the input file')
        parser.add_option('--output-file', '-o', dest='output_file',
                          metavar='FILE',
                          help="name of the output file (default "
                               "'<output_dir>/<locale>/LC_MESSAGES/"
                               "<domain>.mo')")
        parser.add_option('--use-fuzzy', '-f', dest='use_fuzzy',
                          action='store_true',
                          help='also include fuzzy translations (default '
                               '%default)')
        parser.add_option('--statistics', dest='statistics',
                          action='store_true',
                          help='print statistics about translations')
        # NOTE(review): 'compile_all' has no matching option and is never
        # read below -- looks vestigial.
        parser.set_defaults(domain='messages', use_fuzzy=False,
                            compile_all=False, statistics=False)
        options, args = parser.parse_args(argv)
        # Mirror of compile_catalog.run(): collect PO/MO file pairs either
        # from an explicit input file or by scanning the base directory.
        po_files = []
        mo_files = []
        if not options.input_file:
            if not options.directory:
                parser.error('you must specify either the input file or the '
                             'base directory')
            if options.locale:
                po_files.append((options.locale,
                                 os.path.join(options.directory,
                                              options.locale, 'LC_MESSAGES',
                                              options.domain + '.po')))
                mo_files.append(os.path.join(options.directory, options.locale,
                                             'LC_MESSAGES',
                                             options.domain + '.mo'))
            else:
                for locale in os.listdir(options.directory):
                    po_file = os.path.join(options.directory, locale,
                                           'LC_MESSAGES', options.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
                        mo_files.append(os.path.join(options.directory, locale,
                                                     'LC_MESSAGES',
                                                     options.domain + '.mo'))
        else:
            po_files.append((options.locale, options.input_file))
            if options.output_file:
                mo_files.append(options.output_file)
            else:
                if not options.directory:
                    parser.error('you must specify either the input file or '
                                 'the base directory')
                mo_files.append(os.path.join(options.directory, options.locale,
                                             'LC_MESSAGES',
                                             options.domain + '.mo'))
        if not po_files:
            parser.error('no message catalogs found')
        for idx, (locale, po_file) in enumerate(po_files):
            mo_file = mo_files[idx]
            infile = open(po_file, 'r')
            try:
                catalog = read_po(infile, locale)
            finally:
                infile.close()
            if options.statistics:
                # Skip the first entry, which is the PO header message.
                translated = 0
                for message in list(catalog)[1:]:
                    if message.string:
                        translated +=1
                percentage = 0
                if len(catalog):
                    percentage = translated * 100 // len(catalog)
                self.log.info("%d of %d messages (%d%%) translated in %r",
                              translated, len(catalog), percentage, po_file)
            if catalog.fuzzy and not options.use_fuzzy:
                self.log.warn('catalog %r is marked as fuzzy, skipping',
                              po_file)
                continue
            for message, errors in catalog.check():
                for error in errors:
                    self.log.error('error: %s:%d: %s', po_file, message.lineno,
                                   error)
            self.log.info('compiling catalog %r to %r', po_file, mo_file)
            outfile = open(mo_file, 'wb')
            try:
                write_mo(outfile, catalog, use_fuzzy=options.use_fuzzy)
            finally:
                outfile.close()
    def extract(self, argv):
        """Subcommand for extracting messages from source files and generating
        a POT file.
        :param argv: the command arguments
        """
        parser = OptionParser(usage=self.usage % ('extract', 'dir1 <dir2> ...'),
                              description=self.commands['extract'])
        parser.add_option('--charset', dest='charset',
                          help='charset to use in the output (default '
                               '"%default")')
        parser.add_option('-k', '--keyword', dest='keywords', action='append',
                          help='keywords to look for in addition to the '
                               'defaults. You can specify multiple -k flags on '
                               'the command line.')
        parser.add_option('--no-default-keywords', dest='no_default_keywords',
                          action='store_true',
                          help="do not include the default keywords")
        parser.add_option('--mapping', '-F', dest='mapping_file',
                          help='path to the extraction mapping file')
        parser.add_option('--no-location', dest='no_location',
                          action='store_true',
                          help='do not include location comments with filename '
                               'and line number')
        parser.add_option('--omit-header', dest='omit_header',
                          action='store_true',
                          help='do not include msgid "" entry in header')
        parser.add_option('-o', '--output', dest='output',
                          help='path to the output POT file')
        parser.add_option('-w', '--width', dest='width', type='int',
                          help="set output line width (default 76)")
        parser.add_option('--no-wrap', dest='no_wrap', action = 'store_true',
                          help='do not break long message lines, longer than '
                               'the output line width, into several lines')
        parser.add_option('--sort-output', dest='sort_output',
                          action='store_true',
                          help='generate sorted output (default False)')
        parser.add_option('--sort-by-file', dest='sort_by_file',
                          action='store_true',
                          help='sort output by file location (default False)')
        parser.add_option('--msgid-bugs-address', dest='msgid_bugs_address',
                          metavar='EMAIL@ADDRESS',
                          help='set report address for msgid')
        parser.add_option('--copyright-holder', dest='copyright_holder',
                          help='set copyright holder in output')
        parser.add_option('--project', dest='project',
                          help='set project name in output')
        parser.add_option('--version', dest='version',
                          help='set project version in output')
        parser.add_option('--add-comments', '-c', dest='comment_tags',
                          metavar='TAG', action='append',
                          help='place comment block with TAG (or those '
                               'preceding keyword lines) in output file. One '
                               'TAG per argument call')
        parser.add_option('--strip-comment-tags', '-s',
                          dest='strip_comment_tags', action='store_true',
                          help='Strip the comment tags from the comments.')
        parser.set_defaults(charset='utf-8', keywords=[],
                            no_default_keywords=False, no_location=False,
                            omit_header = False, width=None, no_wrap=False,
                            sort_output=False, sort_by_file=False,
                            comment_tags=[], strip_comment_tags=False)
        options, args = parser.parse_args(argv)
        if not args:
            parser.error('incorrect number of arguments')
        # '-' (or no -o) means write the POT to standard output.
        if options.output not in (None, '-'):
            outfile = open(options.output, 'w')
        else:
            outfile = sys.stdout
        keywords = DEFAULT_KEYWORDS.copy()
        if options.no_default_keywords:
            if not options.keywords:
                parser.error('you must specify new keywords if you disable the '
                             'default ones')
            keywords = {}
        if options.keywords:
            keywords.update(parse_keywords(options.keywords))
        if options.mapping_file:
            fileobj = open(options.mapping_file, 'U')
            try:
                method_map, options_map = parse_mapping(fileobj)
            finally:
                fileobj.close()
        else:
            method_map = DEFAULT_MAPPING
            options_map = {}
        if options.width and options.no_wrap:
            parser.error("'--no-wrap' and '--width' are mutually exclusive.")
        elif not options.width and not options.no_wrap:
            options.width = 76
        if options.sort_output and options.sort_by_file:
            parser.error("'--sort-output' and '--sort-by-file' are mutually "
                         "exclusive")
        try:
            catalog = Catalog(project=options.project,
                              version=options.version,
                              msgid_bugs_address=options.msgid_bugs_address,
                              copyright_holder=options.copyright_holder,
                              charset=options.charset)
            for dirname in args:
                if not os.path.isdir(dirname):
                    parser.error('%r is not a directory' % dirname)
                def callback(filename, method, options):
                    # Log each file as the extractor visits it.
                    if method == 'ignore':
                        return
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    optstr = ''
                    if options:
                        optstr = ' (%s)' % ', '.join(['%s="%s"' % (k, v) for
                                                      k, v in options.items()])
                    self.log.info('extracting messages from %s%s', filepath,
                                  optstr)
                extracted = extract_from_dir(dirname, method_map, options_map,
                                             keywords, options.comment_tags,
                                             callback=callback,
                                             strip_comment_tags=
                                                 options.strip_comment_tags)
                for filename, lineno, message, comments in extracted:
                    filepath = os.path.normpath(os.path.join(dirname, filename))
                    catalog.add(message, None, [(filepath, lineno)],
                                auto_comments=comments)
            if options.output not in (None, '-'):
                self.log.info('writing PO template file to %s' % options.output)
            write_po(outfile, catalog, width=options.width,
                     no_location=options.no_location,
                     omit_header=options.omit_header,
                     sort_output=options.sort_output,
                     sort_by_file=options.sort_by_file)
        finally:
            # NOTE(review): when output is '-', outfile is sys.stdout and
            # options.output is truthy, so this closes standard output --
            # confirm whether that is intended.
            if options.output:
                outfile.close()
    def init(self, argv):
        """Subcommand for creating new message catalogs from a template.
        :param argv: the command arguments
        """
        parser = OptionParser(usage=self.usage % ('init', ''),
                              description=self.commands['init'])
        parser.add_option('--domain', '-D', dest='domain',
                          help="domain of PO file (default '%default')")
        parser.add_option('--input-file', '-i', dest='input_file',
                          metavar='FILE', help='name of the input file')
        parser.add_option('--output-dir', '-d', dest='output_dir',
                          metavar='DIR', help='path to output directory')
        parser.add_option('--output-file', '-o', dest='output_file',
                          metavar='FILE',
                          help="name of the output file (default "
                               "'<output_dir>/<locale>/LC_MESSAGES/"
                               "<domain>.po')")
        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
                          help='locale for the new localized catalog')
        parser.set_defaults(domain='messages')
        options, args = parser.parse_args(argv)
        if not options.locale:
            parser.error('you must provide a locale for the new catalog')
        try:
            locale = Locale.parse(options.locale)
        except UnknownLocaleError, e:
            parser.error(e)
        if not options.input_file:
            parser.error('you must specify the input file')
        if not options.output_file and not options.output_dir:
            parser.error('you must specify the output file or directory')
        if not options.output_file:
            options.output_file = os.path.join(options.output_dir,
                                               options.locale, 'LC_MESSAGES',
                                               options.domain + '.po')
        if not os.path.exists(os.path.dirname(options.output_file)):
            os.makedirs(os.path.dirname(options.output_file))
        infile = open(options.input_file, 'r')
        try:
            # Although reading from the catalog template, read_po must be fed
            # the locale in order to correctly calculate plurals
            catalog = read_po(infile, locale=options.locale)
        finally:
            infile.close()
        catalog.locale = locale
        catalog.revision_date = datetime.now(LOCALTZ)
        self.log.info('creating catalog %r based on %r', options.output_file,
                      options.input_file)
        outfile = open(options.output_file, 'w')
        try:
            write_po(outfile, catalog)
        finally:
            outfile.close()
    def update(self, argv):
        """Subcommand for updating existing message catalogs from a template.
        :param argv: the command arguments
        :since: version 0.9
        """
        parser = OptionParser(usage=self.usage % ('update', ''),
                              description=self.commands['update'])
        parser.add_option('--domain', '-D', dest='domain',
                          help="domain of PO file (default '%default')")
        parser.add_option('--input-file', '-i', dest='input_file',
                          metavar='FILE', help='name of the input file')
        parser.add_option('--output-dir', '-d', dest='output_dir',
                          metavar='DIR', help='path to output directory')
        parser.add_option('--output-file', '-o', dest='output_file',
                          metavar='FILE',
                          help="name of the output file (default "
                               "'<output_dir>/<locale>/LC_MESSAGES/"
                               "<domain>.po')")
        parser.add_option('--locale', '-l', dest='locale', metavar='LOCALE',
                          help='locale of the translations catalog')
        parser.add_option('--ignore-obsolete', dest='ignore_obsolete',
                          action='store_true',
                          help='do not include obsolete messages in the output '
                               '(default %default)')
        parser.add_option('--no-fuzzy-matching', '-N', dest='no_fuzzy_matching',
                          action='store_true',
                          help='do not use fuzzy matching (default %default)')
        parser.add_option('--previous', dest='previous', action='store_true',
                          help='keep previous msgids of translated messages '
                               '(default %default)')
        parser.set_defaults(domain='messages', ignore_obsolete=False,
                            no_fuzzy_matching=False, previous=False)
        options, args = parser.parse_args(argv)
        if not options.input_file:
            parser.error('you must specify the input file')
        if not options.output_file and not options.output_dir:
            parser.error('you must specify the output file or directory')
        if options.output_file and not options.locale:
            parser.error('you must specify the locale')
        # --previous is meaningless without fuzzy matching.
        if options.no_fuzzy_matching and options.previous:
            options.previous = False
        # Collect the catalogs to update: one explicit file or every catalog
        # found below the output directory.
        po_files = []
        if not options.output_file:
            if options.locale:
                po_files.append((options.locale,
                                 os.path.join(options.output_dir,
                                              options.locale, 'LC_MESSAGES',
                                              options.domain + '.po')))
            else:
                for locale in os.listdir(options.output_dir):
                    po_file = os.path.join(options.output_dir, locale,
                                           'LC_MESSAGES',
                                           options.domain + '.po')
                    if os.path.exists(po_file):
                        po_files.append((locale, po_file))
        else:
            po_files.append((options.locale, options.output_file))
        domain = options.domain
        if not domain:
            domain = os.path.splitext(os.path.basename(options.input_file))[0]
        infile = open(options.input_file, 'U')
        try:
            template = read_po(infile)
        finally:
            infile.close()
        if not po_files:
            parser.error('no message catalogs found')
        for locale, filename in po_files:
            self.log.info('updating catalog %r based on %r', filename,
                          options.input_file)
            infile = open(filename, 'U')
            try:
                catalog = read_po(infile, locale=locale, domain=domain)
            finally:
                infile.close()
            catalog.update(template, options.no_fuzzy_matching)
            # Write to a temporary file first so a failed write never
            # clobbers the existing catalog.
            tmpname = os.path.join(os.path.dirname(filename),
                                   tempfile.gettempprefix() +
                                   os.path.basename(filename))
            tmpfile = open(tmpname, 'w')
            try:
                try:
                    write_po(tmpfile, catalog,
                             ignore_obsolete=options.ignore_obsolete,
                             include_previous=options.previous)
                finally:
                    tmpfile.close()
            except:
                os.remove(tmpname)
                raise
            try:
                os.rename(tmpname, filename)
            except OSError:
                # We're probably on Windows, which doesn't support atomic
                # renames, at least not through Python
                # If the error is in fact due to a permissions problem, that
                # same error is going to be raised from one of the following
                # operations
                os.remove(filename)
                shutil.copy(tmpname, filename)
                os.remove(tmpname)
def main():
    """Script entry point: run the command-line interface on ``sys.argv``."""
    cli = CommandLineInterface()
    return cli.run(sys.argv)
def parse_mapping(fileobj, filename=None):
    """Parse an extraction method mapping from a file-like object.
    >>> buf = StringIO('''
    ... [extractors]
    ... custom = mypackage.module:myfunc
    ...
    ... # Python source files
    ... [python: **.py]
    ...
    ... # Genshi templates
    ... [genshi: **/templates/**.html]
    ... include_attrs =
    ... [genshi: **/templates/**.txt]
    ... template_class = genshi.template:TextTemplate
    ... encoding = latin-1
    ...
    ... # Some custom extractor
    ... [custom: **/custom/*.*]
    ... ''')
    >>> method_map, options_map = parse_mapping(buf)
    >>> len(method_map)
    4
    >>> method_map[0]
    ('**.py', 'python')
    >>> options_map['**.py']
    {}
    >>> method_map[1]
    ('**/templates/**.html', 'genshi')
    >>> options_map['**/templates/**.html']['include_attrs']
    ''
    >>> method_map[2]
    ('**/templates/**.txt', 'genshi')
    >>> options_map['**/templates/**.txt']['template_class']
    'genshi.template:TextTemplate'
    >>> options_map['**/templates/**.txt']['encoding']
    'latin-1'
    >>> method_map[3]
    ('**/custom/*.*', 'mypackage.module:myfunc')
    >>> options_map['**/custom/*.*']
    {}
    :param fileobj: a readable file-like object containing the configuration
                    text to parse
    :param filename: the name of the file, used in error messages
    :return: a `(method_map, options_map)` tuple
    :rtype: `tuple`
    :see: `extract_from_directory`
    """
    extractors = {}
    method_map = []
    options_map = {}
    parser = RawConfigParser()
    parser._sections = odict(parser._sections) # We need ordered sections
    parser.readfp(fileobj, filename)
    for section in parser.sections():
        if section == 'extractors':
            # The special [extractors] section maps short names to the full
            # "module:callable" specs of custom extraction functions.
            extractors = dict(parser.items(section))
        else:
            # All other section headers look like "method: pattern".
            method, pattern = [part.strip() for part in section.split(':', 1)]
            method_map.append((pattern, method))
            options_map[pattern] = dict(parser.items(section))
    if extractors:
        # Resolve short extractor names to their full callable specs.
        for idx, (pattern, method) in enumerate(method_map):
            if method in extractors:
                method = extractors[method]
                method_map[idx] = (pattern, method)
    return (method_map, options_map)
def parse_keywords(strings=None):
    """Parse keywords specifications from the given list of strings.
    >>> kw = parse_keywords(['_', 'dgettext:2', 'dngettext:2,3']).items()
    >>> kw.sort()
    >>> for keyword, indices in kw:
    ...     print (keyword, indices)
    ('_', None)
    ('dgettext', (2,))
    ('dngettext', (2, 3))
    :param strings: sequence of keyword specs; each is either a bare function
                    name or ``name:idx[,idx...]`` listing the argument
                    positions that carry translatable strings
    :return: mapping of function name to a tuple of argument indices, or
             ``None`` when no indices were given for that function
    """
    keywords = {}
    # Avoid a mutable default argument: treat None (and empty) as no specs.
    for string in strings or ():
        if ':' in string:
            # Split on the first colon only, so the index list stays intact
            # even if it somehow contains further colons.
            funcname, indices = string.split(':', 1)
        else:
            funcname, indices = string, None
        # First specification of a function name wins; later ones are ignored.
        if funcname not in keywords:
            if indices:
                indices = tuple(int(x) for x in indices.split(','))
            keywords[funcname] = indices
    return keywords
if __name__ == '__main__':
main()
| lgpl-3.0 |
strahlc/exaile | plugins/replaygain/__init__.py | 3 | 4405 | # Copyright (C) 2009-2010 Aren Olson
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
#
# The developers of the Exaile media player hereby grant permission
# for non-GPL compatible GStreamer and Exaile plugins to be used and
# distributed together with GStreamer and Exaile. This permission is
# above and beyond the permissions granted by the GPL license by which
# Exaile is covered. If you modify this code, you may extend this
# exception to your version of the code, but you are not obligated to
# do so. If you do not wish to do so, delete this exception statement
# from your version.
from xl import providers, event, settings
from xl.player.gst.gst_utils import ElementBin
from gi.repository import Gst
try:
    import replaygainprefs

    def get_preferences_pane():
        """Return the module implementing the plugin's preferences UI."""
        return replaygainprefs
except ImportError:
    # Fail gracefully if we can't set up the UI.  Only swallow the import
    # failure itself -- a bare "except:" would also hide KeyboardInterrupt
    # and genuine programming errors.
    pass
NEEDED_ELEMS = ["rgvolume", "rglimiter"]
def enable(exaile):
    """Plugin entry point: check GStreamer support, then register filters."""
    missing = [element for element in NEEDED_ELEMS
               if not Gst.ElementFactory.find(element)]
    if missing:
        raise ImportError("Needed gstreamer element %s missing." % missing[0])

    for provider_class in (ReplaygainVolume, ReplaygainLimiter):
        providers.register("gst_audio_filter", provider_class)
def disable(exaile):
    """Plugin exit point: unregister both replaygain audio filters."""
    for provider_class in (ReplaygainVolume, ReplaygainLimiter):
        providers.unregister("gst_audio_filter", provider_class)
class ReplaygainVolume(ElementBin):
    """
    Handles replaygain volume adjustment and pre-amp.
    Placed at 20 in the pipeline, since most elements should do their
    processing after it.
    """
    index = 20
    name = "rgvolume"

    # rgvolume properties we mirror from settings, with their defaults.
    # The settings key is "replaygain/<option>" and the gst property name
    # is identical to the option name.
    _OPTION_DEFAULTS = {
        "album-mode": True,
        "pre-amp": 0,
        "fallback-gain": 0,
    }

    def __init__(self):
        ElementBin.__init__(self, name=self.name)

        self.audioconvert = Gst.ElementFactory.make("audioconvert", None)
        self.rgvol = Gst.ElementFactory.make("rgvolume", None)
        self.elements[40] = self.audioconvert
        self.elements[50] = self.rgvol
        self.setup_elements()

        event.add_ui_callback(self._on_option_set, "replaygain_option_set")

        # Prime the element from the currently stored settings.
        for option in ("album-mode", "pre-amp", "fallback-gain"):
            self._on_option_set("replaygain_option_set", None,
                                "replaygain/%s" % option)

    def _on_option_set(self, name, object, data):
        """Push a changed replaygain setting into the rgvolume element."""
        prefix, _, option = data.partition("/")
        if prefix != "replaygain" or option not in self._OPTION_DEFAULTS:
            return
        default = self._OPTION_DEFAULTS[option]
        self.rgvol.set_property(option, settings.get_option(data, default))
class ReplaygainLimiter(ElementBin):
    """
    Implements clipping protection.
    Placed at 80 in the pipeline so that other elements can come
    before it if necessary.
    """
    index = 80
    name = "rglimiter"

    def __init__(self):
        ElementBin.__init__(self, name=self.name)

        self.rglimit = Gst.ElementFactory.make("rglimiter", None)
        self.audioconvert = Gst.ElementFactory.make("audioconvert", None)
        self.elements[50] = self.rglimit
        self.elements[60] = self.audioconvert
        self.setup_elements()

        event.add_ui_callback(self._on_option_set, "replaygain_option_set")

        # Apply the stored setting once at construction time.
        self._on_option_set("replaygain_option_set", None,
                            "replaygain/clipping-protection")

    def _on_option_set(self, name, object, data):
        """Enable/disable the limiter when the relevant setting changes."""
        if data != "replaygain/clipping-protection":
            return
        enabled = settings.get_option("replaygain/clipping-protection", True)
        self.rglimit.set_property("enabled", enabled)
| gpl-2.0 |
simonwydooghe/ansible | lib/ansible/modules/net_tools/nios/nios_dns_view.py | 68 | 4192 | #!/usr/bin/python
# Copyright (c) 2018 Red Hat, Inc.
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'certified'}
DOCUMENTATION = '''
---
module: nios_dns_view
version_added: "2.5"
author: "Peter Sprygada (@privateip)"
short_description: Configure Infoblox NIOS DNS views
description:
- Adds and/or removes instances of DNS view objects from
Infoblox NIOS servers. This module manages NIOS C(view) objects
using the Infoblox WAPI interface over REST.
- Updates instances of DNS view object from Infoblox NIOS servers.
requirements:
- infoblox-client
extends_documentation_fragment: nios
options:
name:
description:
      - Specifies the name of the DNS view to add or remove from
        the system. Users can also update the name, as it is possible
        to pass a dict containing I(new_name), I(old_name). See examples.
required: true
aliases:
- view
network_view:
description:
- Specifies the name of the network view to assign the configured
DNS view to. The network view must already be configured on the
target system.
required: true
default: default
extattrs:
description:
- Allows for the configuration of Extensible Attributes on the
instance of the object. This argument accepts a set of key / value
pairs for configuration.
required: false
comment:
description:
- Configures a text string comment to be associated with the instance
of this object. The provided text string will be configured on the
object instance.
required: false
state:
description:
- Configures the intended state of the instance of the object on
the NIOS server. When this value is set to C(present), the object
is configured on the device and when this value is set to C(absent)
the value is removed (if necessary) from the device.
required: false
default: present
choices:
- present
- absent
'''
EXAMPLES = '''
- name: configure a new dns view instance
nios_dns_view:
name: ansible-dns
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update the comment for dns view
nios_dns_view:
name: ansible-dns
comment: this is an example comment
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: remove the dns view instance
nios_dns_view:
name: ansible-dns
state: absent
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
- name: update the dns view instance
nios_dns_view:
name: {new_name: ansible-dns-new, old_name: ansible-dns}
state: present
provider:
host: "{{ inventory_hostname_short }}"
username: admin
password: admin
connection: local
'''
RETURN = ''' # '''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.net_tools.nios.api import WapiModule
from ansible.module_utils.net_tools.nios.api import NIOS_DNS_VIEW
def main():
    ''' Main entry point for module execution
    '''
    # Argument definitions specific to the Infoblox DNS view object.
    ib_spec = dict(
        name=dict(required=True, aliases=['view'], ib_req=True),
        network_view=dict(default='default', ib_req=True),
        extattrs=dict(type='dict'),
        comment=dict(),
    )

    # Generic module arguments, extended with the object spec and the
    # WAPI connection provider options.
    argument_spec = dict(
        provider=dict(required=True),
        state=dict(default='present', choices=['present', 'absent']),
    )
    for extra_spec in (ib_spec, WapiModule.provider_spec):
        argument_spec.update(extra_spec)

    module = AnsibleModule(argument_spec=argument_spec,
                           supports_check_mode=True)

    result = WapiModule(module).run(NIOS_DNS_VIEW, ib_spec)
    module.exit_json(**result)
if __name__ == '__main__':
main()
| gpl-3.0 |
efeguney/gemmlowp | meta/generators/transform_kernels_arm_32.py | 7 | 1385 | # Copyright 2016 The Gemmlowp Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Generates the arm32 headers used by the gemm/gemv lib."""
import cc_emitter
import common
import neon_emitter
import transform_kernels_common
def Main():
    """Emit the arm32 gemm/gemv transform-kernel header."""
    emitter = cc_emitter.CCEmitter()

    common.GenerateHeader(emitter, 'gemmlowp_meta_transform_kernels_arm_32',
                          'GEMMLOWP_NEON_32')

    emitter.EmitNamespaceBegin('gemmlowp')
    emitter.EmitNamespaceBegin('meta')
    emitter.EmitNewline()

    # One kernel per (16, leftover) shape, leftover in 0..15.
    kernel_shapes = [(16, leftover) for leftover in range(16)]
    transform_kernels_common.GenerateKernels(emitter,
                                             neon_emitter.NeonEmitter(),
                                             kernel_shapes)

    emitter.EmitNamespaceEnd()
    emitter.EmitNamespaceEnd()
    emitter.EmitNewline()

    common.GenerateFooter(emitter,
                          'Meta gemm for arm32 requires: GEMMLOWP_NEON_32!')
if __name__ == '__main__':
Main()
| apache-2.0 |
khosrow/metpx | sarracenia/sarra/plugins/msg_sundew_pxroute.py | 1 | 2902 | #!/usr/bin/env python3
"""
Implement message filtering based on a routing table from MetPX-Sundew.
Make it easier to feed clients exactly the same products with sarracenia,
that they are used to with sundew.
the pxrouting option must be set in the configuration before the on_message
plugin is configured, like so:
msg_pxrouting /local/home/peter/src/pdspx/routing/etc/pxRouting.conf
msg_pxclient navcan-amis
on_message sundew_pxroute.py
"""
class SundewRoute(object):
    """Filter messages so that only products routed to a configured
    metpx-sundew client are accepted."""

    def __init__(self, parent):
        """
        For World Meteorological Organization message oriented routing.
        Read the configured metpx-sundew routing table, and figure out which
        Abbreviated Header Lines (AHL's) are configured to be sent to 'target'
        being careful to account for membership in clientAliases.
        init sets 'ahls_to_route' according to the contents of pxrouting
        """
        self.ahls_to_route = {}
        logger = parent.logger

        possible_references = parent.msg_pxclient[0].split(',')
        logger.info("sundew_pxroute, target clients: %s" % possible_references)

        # Use a context manager so the routing table is closed even if
        # parsing raises part-way through.
        with open(parent.msg_pxrouting[0], 'r') as pxrf:
            for line in pxrf:
                words = line.split()
                if (len(words) < 2) or words[0] == '#':
                    continue

                if words[0] == 'clientAlias':
                    expansion = words[2].split(',')
                    # If any client of interest is a member of this alias,
                    # the alias itself becomes a client of interest.
                    # Appending to the list being iterated is deliberate:
                    # it makes alias resolution transitive.
                    for i in possible_references:
                        if i in expansion:
                            possible_references.append(words[1])
                            logger.debug(
                                "sundew_pxroute adding clientAlias %s to possible_reference %s" %
                                (words[1], possible_references))
                            break  # avoid appending the same alias repeatedly
                    continue

                if words[0] == 'key':
                    expansion = words[2].split(',')
                    # Route this AHL if the key targets any client of interest.
                    if any(i in expansion for i in possible_references):
                        self.ahls_to_route[words[1]] = True

        logger.debug("sundew_pxroute For %s, the following headers are routed %s" %
                     (parent.msg_pxclient[0], self.ahls_to_route.keys()))

    def perform(self, parent):
        """Return True when the product's AHL is routed to the target client.

        The AHL is taken as the first 11 characters of the file name; a
        valid one has an underscore at index 6 (per the check below).
        """
        logger = parent.logger
        msg = parent.msg

        ahl = msg.local_file.split('/')[-1][0:11]
        if (len(ahl) < 11) or (ahl[6] != '_'):
            logger.debug("sundew_pxroute not an AHL: %s, " % ahl)
            return False

        # Membership test directly on the dict (no .keys() needed).
        if ahl in self.ahls_to_route:
            logger.debug("sundew_pxroute yes, deliver: %s, " % ahl)
            return True

        logger.debug("sundew_pxroute no, do not deliver: %s, " % ahl)
        return False
# at this point the parent is "self"
sundewroute=SundewRoute(self)
self.on_message = sundewroute.perform
| gpl-2.0 |
jonlooney/jxmlease | jxmlease/_parsehandler.py | 1 | 8605 | #!/usr/bin/env python
# Copyright (c) 2015-2016, Juniper Networks, Inc.
# All rights reserved.
#
# Copyright (C) 2012 Martin Blech and individual contributors.
#
# See the LICENSE file for further information.
"""Internal module that provides a common parsing handler."""
from __future__ import absolute_import
from . import OrderedDict, _unicode
from .dictnode import XMLDictNode
__all__ = []
class _GeneratorMatch(object):
# Essentially, a data structure used to hold information on matches.
def __init__(self, rooted=False, elements=None, depth=0, match_string=""):
if elements is None:
elements = []
self.rooted = rooted
self.elements = elements
self.depth = depth
self.match_string = match_string
class _DictSAXHandler(object):
    # A handler for SAX events.
    # parameters are documented under the Parser class.
    #
    # State overview:
    #   path   - list of element names from the root to the current element
    #   stack  - parent XMLDictNode objects saved while descending
    #   item   - the node currently being built (starts at self.root)
    #   matches - accumulated (path, match_string, node) generator hits
    #   in_ignore - True while we are above the shallowest generator match
    #               depth and therefore not building nodes
    def __init__(self,
                 xml_attribs=True,
                 strip_whitespace=True,
                 namespace_separator=_unicode(':'),
                 namespaces=None,
                 strip_namespace=False,
                 cdata_separator=_unicode(''),
                 generator=None):
        self.path = []
        self.stack = []
        self.matches = []
        self.root = XMLDictNode()
        self.item = self.root
        self.item_depth = 0
        self.xml_attribs = xml_attribs
        self.strip_whitespace = strip_whitespace
        self.namespace_separator = namespace_separator
        self.namespaces = namespaces
        self.strip_namespace = strip_namespace
        self.match_tests = []
        self.matches = []
        # 'generator' may be a single match expression or an iterable of them.
        if isinstance(generator, str):
            self.match_tests.append(self._parse_generator_matches(generator))
        elif generator is not None:
            for i in generator:
                self.match_tests.append(self._parse_generator_matches(i))
        # match_depth is the depth of the shallowest match expression; nodes
        # above that depth are ignored (not built) in generator mode.
        if len(self.match_tests) > 0:
            self.match_depth = 1000000 # effectively, infinity
            for match in self.match_tests:
                if match.depth < self.match_depth:
                    self.match_depth = match.depth
        else:
            self.match_depth = -1
        self.in_ignore = (self.match_depth > 0)
        self.cdata_separator = cdata_separator
        self.need_cdata_separator = False
        self.processing_started = False
    def _parse_generator_matches(self, match_string):
        # Turn a "/a/b/c"-style match expression into a _GeneratorMatch.
        match_obj = _GeneratorMatch(match_string=match_string)
        parsed_match_string = match_string.split("/")
        # Determine if we had a leading slash
        if parsed_match_string[0] == "":
            match_obj.rooted = True
            del parsed_match_string[0]
        # Pop a single trailing slash
        if parsed_match_string[-1] == "":
            del parsed_match_string[-1]
        # Verify there are no other empty elements.
        for i in parsed_match_string:
            if i == "":
                raise Warning(
                    "Match condition contains empty path elements (%s)" %
                    (match_string,)
                )
        # Get the depth and the element list.
        match_obj.depth = len(parsed_match_string)
        match_obj.elements = parsed_match_string
        return match_obj
    def _check_generator_matches(self):
        # Record a match if the current path satisfies any match expression.
        # Called when an element ends (and once at end of document).
        if self.match_depth > len(self.path):
            return
        for match in self.match_tests:
            # Rooted expressions must match the full path exactly; unrooted
            # ones match any suffix of the current path.
            if match.rooted and len(self.path) != match.depth:
                continue
            if not(match.rooted) and len(self.path) < match.depth:
                continue
            if match.elements == self.path[-match.depth:]:
                path = '/'.join([""] + self.path)
                if path == "":
                    path = _unicode('/')
                self.matches.append((path, match.match_string, self.item))
                break
    def _build_name(self, full_name):
        # Apply namespace stripping/mapping to an element or attribute name.
        if (not self.namespaces) and (not self.strip_namespace):
            return full_name
        i = full_name.rfind(self.namespace_separator)
        if i == -1:
            return full_name
        namespace, name = full_name[:i], full_name[i+1:]
        if self.strip_namespace:
            return name
        # Replace the namespace with its configured short form, if any.
        short_namespace = self.namespaces.get(namespace, namespace)
        if not short_namespace:
            return name
        else:
            return self.namespace_separator.join((short_namespace, name))
    def _attrs_to_dict(self, attrs):
        # Normalize SAX attributes (dict or flat key/value list) to an
        # OrderedDict, honoring strip_namespace.
        if isinstance(attrs, dict):
            rv = attrs
        else:
            rv = OrderedDict(zip(attrs[0::2], attrs[1::2])) # pylint: disable=zip-builtin-not-iterating
        if self.strip_namespace:
            # Drop xmlns declarations entirely, then strip the namespace
            # prefix from the remaining attribute names.
            for k in list(rv.keys()):
                if k == "xmlns" or k.startswith("xmlns" +
                                                self.namespace_separator):
                    del rv[k]
            for k in list(rv.keys()):
                if k.rfind(self.namespace_separator) >= 0:
                    newkey = k[k.rfind(self.namespace_separator) + 1:]
                    if newkey in rv:
                        raise ValueError("Stripping namespace causes duplicate "
                                         "attribute \"%s\"" % newkey)
                    rv[newkey] = rv[k]
                    del rv[k]
        return rv
    def start_element(self, full_name, attrs):
        """Handle the start of an element."""
        self.processing_started = True
        name = self._build_name(full_name)
        attrs = self._attrs_to_dict(attrs)
        self.path.append(name)
        if self.xml_attribs:
            attrs = OrderedDict(
                (self._build_name(key), value)
                for (key, value) in attrs.items()
            )
        else:
            attrs = OrderedDict()
        if self.in_ignore and len(self.path) >= self.match_depth:
            # We were ignoring lower levels of the hierarchy. Get a new
            # root.
            self.item = XMLDictNode()
            self.in_ignore = False
        if not self.in_ignore:
            # Add a new item
            newnode = self.item.add_node(name, xml_attrs=attrs)
            # Save the old item (which may have been updated).
            self.stack.append(self.item.get_current_node())
            # Change the current focus to the new item.
            self.item = newnode
            # We don't need a CDATA separator when starting an item.
            self.need_cdata_separator = False
    def end_element(self, full_name): # pylint: disable=unused-argument
        """Handle the end of an element."""
        if not self.in_ignore:
            if self.strip_whitespace:
                self.item = self.item.strip_cdata(return_node=True)
            self._check_generator_matches()
            # Return focus to the parent node saved in start_element().
            self.item = self.stack.pop()
        self.path.pop()
        if len(self.path) < self.match_depth:
            self.in_ignore = True
        if not self.in_ignore:
            # We may need a CDATA separator when ending an item.
            if len(self.item.get_cdata()) > 0:
                self.need_cdata_separator = True
    def characters(self, data):
        """Handle character data."""
        self.processing_started = True
        if not self.in_ignore:
            if self.need_cdata_separator:
                data = self.cdata_separator + data
                self.need_cdata_separator = False
            self.item = self.item.append_cdata(data, return_node=True)
    def end_document(self):
        """Handle the end of the document."""
        assert len(self.path) == 0, "endDocument() called with open elements"
        # Allow a final (root-level) generator match to be recorded.
        self._check_generator_matches()
    def pop_matches(self):
        """Return a match from the cache.
        When called as a generator, the calling function may process a
        chunk of the document and then pull matches, if any, from the
        cache using this function.
        This function also clears the match cache.
        Args:
            None
        Returns:
            A list which contains zero or more matches. Each match is
            a tuple of: ``(path,match_string,xml_node)``, where the
            *path* is the calculated absolute path to the matching
            node, *match_string* is the user-supplied match string
            that triggered the match, and *xml_node* is the object
            representing that node (an instance of a
            :py:class:`XMLNodeBase` subclass).
        """
        rv = self.matches
        self.matches = []
        return rv
| mit |
dmitry-r/incubator-airflow | tests/api/client/test_local_client.py | 20 | 5654 | # -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import datetime
import json
import unittest
from mock import patch
from airflow import AirflowException
from airflow.api.client.local_client import Client
from airflow import models
from airflow import settings
from airflow.utils.state import State
# Reference timestamps shared by the tests below; ISO form has the
# microseconds truncated, matching what trigger_dag uses for run ids.
EXECDATE = datetime.datetime.now()
EXECDATE_NOFRACTIONS = EXECDATE.replace(microsecond=0)
EXECDATE_ISO = EXECDATE_NOFRACTIONS.isoformat()
real_datetime_class = datetime.datetime
def mock_datetime_now(target, dt):
    """Return a patcher replacing ``dt.datetime`` with a fake class whose
    ``now()``/``utcnow()`` always return *target*.

    Real datetime instances still satisfy isinstance() checks against the
    patched class thanks to the custom metaclass.
    """
    class DatetimeSubclassMeta(type):
        # Make isinstance(obj, MockedDatetime) accept genuine datetimes.
        @classmethod
        def __instancecheck__(mcs, obj):
            return isinstance(obj, real_datetime_class)

    class BaseMockedDatetime(real_datetime_class):
        @classmethod
        def now(cls, tz=None):
            return target.replace(tzinfo=tz)

        @classmethod
        def utcnow(cls):
            return target

    # Build the final class through a metaclass call so the construction is
    # Python2 & Python3 compatible.
    mocked_datetime = DatetimeSubclassMeta('datetime',
                                           (BaseMockedDatetime,), {})
    return patch.object(dt, 'datetime', mocked_datetime)
class TestLocalClient(unittest.TestCase):
    # Exercises the local (in-process) API client against the metadata DB.
    # All tests start from an empty Pool table.
    @classmethod
    def setUpClass(cls):
        super(TestLocalClient, cls).setUpClass()
        # Clear any pre-existing pools so pool-count assertions are stable.
        session = settings.Session()
        session.query(models.Pool).delete()
        session.commit()
        session.close()
    def setUp(self):
        super(TestLocalClient, self).setUp()
        # api_base_url/auth are unused by the local client.
        self.client = Client(api_base_url=None, auth=None)
        self.session = settings.Session()
    def tearDown(self):
        # Remove pools created by the test to keep tests independent.
        self.session.query(models.Pool).delete()
        self.session.commit()
        self.session.close()
        super(TestLocalClient, self).tearDown()
    @patch.object(models.DAG, 'create_dagrun')
    def test_trigger_dag(self, mock):
        # Verifies the arguments trigger_dag passes to DAG.create_dagrun
        # for default, explicit-date, explicit-run-id and conf cases.
        client = self.client
        # non existent
        with self.assertRaises(AirflowException):
            client.trigger_dag(dag_id="blablabla")
        import airflow.api.common.experimental.trigger_dag
        # Freeze "now" inside the trigger_dag module so the auto-generated
        # execution date is predictable.
        with mock_datetime_now(EXECDATE, airflow.api.common.experimental.trigger_dag.datetime):
            # no execution date, execution date should be set automatically
            client.trigger_dag(dag_id="test_start_date_scheduling")
            mock.assert_called_once_with(run_id="manual__{0}".format(EXECDATE_ISO),
                                         execution_date=EXECDATE_NOFRACTIONS,
                                         state=State.RUNNING,
                                         conf=None,
                                         external_trigger=True)
            mock.reset_mock()
            # execution date with microseconds cutoff
            client.trigger_dag(dag_id="test_start_date_scheduling", execution_date=EXECDATE)
            mock.assert_called_once_with(run_id="manual__{0}".format(EXECDATE_ISO),
                                         execution_date=EXECDATE_NOFRACTIONS,
                                         state=State.RUNNING,
                                         conf=None,
                                         external_trigger=True)
            mock.reset_mock()
            # run id
            run_id = "my_run_id"
            client.trigger_dag(dag_id="test_start_date_scheduling", run_id=run_id)
            mock.assert_called_once_with(run_id=run_id,
                                         execution_date=EXECDATE_NOFRACTIONS,
                                         state=State.RUNNING,
                                         conf=None,
                                         external_trigger=True)
            mock.reset_mock()
            # test conf
            conf = '{"name": "John"}'
            client.trigger_dag(dag_id="test_start_date_scheduling", conf=conf)
            mock.assert_called_once_with(run_id="manual__{0}".format(EXECDATE_ISO),
                                         execution_date=EXECDATE_NOFRACTIONS,
                                         state=State.RUNNING,
                                         conf=json.loads(conf),
                                         external_trigger=True)
            mock.reset_mock()
    def test_get_pool(self):
        self.client.create_pool(name='foo', slots=1, description='')
        pool = self.client.get_pool(name='foo')
        self.assertEqual(pool, ('foo', 1, ''))
    def test_get_pools(self):
        self.client.create_pool(name='foo1', slots=1, description='')
        self.client.create_pool(name='foo2', slots=2, description='')
        # Sort by pool name for a deterministic comparison.
        pools = sorted(self.client.get_pools(), key=lambda p: p[0])
        self.assertEqual(pools, [('foo1', 1, ''), ('foo2', 2, '')])
    def test_create_pool(self):
        pool = self.client.create_pool(name='foo', slots=1, description='')
        self.assertEqual(pool, ('foo', 1, ''))
        self.assertEqual(self.session.query(models.Pool).count(), 1)
    def test_delete_pool(self):
        self.client.create_pool(name='foo', slots=1, description='')
        self.assertEqual(self.session.query(models.Pool).count(), 1)
        self.client.delete_pool(name='foo')
        self.assertEqual(self.session.query(models.Pool).count(), 0)
| apache-2.0 |
ff94315/hiwifi-openwrt-HC5661-HC5761 | staging_dir/host/lib/python2.7/test/test_datetime.py | 72 | 134923 | """Test date/time type.
See http://www.zope.org/Members/fdrake/DateTimeWiki/TestCases
"""
from __future__ import division
import sys
import pickle
import cPickle
import unittest
from test import test_support
from datetime import MINYEAR, MAXYEAR
from datetime import timedelta
from datetime import tzinfo
from datetime import time
from datetime import date, datetime
pickle_choices = [(pickler, unpickler, proto)
for pickler in pickle, cPickle
for unpickler in pickle, cPickle
for proto in range(3)]
assert len(pickle_choices) == 2*2*3
# An arbitrary collection of objects of non-datetime types, for testing
# mixed-type comparisons.
OTHERSTUFF = (10, 10L, 34.5, "abc", {}, [], ())
#############################################################################
# module tests
class TestModule(unittest.TestCase):

    def test_constants(self):
        # MINYEAR/MAXYEAR must be exposed as module-level constants.
        import datetime
        for attr, expected in (("MINYEAR", 1), ("MAXYEAR", 9999)):
            self.assertEqual(getattr(datetime, attr), expected)
#############################################################################
# tzinfo tests
class FixedOffset(tzinfo):
    """Concrete tzinfo with constant utcoffset/tzname/dst, used by tests.

    Offsets may be given as timedeltas or as plain minutes (int).
    """

    def __init__(self, offset, name, dstoffset=42):
        self.__offset = self.__as_timedelta(offset)
        self.__name = name
        self.__dstoffset = self.__as_timedelta(dstoffset)

    @staticmethod
    def __as_timedelta(value):
        # Accept an int as a count of minutes for convenience.
        return timedelta(minutes=value) if isinstance(value, int) else value

    def __repr__(self):
        return self.__name.lower()

    def utcoffset(self, dt):
        return self.__offset

    def tzname(self, dt):
        return self.__name

    def dst(self, dt):
        return self.__dstoffset
class PicklableFixedOffset(FixedOffset):
    # Same as FixedOffset, but with an argument-free constructor signature so
    # that pickle can reconstruct instances without constructor arguments.
    def __init__(self, offset=None, name=None, dstoffset=None):
        FixedOffset.__init__(self, offset, name, dstoffset)
class TestTZInfo(unittest.TestCase):
    # Tests for the abstract tzinfo base class and its pickling behavior.
    def test_non_abstractness(self):
        # In order to allow subclasses to get pickled, the C implementation
        # wasn't able to get away with having __init__ raise
        # NotImplementedError.
        useless = tzinfo()
        dt = datetime.max
        self.assertRaises(NotImplementedError, useless.tzname, dt)
        self.assertRaises(NotImplementedError, useless.utcoffset, dt)
        self.assertRaises(NotImplementedError, useless.dst, dt)
    def test_subclass_must_override(self):
        # A subclass that doesn't override the query methods still inherits
        # the NotImplementedError-raising defaults.
        class NotEnough(tzinfo):
            def __init__(self, offset, name):
                self.__offset = offset
                self.__name = name
        self.assertTrue(issubclass(NotEnough, tzinfo))
        ne = NotEnough(3, "NotByALongShot")
        self.assertIsInstance(ne, tzinfo)
        dt = datetime.now()
        self.assertRaises(NotImplementedError, ne.tzname, dt)
        self.assertRaises(NotImplementedError, ne.utcoffset, dt)
        self.assertRaises(NotImplementedError, ne.dst, dt)
    def test_normal(self):
        # A fully overriding subclass answers for both real and None dt.
        fo = FixedOffset(3, "Three")
        self.assertIsInstance(fo, tzinfo)
        for dt in datetime.now(), None:
            self.assertEqual(fo.utcoffset(dt), timedelta(minutes=3))
            self.assertEqual(fo.tzname(dt), "Three")
            self.assertEqual(fo.dst(dt), timedelta(minutes=42))
    def test_pickling_base(self):
        # There's no point to pickling tzinfo objects on their own (they
        # carry no data), but they need to be picklable anyway else
        # concrete subclasses can't be pickled.
        orig = tzinfo.__new__(tzinfo)
        self.assertTrue(type(orig) is tzinfo)
        # pickle_choices covers both pickle modules and protocols 0-2.
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertTrue(type(derived) is tzinfo)
    def test_pickling_subclass(self):
        # Make sure we can pickle/unpickle an instance of a subclass.
        offset = timedelta(minutes=-300)
        orig = PicklableFixedOffset(offset, 'cookie')
        self.assertIsInstance(orig, tzinfo)
        self.assertTrue(type(orig) is PicklableFixedOffset)
        self.assertEqual(orig.utcoffset(None), offset)
        self.assertEqual(orig.tzname(None), 'cookie')
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertIsInstance(derived, tzinfo)
            self.assertTrue(type(derived) is PicklableFixedOffset)
            self.assertEqual(derived.utcoffset(None), offset)
            self.assertEqual(derived.tzname(None), 'cookie')
#############################################################################
# Base clase for testing a particular aspect of timedelta, time, date and
# datetime comparisons.
class HarmlessMixedComparison:
    # Test that __eq__ and __ne__ don't complain for mixed-type comparisons.
    # Subclasses must define 'theclass', and theclass(1, 1, 1) must be a
    # legit constructor.
    # (Python 2 module: note the long literals (20L) and cmp() below.)
    def test_harmless_mixed_comparison(self):
        me = self.theclass(1, 1, 1)
        # Equality/inequality against unrelated types must not raise.
        self.assertFalse(me == ())
        self.assertTrue(me != ())
        self.assertFalse(() == me)
        self.assertTrue(() != me)
        # Containment relies on __eq__, so it must be harmless too.
        self.assertIn(me, [1, 20L, [], me])
        self.assertIn([], [me, 1, 20L, []])
    def test_harmful_mixed_comparison(self):
        me = self.theclass(1, 1, 1)
        # Ordering comparisons against unrelated types must raise TypeError.
        self.assertRaises(TypeError, lambda: me < ())
        self.assertRaises(TypeError, lambda: me <= ())
        self.assertRaises(TypeError, lambda: me > ())
        self.assertRaises(TypeError, lambda: me >= ())
        self.assertRaises(TypeError, lambda: () < me)
        self.assertRaises(TypeError, lambda: () <= me)
        self.assertRaises(TypeError, lambda: () > me)
        self.assertRaises(TypeError, lambda: () >= me)
        self.assertRaises(TypeError, cmp, (), me)
        self.assertRaises(TypeError, cmp, me, ())
#############################################################################
# timedelta tests
class TestTimeDelta(HarmlessMixedComparison, unittest.TestCase):
theclass = timedelta
def test_constructor(self):
eq = self.assertEqual
td = timedelta
# Check keyword args to constructor
eq(td(), td(weeks=0, days=0, hours=0, minutes=0, seconds=0,
milliseconds=0, microseconds=0))
eq(td(1), td(days=1))
eq(td(0, 1), td(seconds=1))
eq(td(0, 0, 1), td(microseconds=1))
eq(td(weeks=1), td(days=7))
eq(td(days=1), td(hours=24))
eq(td(hours=1), td(minutes=60))
eq(td(minutes=1), td(seconds=60))
eq(td(seconds=1), td(milliseconds=1000))
eq(td(milliseconds=1), td(microseconds=1000))
# Check float args to constructor
eq(td(weeks=1.0/7), td(days=1))
eq(td(days=1.0/24), td(hours=1))
eq(td(hours=1.0/60), td(minutes=1))
eq(td(minutes=1.0/60), td(seconds=1))
eq(td(seconds=0.001), td(milliseconds=1))
eq(td(milliseconds=0.001), td(microseconds=1))
def test_computations(self):
eq = self.assertEqual
td = timedelta
a = td(7) # One week
b = td(0, 60) # One minute
c = td(0, 0, 1000) # One millisecond
eq(a+b+c, td(7, 60, 1000))
eq(a-b, td(6, 24*3600 - 60))
eq(-a, td(-7))
eq(+a, td(7))
eq(-b, td(-1, 24*3600 - 60))
eq(-c, td(-1, 24*3600 - 1, 999000))
eq(abs(a), a)
eq(abs(-a), a)
eq(td(6, 24*3600), a)
eq(td(0, 0, 60*1000000), b)
eq(a*10, td(70))
eq(a*10, 10*a)
eq(a*10L, 10*a)
eq(b*10, td(0, 600))
eq(10*b, td(0, 600))
eq(b*10L, td(0, 600))
eq(c*10, td(0, 0, 10000))
eq(10*c, td(0, 0, 10000))
eq(c*10L, td(0, 0, 10000))
eq(a*-1, -a)
eq(b*-2, -b-b)
eq(c*-2, -c+-c)
eq(b*(60*24), (b*60)*24)
eq(b*(60*24), (60*b)*24)
eq(c*1000, td(0, 1))
eq(1000*c, td(0, 1))
eq(a//7, td(1))
eq(b//10, td(0, 6))
eq(c//1000, td(0, 0, 1))
eq(a//10, td(0, 7*24*360))
eq(a//3600000, td(0, 0, 7*24*1000))
# Issue #11576
eq(td(999999999, 86399, 999999) - td(999999999, 86399, 999998),
td(0, 0, 1))
eq(td(999999999, 1, 1) - td(999999999, 1, 0),
td(0, 0, 1))
def test_disallowed_computations(self):
a = timedelta(42)
# Add/sub ints, longs, floats should be illegal
for i in 1, 1L, 1.0:
self.assertRaises(TypeError, lambda: a+i)
self.assertRaises(TypeError, lambda: a-i)
self.assertRaises(TypeError, lambda: i+a)
self.assertRaises(TypeError, lambda: i-a)
# Mul/div by float isn't supported.
x = 2.3
self.assertRaises(TypeError, lambda: a*x)
self.assertRaises(TypeError, lambda: x*a)
self.assertRaises(TypeError, lambda: a/x)
self.assertRaises(TypeError, lambda: x/a)
self.assertRaises(TypeError, lambda: a // x)
self.assertRaises(TypeError, lambda: x // a)
# Division of int by timedelta doesn't make sense.
# Division by zero doesn't make sense.
for zero in 0, 0L:
self.assertRaises(TypeError, lambda: zero // a)
self.assertRaises(ZeroDivisionError, lambda: a // zero)
def test_basic_attributes(self):
days, seconds, us = 1, 7, 31
td = timedelta(days, seconds, us)
self.assertEqual(td.days, days)
self.assertEqual(td.seconds, seconds)
self.assertEqual(td.microseconds, us)
def test_total_seconds(self):
td = timedelta(days=365)
self.assertEqual(td.total_seconds(), 31536000.0)
for total_seconds in [123456.789012, -123456.789012, 0.123456, 0, 1e6]:
td = timedelta(seconds=total_seconds)
self.assertEqual(td.total_seconds(), total_seconds)
# Issue8644: Test that td.total_seconds() has the same
# accuracy as td / timedelta(seconds=1).
for ms in [-1, -2, -123]:
td = timedelta(microseconds=ms)
self.assertEqual(td.total_seconds(),
((24*3600*td.days + td.seconds)*10**6
+ td.microseconds)/10**6)
def test_carries(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1e6 + 1)
t2 = timedelta(microseconds=1)
self.assertEqual(t1, t2)
def test_hash_equality(self):
t1 = timedelta(days=100,
weeks=-7,
hours=-24*(100-49),
minutes=-3,
seconds=12,
microseconds=(3*60 - 12) * 1000000)
t2 = timedelta()
self.assertEqual(hash(t1), hash(t2))
t1 += timedelta(weeks=7)
t2 += timedelta(days=7*7)
self.assertEqual(t1, t2)
self.assertEqual(hash(t1), hash(t2))
d = {t1: 1}
d[t2] = 2
self.assertEqual(len(d), 1)
self.assertEqual(d[t1], 2)
def test_pickling(self):
    """timedelta round-trips through every pickler/protocol combination."""
    original = timedelta(12, 34, 56)
    for pickler, unpickler, proto in pickle_choices:
        restored = unpickler.loads(pickler.dumps(original, proto))
        self.assertEqual(original, restored)
def test_compare(self):
    """Total ordering of timedeltas, plus mixed-type comparison rules.

    ==/!= against unrelated objects simply report inequality, while the
    ordering operators raise TypeError.

    NOTE(review): relies on the Python 2 builtin cmp() and on the
    module-level OTHERSTUFF collection of non-timedelta objects.
    """
    t1 = timedelta(2, 3, 4)
    t2 = timedelta(2, 3, 4)
    # Reflexive case: every comparison agrees the values are equal.
    self.assertTrue(t1 == t2)
    self.assertTrue(t1 <= t2)
    self.assertTrue(t1 >= t2)
    self.assertTrue(not t1 != t2)
    self.assertTrue(not t1 < t2)
    self.assertTrue(not t1 > t2)
    self.assertEqual(cmp(t1, t2), 0)
    self.assertEqual(cmp(t2, t1), 0)
    # Bump each field in turn; any single larger field makes t2 larger.
    for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
        t2 = timedelta(*args)   # this is larger than t1
        self.assertTrue(t1 < t2)
        self.assertTrue(t2 > t1)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t2 >= t1)
        self.assertTrue(t1 != t2)
        self.assertTrue(t2 != t1)
        self.assertTrue(not t1 == t2)
        self.assertTrue(not t2 == t1)
        self.assertTrue(not t1 > t2)
        self.assertTrue(not t2 < t1)
        self.assertTrue(not t1 >= t2)
        self.assertTrue(not t2 <= t1)
        self.assertEqual(cmp(t1, t2), -1)
        self.assertEqual(cmp(t2, t1), 1)
    # Comparison with arbitrary non-timedelta objects.
    for badarg in OTHERSTUFF:
        self.assertEqual(t1 == badarg, False)
        self.assertEqual(t1 != badarg, True)
        self.assertEqual(badarg == t1, False)
        self.assertEqual(badarg != t1, True)
        self.assertRaises(TypeError, lambda: t1 <= badarg)
        self.assertRaises(TypeError, lambda: t1 < badarg)
        self.assertRaises(TypeError, lambda: t1 > badarg)
        self.assertRaises(TypeError, lambda: t1 >= badarg)
        self.assertRaises(TypeError, lambda: badarg <= t1)
        self.assertRaises(TypeError, lambda: badarg < t1)
        self.assertRaises(TypeError, lambda: badarg > t1)
        self.assertRaises(TypeError, lambda: badarg >= t1)
def test_str(self):
td = timedelta
eq = self.assertEqual
eq(str(td(1)), "1 day, 0:00:00")
eq(str(td(-1)), "-1 day, 0:00:00")
eq(str(td(2)), "2 days, 0:00:00")
eq(str(td(-2)), "-2 days, 0:00:00")
eq(str(td(hours=12, minutes=58, seconds=59)), "12:58:59")
eq(str(td(hours=2, minutes=3, seconds=4)), "2:03:04")
eq(str(td(weeks=-30, hours=23, minutes=12, seconds=34)),
"-210 days, 23:12:34")
eq(str(td(milliseconds=1)), "0:00:00.001000")
eq(str(td(microseconds=3)), "0:00:00.000003")
eq(str(td(days=999999999, hours=23, minutes=59, seconds=59,
microseconds=999999)),
"999999999 days, 23:59:59.999999")
def test_roundtrip(self):
for td in (timedelta(days=999999999, hours=23, minutes=59,
seconds=59, microseconds=999999),
timedelta(days=-999999999),
timedelta(days=1, seconds=2, microseconds=3)):
# Verify td -> string -> td identity.
s = repr(td)
self.assertTrue(s.startswith('datetime.'))
s = s[9:]
td2 = eval(s)
self.assertEqual(td, td2)
# Verify identity via reconstructing from pieces.
td2 = timedelta(td.days, td.seconds, td.microseconds)
self.assertEqual(td, td2)
def test_resolution_info(self):
self.assertIsInstance(timedelta.min, timedelta)
self.assertIsInstance(timedelta.max, timedelta)
self.assertIsInstance(timedelta.resolution, timedelta)
self.assertTrue(timedelta.max > timedelta.min)
self.assertEqual(timedelta.min, timedelta(-999999999))
self.assertEqual(timedelta.max, timedelta(999999999, 24*3600-1, 1e6-1))
self.assertEqual(timedelta.resolution, timedelta(0, 0, 1))
def test_overflow(self):
tiny = timedelta.resolution
td = timedelta.min + tiny
td -= tiny # no problem
self.assertRaises(OverflowError, td.__sub__, tiny)
self.assertRaises(OverflowError, td.__add__, -tiny)
td = timedelta.max - tiny
td += tiny # no problem
self.assertRaises(OverflowError, td.__add__, tiny)
self.assertRaises(OverflowError, td.__sub__, -tiny)
self.assertRaises(OverflowError, lambda: -timedelta.max)
def test_microsecond_rounding(self):
td = timedelta
eq = self.assertEqual
# Single-field rounding.
eq(td(milliseconds=0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=-0.4/1000), td(0)) # rounds to 0
eq(td(milliseconds=0.6/1000), td(microseconds=1))
eq(td(milliseconds=-0.6/1000), td(microseconds=-1))
# Rounding due to contributions from more than one field.
us_per_hour = 3600e6
us_per_day = us_per_hour * 24
eq(td(days=.4/us_per_day), td(0))
eq(td(hours=.2/us_per_hour), td(0))
eq(td(days=.4/us_per_day, hours=.2/us_per_hour), td(microseconds=1))
eq(td(days=-.4/us_per_day), td(0))
eq(td(hours=-.2/us_per_hour), td(0))
eq(td(days=-.4/us_per_day, hours=-.2/us_per_hour), td(microseconds=-1))
def test_massive_normalization(self):
td = timedelta(microseconds=-1)
self.assertEqual((td.days, td.seconds, td.microseconds),
(-1, 24*3600-1, 999999))
def test_bool(self):
self.assertTrue(timedelta(1))
self.assertTrue(timedelta(0, 1))
self.assertTrue(timedelta(0, 0, 1))
self.assertTrue(timedelta(microseconds=1))
self.assertTrue(not timedelta(0))
def test_subclass_timedelta(self):
    """timedelta subclasses: construction keeps the subclass, but
    arithmetic results degrade to plain timedelta."""
    class T(timedelta):
        @staticmethod
        def from_td(td):
            # Alternate constructor: promote a plain timedelta to T.
            return T(td.days, td.seconds, td.microseconds)
        def as_hours(self):
            sum = (self.days * 24 +
                   self.seconds / 3600.0 +
                   self.microseconds / 3600e6)
            return round(sum)
    t1 = T(days=1)
    self.assertTrue(type(t1) is T)
    self.assertEqual(t1.as_hours(), 24)
    t2 = T(days=-1, seconds=-3600)
    self.assertTrue(type(t2) is T)
    self.assertEqual(t2.as_hours(), -25)
    # Binary operators on subclass instances return the base class.
    t3 = t1 + t2
    self.assertTrue(type(t3) is timedelta)
    t4 = T.from_td(t3)
    self.assertTrue(type(t4) is T)
    self.assertEqual(t3.days, t4.days)
    self.assertEqual(t3.seconds, t4.seconds)
    self.assertEqual(t3.microseconds, t4.microseconds)
    self.assertEqual(str(t3), str(t4))
    self.assertEqual(t4.as_hours(), -1)
#############################################################################
# date tests
class TestDateOnly(unittest.TestCase):
    """Tests that hold for plain dates but NOT for datetimes, so this
    class must not be subclassed to test datetimes too."""

    def test_delta_non_days_ignored(self):
        """date +/- timedelta uses only the (signed) whole-day component."""
        base = date(2000, 1, 2)
        delta = timedelta(days=1, hours=2, minutes=3, seconds=4,
                          microseconds=5)
        for signed in (delta, -delta):
            whole_days = timedelta(signed.days)
            # Sanity-check the day component we expect to be used:
            # +delta truncates to 1 day, -delta normalizes to -2 days.
            self.assertEqual(whole_days,
                             timedelta(1) if signed is delta
                             else timedelta(-2))
            self.assertEqual(base + signed, base + whole_days)
            self.assertEqual(signed + base, base + whole_days)
            self.assertEqual(base - signed, base - whole_days)
class SubclassDate(date):
    # Trivial date subclass used by the pickling tests to verify that
    # the subclass (and this extra class attribute) survives a round-trip.
    sub_var = 1
class TestDate(HarmlessMixedComparison, unittest.TestCase):
    """Exercise the datetime.date API end to end."""

    # Tests here should pass for both dates and datetimes, except for a
    # few tests that TestDateTime overrides.
    theclass = date

    def test_basic_attributes(self):
        """year/month/day mirror the constructor arguments."""
        dt = self.theclass(2002, 3, 1)
        self.assertEqual(dt.year, 2002)
        self.assertEqual(dt.month, 3)
        self.assertEqual(dt.day, 1)

    def test_roundtrip(self):
        """repr() round-trips via eval(); fields round-trip via the
        constructor."""
        for dt in (self.theclass(1, 2, 3),
                   self.theclass.today()):
            # Verify dt -> string -> date identity.
            s = repr(dt)
            self.assertTrue(s.startswith('datetime.'))
            s = s[9:]
            dt2 = eval(s)
            self.assertEqual(dt, dt2)
            # Verify identity via reconstructing from pieces.
            dt2 = self.theclass(dt.year, dt.month, dt.day)
            self.assertEqual(dt, dt2)

    def test_ordinal_conversions(self):
        """toordinal() and fromordinal() are mutually inverse."""
        # Check some fixed values.
        for y, m, d, n in [(1, 1, 1, 1),      # calendar origin
                           (1, 12, 31, 365),
                           (2, 1, 1, 366),
                           # first example from "Calendrical Calculations"
                           (1945, 11, 12, 710347)]:
            d = self.theclass(y, m, d)
            self.assertEqual(n, d.toordinal())
            fromord = self.theclass.fromordinal(n)
            self.assertEqual(d, fromord)
            if hasattr(fromord, "hour"):
                # if we're checking something fancier than a date, verify
                # the extra fields have been zeroed out
                self.assertEqual(fromord.hour, 0)
                self.assertEqual(fromord.minute, 0)
                self.assertEqual(fromord.second, 0)
                self.assertEqual(fromord.microsecond, 0)

        # Check first and last days of year spottily across the whole
        # range of years supported.
        for year in xrange(MINYEAR, MAXYEAR+1, 7):
            # Verify (year, 1, 1) -> ordinal -> y, m, d is identity.
            d = self.theclass(year, 1, 1)
            n = d.toordinal()
            d2 = self.theclass.fromordinal(n)
            self.assertEqual(d, d2)
            # Verify that moving back a day gets to the end of year-1.
            if year > 1:
                d = self.theclass.fromordinal(n-1)
                d2 = self.theclass(year-1, 12, 31)
                self.assertEqual(d, d2)
                self.assertEqual(d2.toordinal(), n-1)

        # Test every day in a leap-year and a non-leap year.
        dim = [31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31]
        for year, isleap in (2000, True), (2002, False):
            n = self.theclass(year, 1, 1).toordinal()
            for month, maxday in zip(range(1, 13), dim):
                if month == 2 and isleap:
                    maxday += 1
                for day in range(1, maxday+1):
                    d = self.theclass(year, month, day)
                    self.assertEqual(d.toordinal(), n)
                    self.assertEqual(d, self.theclass.fromordinal(n))
                    n += 1

    def test_extreme_ordinals(self):
        """Ordinal round-trips at the very edges of the supported range."""
        a = self.theclass.min
        a = self.theclass(a.year, a.month, a.day)   # get rid of time parts
        aord = a.toordinal()
        b = a.fromordinal(aord)
        self.assertEqual(a, b)
        # One step below the minimum ordinal is rejected.
        self.assertRaises(ValueError, lambda: a.fromordinal(aord - 1))
        b = a + timedelta(days=1)
        self.assertEqual(b.toordinal(), aord + 1)
        self.assertEqual(b, self.theclass.fromordinal(aord + 1))

        a = self.theclass.max
        a = self.theclass(a.year, a.month, a.day)   # get rid of time parts
        aord = a.toordinal()
        b = a.fromordinal(aord)
        self.assertEqual(a, b)
        # One step above the maximum ordinal is rejected.
        self.assertRaises(ValueError, lambda: a.fromordinal(aord + 1))
        b = a - timedelta(days=1)
        self.assertEqual(b.toordinal(), aord - 1)
        self.assertEqual(b, self.theclass.fromordinal(aord - 1))

    def test_bad_constructor_arguments(self):
        """Out-of-range year/month/day values raise ValueError."""
        # bad years
        self.theclass(MINYEAR, 1, 1)  # no exception
        self.theclass(MAXYEAR, 1, 1)  # no exception
        self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
        self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
        # bad months
        self.theclass(2000, 1, 1)    # no exception
        self.theclass(2000, 12, 1)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
        self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
        # bad days
        self.theclass(2000, 2, 29)   # no exception
        self.theclass(2004, 2, 29)   # no exception
        self.theclass(2400, 2, 29)   # no exception
        self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
        self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
        self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
        self.assertRaises(ValueError, self.theclass, 2000, 1, 32)

    def test_hash_equality(self):
        """Equal dates hash equal and collapse to one dict key."""
        d = self.theclass(2000, 12, 31)
        # same thing
        e = self.theclass(2000, 12, 31)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

        d = self.theclass(2001, 1, 1)
        # same thing
        e = self.theclass(2001, 1, 1)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

    def test_computations(self):
        """date/timedelta arithmetic; nonsense operand combinations raise."""
        a = self.theclass(2002, 1, 31)
        b = self.theclass(1956, 1, 31)

        diff = a-b
        self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
        self.assertEqual(diff.seconds, 0)
        self.assertEqual(diff.microseconds, 0)

        day = timedelta(1)
        week = timedelta(7)
        a = self.theclass(2002, 3, 2)
        self.assertEqual(a + day, self.theclass(2002, 3, 3))
        self.assertEqual(day + a, self.theclass(2002, 3, 3))
        self.assertEqual(a - day, self.theclass(2002, 3, 1))
        self.assertEqual(-day + a, self.theclass(2002, 3, 1))
        self.assertEqual(a + week, self.theclass(2002, 3, 9))
        self.assertEqual(a - week, self.theclass(2002, 2, 23))
        self.assertEqual(a + 52*week, self.theclass(2003, 3, 1))
        self.assertEqual(a - 52*week, self.theclass(2001, 3, 3))
        self.assertEqual((a + week) - a, week)
        self.assertEqual((a + day) - a, day)
        self.assertEqual((a - week) - a, -week)
        self.assertEqual((a - day) - a, -day)
        self.assertEqual(a - (a + week), -week)
        self.assertEqual(a - (a + day), -day)
        self.assertEqual(a - (a - week), week)
        self.assertEqual(a - (a - day), day)

        # Add/sub ints, longs, floats should be illegal
        for i in 1, 1L, 1.0:
            self.assertRaises(TypeError, lambda: a+i)
            self.assertRaises(TypeError, lambda: a-i)
            self.assertRaises(TypeError, lambda: i+a)
            self.assertRaises(TypeError, lambda: i-a)

        # delta - date is senseless.
        self.assertRaises(TypeError, lambda: day - a)
        # mixing date and (delta or date) via * or // is senseless
        self.assertRaises(TypeError, lambda: day * a)
        self.assertRaises(TypeError, lambda: a * day)
        self.assertRaises(TypeError, lambda: day // a)
        self.assertRaises(TypeError, lambda: a // day)
        self.assertRaises(TypeError, lambda: a * a)
        self.assertRaises(TypeError, lambda: a // a)
        # date + date is senseless
        self.assertRaises(TypeError, lambda: a + a)

    def test_overflow(self):
        """Stepping a date past min/max raises OverflowError."""
        tiny = self.theclass.resolution

        for delta in [tiny, timedelta(1), timedelta(2)]:
            dt = self.theclass.min + delta
            dt -= delta  # no problem
            self.assertRaises(OverflowError, dt.__sub__, delta)
            self.assertRaises(OverflowError, dt.__add__, -delta)

            dt = self.theclass.max - delta
            dt += delta  # no problem
            self.assertRaises(OverflowError, dt.__add__, delta)
            self.assertRaises(OverflowError, dt.__sub__, -delta)

    def test_fromtimestamp(self):
        """fromtimestamp() inverts time.mktime for a fixed local date."""
        import time

        # Try an arbitrary fixed value.
        year, month, day = 1999, 9, 19
        ts = time.mktime((year, month, day, 0, 0, 0, 0, 0, -1))
        d = self.theclass.fromtimestamp(ts)
        self.assertEqual(d.year, year)
        self.assertEqual(d.month, month)
        self.assertEqual(d.day, day)

    def test_insane_fromtimestamp(self):
        """Timestamps far outside the representable range raise ValueError."""
        # It's possible that some platform maps time_t to double,
        # and that this test will fail there.  This test should
        # exempt such platforms (provided they return reasonable
        # results!).
        for insane in -1e200, 1e200:
            self.assertRaises(ValueError, self.theclass.fromtimestamp,
                              insane)

    def test_today(self):
        """today() agrees with fromtimestamp(time.time()) (within reason)."""
        import time

        # We claim that today() is like fromtimestamp(time.time()), so
        # prove it.
        for dummy in range(3):
            today = self.theclass.today()
            ts = time.time()
            todayagain = self.theclass.fromtimestamp(ts)
            if today == todayagain:
                break
            # There are several legit reasons that could fail:
            # 1. It recently became midnight, between the today() and the
            #    time() calls.
            # 2. The platform time() has such fine resolution that we'll
            #    never get the same value twice.
            # 3. The platform time() has poor resolution, and we just
            #    happened to call today() right before a resolution quantum
            #    boundary.
            # 4. The system clock got fiddled between calls.
            # In any case, wait a little while and try again.
            time.sleep(0.1)

        # It worked or it didn't.  If it didn't, assume it's reason #2, and
        # let the test pass if they're within half a second of each other.
        self.assertTrue(today == todayagain or
                        abs(todayagain - today) < timedelta(seconds=0.5))

    def test_weekday(self):
        """weekday() is 0-based Monday; isoweekday() is 1-based Monday."""
        for i in range(7):
            # March 4, 2002 is a Monday
            self.assertEqual(self.theclass(2002, 3, 4+i).weekday(), i)
            self.assertEqual(self.theclass(2002, 3, 4+i).isoweekday(), i+1)
            # January 2, 1956 is a Monday
            self.assertEqual(self.theclass(1956, 1, 2+i).weekday(), i)
            self.assertEqual(self.theclass(1956, 1, 2+i).isoweekday(), i+1)

    def test_isocalendar(self):
        """isocalendar() (year, week, weekday) triples across year ends."""
        # Check examples from
        # http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        for i in range(7):
            d = self.theclass(2003, 12, 22+i)
            self.assertEqual(d.isocalendar(), (2003, 52, i+1))
            d = self.theclass(2003, 12, 29) + timedelta(i)
            self.assertEqual(d.isocalendar(), (2004, 1, i+1))
            d = self.theclass(2004, 1, 5+i)
            self.assertEqual(d.isocalendar(), (2004, 2, i+1))
            d = self.theclass(2009, 12, 21+i)
            self.assertEqual(d.isocalendar(), (2009, 52, i+1))
            d = self.theclass(2009, 12, 28) + timedelta(i)
            self.assertEqual(d.isocalendar(), (2009, 53, i+1))
            d = self.theclass(2010, 1, 4+i)
            self.assertEqual(d.isocalendar(), (2010, 1, i+1))

    def test_iso_long_years(self):
        """Years with 53 ISO weeks match the published reference table."""
        # Calculate long ISO years and compare to table from
        # http://www.phys.uu.nl/~vgent/calendar/isocalendar.htm
        ISO_LONG_YEARS_TABLE = """
              4   32   60   88
              9   37   65   93
             15   43   71   99
             20   48   76
             26   54   82
            105  133  161  189
            111  139  167  195
            116  144  172
            122  150  178
            128  156  184
            201  229  257  285
            207  235  263  291
            212  240  268  296
            218  246  274
            224  252  280
            303  331  359  387
            308  336  364  392
            314  342  370  398
            320  348  376
            325  353  381
        """
        iso_long_years = map(int, ISO_LONG_YEARS_TABLE.split())
        iso_long_years.sort()
        L = []
        for i in range(400):
            d = self.theclass(2000+i, 12, 31)
            d1 = self.theclass(1600+i, 12, 31)
            self.assertEqual(d.isocalendar()[1:], d1.isocalendar()[1:])
            if d.isocalendar()[1] == 53:
                L.append(i)
        self.assertEqual(L, iso_long_years)

    def test_isoformat(self):
        """isoformat() zero-pads the year to four digits."""
        t = self.theclass(2, 3, 2)
        self.assertEqual(t.isoformat(), "0002-03-02")

    def test_ctime(self):
        """ctime() uses the C asctime-style layout."""
        t = self.theclass(2002, 3, 2)
        self.assertEqual(t.ctime(), "Sat Mar  2 00:00:00 2002")

    def test_strftime(self):
        """strftime() basics, argument checking, and odd format specs."""
        t = self.theclass(2005, 3, 2)
        self.assertEqual(t.strftime("m:%m d:%d y:%y"), "m:03 d:02 y:05")
        self.assertEqual(t.strftime(""), "")  # SF bug #761337
        self.assertEqual(t.strftime('x'*1000), 'x'*1000)  # SF bug #1556784

        self.assertRaises(TypeError, t.strftime)  # needs an arg
        self.assertRaises(TypeError, t.strftime, "one", "two")  # too many args
        self.assertRaises(TypeError, t.strftime, 42)  # arg wrong type

        # test that unicode input is allowed (issue 2782)
        self.assertEqual(t.strftime(u"%m"), "03")

        # A naive object replaces %z and %Z w/ empty strings.
        self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")

        #make sure that invalid format specifiers are handled correctly
        #self.assertRaises(ValueError, t.strftime, "%e")
        #self.assertRaises(ValueError, t.strftime, "%")
        #self.assertRaises(ValueError, t.strftime, "%#")

        #oh well, some systems just ignore those invalid ones.
        #at least, excercise them to make sure that no crashes
        #are generated
        for f in ["%e", "%", "%#"]:
            try:
                t.strftime(f)
            except ValueError:
                pass

        #check that this standard extension works
        t.strftime("%f")

    def test_format(self):
        """__format__('') is str(); other specs defer to strftime()."""
        dt = self.theclass(2007, 9, 10)
        self.assertEqual(dt.__format__(''), str(dt))

        # check that a derived class's __str__() gets called
        class A(self.theclass):
            def __str__(self):
                return 'A'
        a = A(2007, 9, 10)
        self.assertEqual(a.__format__(''), 'A')

        # check that a derived class's strftime gets called
        class B(self.theclass):
            def strftime(self, format_spec):
                return 'B'
        b = B(2007, 9, 10)
        self.assertEqual(b.__format__(''), str(dt))

        for fmt in ["m:%m d:%d y:%y",
                    "m:%m d:%d y:%y H:%H M:%M S:%S",
                    "%z %Z",
                    ]:
            self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
            self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
            self.assertEqual(b.__format__(fmt), 'B')

    def test_resolution_info(self):
        """min/max/resolution class attributes have the documented types."""
        self.assertIsInstance(self.theclass.min, self.theclass)
        self.assertIsInstance(self.theclass.max, self.theclass)
        self.assertIsInstance(self.theclass.resolution, timedelta)
        self.assertTrue(self.theclass.max > self.theclass.min)

    def test_extreme_timedelta(self):
        """max - min yields the largest representable span, round-trippable."""
        big = self.theclass.max - self.theclass.min
        # 3652058 days, 23 hours, 59 minutes, 59 seconds, 999999 microseconds
        n = (big.days*24*3600 + big.seconds)*1000000 + big.microseconds
        # n == 315537897599999999 ~= 2**58.13
        justasbig = timedelta(0, 0, n)
        self.assertEqual(big, justasbig)
        self.assertEqual(self.theclass.min + big, self.theclass.max)
        self.assertEqual(self.theclass.max - big, self.theclass.min)

    def test_timetuple(self):
        """timetuple() fields, including day-of-year and tm_isdst == -1."""
        for i in range(7):
            # January 2, 1956 is a Monday (0)
            d = self.theclass(1956, 1, 2+i)
            t = d.timetuple()
            self.assertEqual(t, (1956, 1, 2+i, 0, 0, 0, i, 2+i, -1))
            # February 1, 1956 is a Wednesday (2)
            d = self.theclass(1956, 2, 1+i)
            t = d.timetuple()
            self.assertEqual(t, (1956, 2, 1+i, 0, 0, 0, (2+i)%7, 32+i, -1))
            # March 1, 1956 is a Thursday (3), and is the 31+29+1 = 61st day
            # of the year.
            d = self.theclass(1956, 3, 1+i)
            t = d.timetuple()
            self.assertEqual(t, (1956, 3, 1+i, 0, 0, 0, (3+i)%7, 61+i, -1))
            self.assertEqual(t.tm_year, 1956)
            self.assertEqual(t.tm_mon, 3)
            self.assertEqual(t.tm_mday, 1+i)
            self.assertEqual(t.tm_hour, 0)
            self.assertEqual(t.tm_min, 0)
            self.assertEqual(t.tm_sec, 0)
            self.assertEqual(t.tm_wday, (3+i)%7)
            self.assertEqual(t.tm_yday, 61+i)
            self.assertEqual(t.tm_isdst, -1)

    def test_pickling(self):
        """Dates round-trip through every pickler/protocol combination."""
        args = 6, 7, 23
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_compare(self):
        """Total ordering of dates, plus mixed-type comparison rules.

        NOTE(review): unlike the timedelta version of this test, the
        OTHERSTUFF loop below has no `t1 <= badarg` check -- possibly an
        accidental omission worth restoring.
        """
        t1 = self.theclass(2, 3, 4)
        t2 = self.theclass(2, 3, 4)
        self.assertTrue(t1 == t2)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t1 >= t2)
        self.assertTrue(not t1 != t2)
        self.assertTrue(not t1 < t2)
        self.assertTrue(not t1 > t2)
        self.assertEqual(cmp(t1, t2), 0)
        self.assertEqual(cmp(t2, t1), 0)

        for args in (3, 3, 3), (2, 4, 4), (2, 3, 5):
            t2 = self.theclass(*args)   # this is larger than t1
            self.assertTrue(t1 < t2)
            self.assertTrue(t2 > t1)
            self.assertTrue(t1 <= t2)
            self.assertTrue(t2 >= t1)
            self.assertTrue(t1 != t2)
            self.assertTrue(t2 != t1)
            self.assertTrue(not t1 == t2)
            self.assertTrue(not t2 == t1)
            self.assertTrue(not t1 > t2)
            self.assertTrue(not t2 < t1)
            self.assertTrue(not t1 >= t2)
            self.assertTrue(not t2 <= t1)
            self.assertEqual(cmp(t1, t2), -1)
            self.assertEqual(cmp(t2, t1), 1)

        for badarg in OTHERSTUFF:
            self.assertEqual(t1 == badarg, False)
            self.assertEqual(t1 != badarg, True)
            self.assertEqual(badarg == t1, False)
            self.assertEqual(badarg != t1, True)

            self.assertRaises(TypeError, lambda: t1 < badarg)
            self.assertRaises(TypeError, lambda: t1 > badarg)
            self.assertRaises(TypeError, lambda: t1 >= badarg)
            self.assertRaises(TypeError, lambda: badarg <= t1)
            self.assertRaises(TypeError, lambda: badarg < t1)
            self.assertRaises(TypeError, lambda: badarg > t1)
            self.assertRaises(TypeError, lambda: badarg >= t1)

    def test_mixed_compare(self):
        """Comparison with arbitrary objects; a timetuple attr enables
        NotImplemented fallback to the other object's comparison."""
        our = self.theclass(2000, 4, 5)
        self.assertRaises(TypeError, cmp, our, 1)
        self.assertRaises(TypeError, cmp, 1, our)

        class AnotherDateTimeClass(object):
            def __cmp__(self, other):
                # Return "equal" so calling this can't be confused with
                # compare-by-address (which never says "equal" for distinct
                # objects).
                return 0
            __hash__ = None # Silence Py3k warning

        # This still errors, because date and datetime comparison raise
        # TypeError instead of NotImplemented when they don't know what to
        # do, in order to stop comparison from falling back to the default
        # compare-by-address.
        their = AnotherDateTimeClass()
        self.assertRaises(TypeError, cmp, our, their)
        # Oops:  The next stab raises TypeError in the C implementation,
        # but not in the Python implementation of datetime.  The difference
        # is due to that the Python implementation defines __cmp__ but
        # the C implementation defines tp_richcompare.  This is more pain
        # to fix than it's worth, so commenting out the test.
        # self.assertEqual(cmp(their, our), 0)

        # But date and datetime comparison return NotImplemented instead if the
        # other object has a timetuple attr.  This gives the other object a
        # chance to do the comparison.
        class Comparable(AnotherDateTimeClass):
            def timetuple(self):
                return ()

        their = Comparable()
        self.assertEqual(cmp(our, their), 0)
        self.assertEqual(cmp(their, our), 0)
        self.assertTrue(our == their)
        self.assertTrue(their == our)

    def test_bool(self):
        """All dates are considered true."""
        # All dates are considered true.
        self.assertTrue(self.theclass.min)
        self.assertTrue(self.theclass.max)

    def test_strftime_out_of_range(self):
        """strftime() rejects years before 1900."""
        # For nasty technical reasons, we can't handle years before 1900.
        cls = self.theclass
        self.assertEqual(cls(1900, 1, 1).strftime("%Y"), "1900")
        for y in 1, 49, 51, 99, 100, 1000, 1899:
            self.assertRaises(ValueError, cls(y, 1, 1).strftime, "%Y")

    def test_replace(self):
        """replace() substitutes single fields and revalidates the result."""
        cls = self.theclass
        args = [1, 2, 3]
        base = cls(*args)
        self.assertEqual(base, base.replace())

        i = 0
        for name, newval in (("year", 2),
                             ("month", 3),
                             ("day", 4)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1

        # Out of bounds.
        base = cls(2000, 2, 29)
        self.assertRaises(ValueError, base.replace, year=2001)

    def test_subclass_date(self):
        """Subclasses with a custom __new__ and extra state behave like
        the base date while keeping their additions."""
        class C(self.theclass):
            theAnswer = 42

            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result

            def newmeth(self, start):
                return start + self.year + self.month

        args = 2003, 4, 14

        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})

        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.toordinal(), dt2.toordinal())
        self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month - 7)

    def test_pickling_subclass_date(self):
        """A date subclass survives a pickle round-trip."""
        args = 6, 7, 23
        orig = SubclassDate(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_backdoor_resistance(self):
        """The pickle-string constructor backdoor still validates months."""
        # For fast unpickling, the constructor accepts a pickle string.
        # This is a low-overhead backdoor.  A user can (by intent or
        # mistake) pass a string directly, which (if it's the right length)
        # will get treated like a pickle, and bypass the normal sanity
        # checks in the constructor.  This can create insane objects.
        # The constructor doesn't want to burn the time to validate all
        # fields, but does check the month field.  This stops, e.g.,
        # datetime.datetime('1995-03-25') from yielding an insane object.
        base = '1995-03-25'
        if not issubclass(self.theclass, datetime):
            base = base[:4]
        for month_byte in '9', chr(0), chr(13), '\xff':
            self.assertRaises(TypeError, self.theclass,
                              base[:2] + month_byte + base[3:])
        for ord_byte in range(1, 13):
            # This shouldn't blow up because of the month byte alone.  If
            # the implementation changes to do more-careful checking, it may
            # blow up because other fields are insane.
            self.theclass(base[:2] + chr(ord_byte) + base[3:])
#############################################################################
# datetime tests
class SubclassDatetime(datetime):
    # Trivial datetime subclass used by the pickling tests to verify that
    # the subclass (and this extra class attribute) survives a round-trip.
    sub_var = 1
class TestDateTime(TestDate):
theclass = datetime
def test_basic_attributes(self):
dt = self.theclass(2002, 3, 1, 12, 0)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 0)
self.assertEqual(dt.second, 0)
self.assertEqual(dt.microsecond, 0)
def test_basic_attributes_nonzero(self):
# Make sure all attributes are non-zero so bugs in
# bit-shifting access show up.
dt = self.theclass(2002, 3, 1, 12, 59, 59, 8000)
self.assertEqual(dt.year, 2002)
self.assertEqual(dt.month, 3)
self.assertEqual(dt.day, 1)
self.assertEqual(dt.hour, 12)
self.assertEqual(dt.minute, 59)
self.assertEqual(dt.second, 59)
self.assertEqual(dt.microsecond, 8000)
def test_roundtrip(self):
    """repr() round-trips via eval(); fields round-trip via the
    constructor (overrides TestDate's version to cover time parts)."""
    for dt in (self.theclass(1, 2, 3, 4, 5, 6, 7),
               self.theclass.now()):
        # Verify dt -> string -> datetime identity.
        s = repr(dt)
        self.assertTrue(s.startswith('datetime.'))
        s = s[9:]
        dt2 = eval(s)
        self.assertEqual(dt, dt2)
        # Verify identity via reconstructing from pieces.
        dt2 = self.theclass(dt.year, dt.month, dt.day,
                            dt.hour, dt.minute, dt.second,
                            dt.microsecond)
        self.assertEqual(dt, dt2)
def test_isoformat(self):
t = self.theclass(2, 3, 2, 4, 5, 1, 123)
self.assertEqual(t.isoformat(), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat('T'), "0002-03-02T04:05:01.000123")
self.assertEqual(t.isoformat(' '), "0002-03-02 04:05:01.000123")
self.assertEqual(t.isoformat('\x00'), "0002-03-02\x0004:05:01.000123")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 04:05:01.000123")
t = self.theclass(2, 3, 2)
self.assertEqual(t.isoformat(), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat('T'), "0002-03-02T00:00:00")
self.assertEqual(t.isoformat(' '), "0002-03-02 00:00:00")
# str is ISO format with the separator forced to a blank.
self.assertEqual(str(t), "0002-03-02 00:00:00")
def test_format(self):
    """__format__('') is str(); other specs defer to strftime(), and
    both dispatch to subclass overrides."""
    dt = self.theclass(2007, 9, 10, 4, 5, 1, 123)
    self.assertEqual(dt.__format__(''), str(dt))

    # check that a derived class's __str__() gets called
    class A(self.theclass):
        def __str__(self):
            return 'A'
    a = A(2007, 9, 10, 4, 5, 1, 123)
    self.assertEqual(a.__format__(''), 'A')

    # check that a derived class's strftime gets called
    class B(self.theclass):
        def strftime(self, format_spec):
            return 'B'
    b = B(2007, 9, 10, 4, 5, 1, 123)
    # An empty spec bypasses strftime, so B's override is NOT used here.
    self.assertEqual(b.__format__(''), str(dt))

    for fmt in ["m:%m d:%d y:%y",
                "m:%m d:%d y:%y H:%H M:%M S:%S",
                "%z %Z",
                ]:
        self.assertEqual(dt.__format__(fmt), dt.strftime(fmt))
        self.assertEqual(a.__format__(fmt), dt.strftime(fmt))
        self.assertEqual(b.__format__(fmt), 'B')
def test_more_ctime(self):
    """ctime() with time-of-day fields, cross-checked against time.ctime."""
    # Test fields that TestDate doesn't touch.
    import time

    t = self.theclass(2002, 3, 2, 18, 3, 5, 123)
    self.assertEqual(t.ctime(), "Sat Mar  2 18:03:05 2002")
    # Oops!  The next line fails on Win2K under MSVC 6, so it's commented
    # out.  The difference is that t.ctime() produces " 2" for the day,
    # but platform ctime() produces "02" for the day.  According to
    # C99, t.ctime() is correct here.
    # self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))

    # So test a case where that difference doesn't matter.
    t = self.theclass(2002, 3, 22, 18, 3, 5, 123)
    self.assertEqual(t.ctime(), time.ctime(time.mktime(t.timetuple())))
def test_tz_independent_comparing(self):
dt1 = self.theclass(2002, 3, 1, 9, 0, 0)
dt2 = self.theclass(2002, 3, 1, 10, 0, 0)
dt3 = self.theclass(2002, 3, 1, 9, 0, 0)
self.assertEqual(dt1, dt3)
self.assertTrue(dt2 > dt3)
# Make sure comparison doesn't forget microseconds, and isn't done
# via comparing a float timestamp (an IEEE double doesn't have enough
# precision to span microsecond resolution across years 1 thru 9999,
# so comparing via timestamp necessarily calls some distinct values
# equal).
dt1 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999998)
us = timedelta(microseconds=1)
dt2 = dt1 + us
self.assertEqual(dt2 - dt1, us)
self.assertTrue(dt1 < dt2)
def test_strftime_with_bad_tzname_replace(self):
    """strftime('%Z') must raise TypeError, not crash, when the tzname's
    str subclass returns a non-string from replace().

    NOTE(review): FixedOffset is a tzinfo helper defined elsewhere in
    this file; presumably FixedOffset(3, 'name') is a +3 minute offset
    named 'name' -- confirm against its definition.
    """
    # verify ok if tzinfo.tzname().replace() returns a non-string
    class MyTzInfo(FixedOffset):
        def tzname(self, dt):
            class MyStr(str):
                def replace(self, *args):
                    # Deliberately break the str contract.
                    return None
            return MyStr('name')
    t = self.theclass(2005, 3, 2, 0, 0, 0, 0, MyTzInfo(3, 'name'))
    self.assertRaises(TypeError, t.strftime, '%Z')
def test_bad_constructor_arguments(self):
    """Out-of-range date AND time fields raise ValueError (extends the
    TestDate version with hour/minute/second/microsecond checks)."""
    # bad years
    self.theclass(MINYEAR, 1, 1)  # no exception
    self.theclass(MAXYEAR, 1, 1)  # no exception
    self.assertRaises(ValueError, self.theclass, MINYEAR-1, 1, 1)
    self.assertRaises(ValueError, self.theclass, MAXYEAR+1, 1, 1)
    # bad months
    self.theclass(2000, 1, 1)    # no exception
    self.theclass(2000, 12, 1)   # no exception
    self.assertRaises(ValueError, self.theclass, 2000, 0, 1)
    self.assertRaises(ValueError, self.theclass, 2000, 13, 1)
    # bad days
    self.theclass(2000, 2, 29)   # no exception
    self.theclass(2004, 2, 29)   # no exception
    self.theclass(2400, 2, 29)   # no exception
    self.assertRaises(ValueError, self.theclass, 2000, 2, 30)
    self.assertRaises(ValueError, self.theclass, 2001, 2, 29)
    self.assertRaises(ValueError, self.theclass, 2100, 2, 29)
    self.assertRaises(ValueError, self.theclass, 1900, 2, 29)
    self.assertRaises(ValueError, self.theclass, 2000, 1, 0)
    self.assertRaises(ValueError, self.theclass, 2000, 1, 32)
    # bad hours
    self.theclass(2000, 1, 31, 0)    # no exception
    self.theclass(2000, 1, 31, 23)   # no exception
    self.assertRaises(ValueError, self.theclass, 2000, 1, 31, -1)
    self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 24)
    # bad minutes
    self.theclass(2000, 1, 31, 23, 0)    # no exception
    self.theclass(2000, 1, 31, 23, 59)   # no exception
    self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, -1)
    self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 60)
    # bad seconds
    self.theclass(2000, 1, 31, 23, 59, 0)    # no exception
    self.theclass(2000, 1, 31, 23, 59, 59)   # no exception
    self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, -1)
    self.assertRaises(ValueError, self.theclass, 2000, 1, 31, 23, 59, 60)
    # bad microseconds
    self.theclass(2000, 1, 31, 23, 59, 59, 0)         # no exception
    self.theclass(2000, 1, 31, 23, 59, 59, 999999)    # no exception
    self.assertRaises(ValueError, self.theclass,
                      2000, 1, 31, 23, 59, 59, -1)
    self.assertRaises(ValueError, self.theclass,
                      2000, 1, 31, 23, 59, 59,
                      1000000)
def test_hash_equality(self):
d = self.theclass(2000, 12, 31, 23, 30, 17)
e = self.theclass(2000, 12, 31, 23, 30, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
d = self.theclass(2001, 1, 1, 0, 5, 17)
e = self.theclass(2001, 1, 1, 0, 5, 17)
self.assertEqual(d, e)
self.assertEqual(hash(d), hash(e))
dic = {d: 1}
dic[e] = 2
self.assertEqual(len(dic), 1)
self.assertEqual(dic[d], 2)
self.assertEqual(dic[e], 2)
def test_computations(self):
    """Exercise datetime-datetime subtraction and datetime +/- timedelta
    arithmetic, including commutativity, inverses, and type errors."""
    a = self.theclass(2002, 1, 31)
    b = self.theclass(1956, 1, 31)
    diff = a-b
    # 46 years of 365 days, plus one extra day per leap year in range.
    self.assertEqual(diff.days, 46*365 + len(range(1956, 2002, 4)))
    self.assertEqual(diff.seconds, 0)
    self.assertEqual(diff.microseconds, 0)
    a = self.theclass(2002, 3, 2, 17, 6)
    millisec = timedelta(0, 0, 1000)
    hour = timedelta(0, 3600)
    day = timedelta(1)
    week = timedelta(7)
    # Addition/subtraction of deltas, both operand orders.
    self.assertEqual(a + hour, self.theclass(2002, 3, 2, 18, 6))
    self.assertEqual(hour + a, self.theclass(2002, 3, 2, 18, 6))
    self.assertEqual(a + 10*hour, self.theclass(2002, 3, 3, 3, 6))
    self.assertEqual(a - hour, self.theclass(2002, 3, 2, 16, 6))
    self.assertEqual(-hour + a, self.theclass(2002, 3, 2, 16, 6))
    self.assertEqual(a - hour, a + -hour)
    self.assertEqual(a - 20*hour, self.theclass(2002, 3, 1, 21, 6))
    self.assertEqual(a + day, self.theclass(2002, 3, 3, 17, 6))
    self.assertEqual(a - day, self.theclass(2002, 3, 1, 17, 6))
    self.assertEqual(a + week, self.theclass(2002, 3, 9, 17, 6))
    self.assertEqual(a - week, self.theclass(2002, 2, 23, 17, 6))
    self.assertEqual(a + 52*week, self.theclass(2003, 3, 1, 17, 6))
    self.assertEqual(a - 52*week, self.theclass(2001, 3, 3, 17, 6))
    # (a op delta) - a must recover the delta exactly.
    self.assertEqual((a + week) - a, week)
    self.assertEqual((a + day) - a, day)
    self.assertEqual((a + hour) - a, hour)
    self.assertEqual((a + millisec) - a, millisec)
    self.assertEqual((a - week) - a, -week)
    self.assertEqual((a - day) - a, -day)
    self.assertEqual((a - hour) - a, -hour)
    self.assertEqual((a - millisec) - a, -millisec)
    self.assertEqual(a - (a + week), -week)
    self.assertEqual(a - (a + day), -day)
    self.assertEqual(a - (a + hour), -hour)
    self.assertEqual(a - (a + millisec), -millisec)
    self.assertEqual(a - (a - week), week)
    self.assertEqual(a - (a - day), day)
    self.assertEqual(a - (a - hour), hour)
    self.assertEqual(a - (a - millisec), millisec)
    # Adding a summed delta must match adding the pieces one at a time.
    self.assertEqual(a + (week + day + hour + millisec),
                     self.theclass(2002, 3, 10, 18, 6, 0, 1000))
    self.assertEqual(a + (week + day + hour + millisec),
                     (((a + week) + day) + hour) + millisec)
    self.assertEqual(a - (week + day + hour + millisec),
                     self.theclass(2002, 2, 22, 16, 5, 59, 999000))
    self.assertEqual(a - (week + day + hour + millisec),
                     (((a - week) - day) - hour) - millisec)
    # Add/sub ints, longs, floats should be illegal
    for i in 1, 1L, 1.0:
        self.assertRaises(TypeError, lambda: a+i)
        self.assertRaises(TypeError, lambda: a-i)
        self.assertRaises(TypeError, lambda: i+a)
        self.assertRaises(TypeError, lambda: i-a)

    # delta - datetime is senseless.
    self.assertRaises(TypeError, lambda: day - a)
    # mixing datetime and (delta or datetime) via * or // is senseless
    self.assertRaises(TypeError, lambda: day * a)
    self.assertRaises(TypeError, lambda: a * day)
    self.assertRaises(TypeError, lambda: day // a)
    self.assertRaises(TypeError, lambda: a // day)
    self.assertRaises(TypeError, lambda: a * a)
    self.assertRaises(TypeError, lambda: a // a)
    # datetime + datetime is senseless
    self.assertRaises(TypeError, lambda: a + a)
def test_pickling(self):
    """Round-trip a datetime through every pickler/protocol pair."""
    original = self.theclass(6, 7, 23, 20, 59, 1, 64**2)
    for pickler, unpickler, proto in pickle_choices:
        restored = unpickler.loads(pickler.dumps(original, proto))
        self.assertEqual(original, restored)
def test_more_pickling(self):
    """A default-protocol pickle round-trip preserves the date fields."""
    original = self.theclass(2003, 2, 7, 16, 48, 37, 444116)
    restored = pickle.loads(pickle.dumps(original))
    self.assertEqual(restored.year, 2003)
    self.assertEqual(restored.month, 2)
    self.assertEqual(restored.day, 7)
def test_pickling_subclass_datetime(self):
    """Pickling a datetime subclass round-trips under every protocol."""
    args = 6, 7, 23, 20, 59, 1, 64**2
    orig = SubclassDatetime(*args)
    for pickler, unpickler, proto in pickle_choices:
        green = pickler.dumps(orig, proto)
        derived = unpickler.loads(green)
        self.assertEqual(orig, derived)
def test_more_compare(self):
    # The test_compare() inherited from TestDate covers the error cases.
    # We just want to test lexicographic ordering on the members datetime
    # has that date lacks.
    args = [2000, 11, 29, 20, 58, 16, 999998]
    t1 = self.theclass(*args)
    t2 = self.theclass(*args)
    # Identical field values: all equality-flavored comparisons hold.
    self.assertTrue(t1 == t2)
    self.assertTrue(t1 <= t2)
    self.assertTrue(t1 >= t2)
    self.assertTrue(not t1 != t2)
    self.assertTrue(not t1 < t2)
    self.assertTrue(not t1 > t2)
    self.assertEqual(cmp(t1, t2), 0)
    self.assertEqual(cmp(t2, t1), 0)
    # Bumping any single field (year..microsecond) makes t2 larger,
    # proving comparison is lexicographic over all fields.
    for i in range(len(args)):
        newargs = args[:]
        newargs[i] = args[i] + 1
        t2 = self.theclass(*newargs)   # this is larger than t1
        self.assertTrue(t1 < t2)
        self.assertTrue(t2 > t1)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t2 >= t1)
        self.assertTrue(t1 != t2)
        self.assertTrue(t2 != t1)
        self.assertTrue(not t1 == t2)
        self.assertTrue(not t2 == t1)
        self.assertTrue(not t1 > t2)
        self.assertTrue(not t2 < t1)
        self.assertTrue(not t1 >= t2)
        self.assertTrue(not t2 <= t1)
        self.assertEqual(cmp(t1, t2), -1)
        self.assertEqual(cmp(t2, t1), 1)
# A helper for timestamp constructor tests.
def verify_field_equality(self, expected, got):
    """Assert that a time.struct_time and a date/datetime agree on the
    six broken-down fields they share (year through second)."""
    pairs = (("tm_year", "year"), ("tm_mon", "month"),
             ("tm_mday", "day"), ("tm_hour", "hour"),
             ("tm_min", "minute"), ("tm_sec", "second"))
    for struct_field, attr in pairs:
        self.assertEqual(getattr(expected, struct_field),
                         getattr(got, attr))
def test_fromtimestamp(self):
    """fromtimestamp() must agree with time.localtime() for 'now'."""
    import time
    now = time.time()
    self.verify_field_equality(time.localtime(now),
                               self.theclass.fromtimestamp(now))
def test_utcfromtimestamp(self):
    """utcfromtimestamp() must agree with time.gmtime() for 'now'."""
    import time
    now = time.time()
    self.verify_field_equality(time.gmtime(now),
                               self.theclass.utcfromtimestamp(now))
def test_microsecond_rounding(self):
    """Floats less than one microsecond shy of a whole second must
    "round up" in fromtimestamp()."""
    almost_one = 0.9999999
    self.assertEqual(self.theclass.fromtimestamp(almost_one),
                     self.theclass.fromtimestamp(1))
def test_insane_fromtimestamp(self):
    """Wildly out-of-range timestamps must raise ValueError.

    It's possible that some platform maps time_t to double, and that
    this test will fail there.  This test should exempt such platforms
    (provided they return reasonable results!).
    """
    for outlandish in (-1e200, 1e200):
        self.assertRaises(ValueError,
                          self.theclass.fromtimestamp, outlandish)
def test_insane_utcfromtimestamp(self):
    """Wildly out-of-range timestamps must raise ValueError (UTC variant).

    It's possible that some platform maps time_t to double, and that
    this test will fail there.  This test should exempt such platforms
    (provided they return reasonable results!).
    """
    for outlandish in (-1e200, 1e200):
        self.assertRaises(ValueError,
                          self.theclass.utcfromtimestamp, outlandish)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_fromtimestamp(self):
    # The result is tz-dependent; at least test that this doesn't
    # fail (like it did before bug 1646728 was fixed).
    self.theclass.fromtimestamp(-1.05)
@unittest.skipIf(sys.platform == "win32", "Windows doesn't accept negative timestamps")
def test_negative_float_utcfromtimestamp(self):
    # A fractional negative timestamp must round its microseconds
    # correctly: -1.05s before the epoch is 23:59:58.950000 UTC.
    d = self.theclass.utcfromtimestamp(-1.05)
    self.assertEqual(d, self.theclass(1969, 12, 31, 23, 59, 58, 950000))
def test_utcnow(self):
    """utcnow() and utcfromtimestamp(time.time()) should agree closely."""
    import time

    # Call it a success if utcnow() and utcfromtimestamp() are within
    # a second of each other.
    tolerance = timedelta(seconds=1)
    # Retry a few times in case the clock ticks over a second boundary
    # between the two calls.
    for dummy in range(3):
        from_now = self.theclass.utcnow()
        from_timestamp = self.theclass.utcfromtimestamp(time.time())
        if abs(from_timestamp - from_now) <= tolerance:
            break
        # Else try again a few times.
    self.assertTrue(abs(from_timestamp - from_now) <= tolerance)
def test_strptime(self):
    """datetime.strptime() must match the private _strptime helper,
    including the fractional-seconds (%f) field."""
    import _strptime

    string = '2004-12-01 13:02:47.197'
    format = '%Y-%m-%d %H:%M:%S.%f'
    # _strptime returns (struct_time-like tuple, fractional microseconds).
    result, frac = _strptime._strptime(string, format)
    expected = self.theclass(*(result[0:6]+(frac,)))
    got = self.theclass.strptime(string, format)
    self.assertEqual(expected, got)
def test_more_timetuple(self):
    # This tests fields beyond those tested by the TestDate.test_timetuple.
    t = self.theclass(2004, 12, 31, 6, 22, 33)
    # Dec 31 2004 is a Friday (weekday 4) and day-of-year 366 (leap year).
    self.assertEqual(t.timetuple(), (2004, 12, 31, 6, 22, 33, 4, 366, -1))
    self.assertEqual(t.timetuple(),
                     (t.year, t.month, t.day,
                      t.hour, t.minute, t.second,
                      t.weekday(),
                      t.toordinal() - date(t.year, 1, 1).toordinal() + 1,
                      -1))
    # The same values must also be reachable by struct_time attribute name.
    tt = t.timetuple()
    self.assertEqual(tt.tm_year, t.year)
    self.assertEqual(tt.tm_mon, t.month)
    self.assertEqual(tt.tm_mday, t.day)
    self.assertEqual(tt.tm_hour, t.hour)
    self.assertEqual(tt.tm_min, t.minute)
    self.assertEqual(tt.tm_sec, t.second)
    self.assertEqual(tt.tm_wday, t.weekday())
    self.assertEqual(tt.tm_yday, t.toordinal() -
                                 date(t.year, 1, 1).toordinal() + 1)
    # Naive datetimes report an unknown DST flag.
    self.assertEqual(tt.tm_isdst, -1)
def test_more_strftime(self):
    # This tests fields beyond those tested by the TestDate.test_strftime.
    # %f must be zero-padded to six digits; %j is the day of the year.
    t = self.theclass(2004, 12, 31, 6, 22, 33, 47)
    self.assertEqual(t.strftime("%m %d %y %f %S %M %H %j"),
                     "12 31 04 000047 33 22 06 366")
def test_extract(self):
    """date() and time() must split a datetime into its two halves."""
    stamp = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
    self.assertEqual(stamp.date(), date(2002, 3, 4))
    self.assertEqual(stamp.time(), time(18, 45, 3, 1234))
def test_combine(self):
    """combine(date, time) must rebuild the equivalent datetime, accept
    keyword arguments, and reject bad argument counts/types/order."""
    d = date(2002, 3, 4)
    t = time(18, 45, 3, 1234)
    expected = self.theclass(2002, 3, 4, 18, 45, 3, 1234)
    combine = self.theclass.combine
    dt = combine(d, t)
    self.assertEqual(dt, expected)

    # Keyword form, in either order.
    dt = combine(time=t, date=d)
    self.assertEqual(dt, expected)

    # combine() is the inverse of date()/time() extraction.
    self.assertEqual(d, dt.date())
    self.assertEqual(t, dt.time())
    self.assertEqual(dt, combine(dt.date(), dt.time()))

    self.assertRaises(TypeError, combine) # need an arg
    self.assertRaises(TypeError, combine, d) # need two args
    self.assertRaises(TypeError, combine, t, d) # args reversed
    self.assertRaises(TypeError, combine, d, t, 1) # too many args
    self.assertRaises(TypeError, combine, "date", "time") # wrong types
def test_replace(self):
    """replace() must swap exactly one named field, leave the rest
    untouched, and still validate the resulting date."""
    cls = self.theclass
    args = [1, 2, 3, 4, 5, 6, 7]
    base = cls(*args)
    # No keywords: an identical copy.
    self.assertEqual(base, base.replace())

    replacements = (("year", 2),
                    ("month", 3),
                    ("day", 4),
                    ("hour", 5),
                    ("minute", 6),
                    ("second", 7),
                    ("microsecond", 8))
    for i, (name, newval) in enumerate(replacements):
        newargs = list(args)
        newargs[i] = newval
        expected = cls(*newargs)
        got = base.replace(**{name: newval})
        self.assertEqual(expected, got)

    # Out of bounds: Feb 29 exists in 2000 but not in 2001.
    base = cls(2000, 2, 29)
    self.assertRaises(ValueError, base.replace, year=2001)
def test_astimezone(self):
    # Pretty boring!  The TZ test is more interesting here.  astimezone()
    # simply can't be applied to a naive object.
    dt = self.theclass.now()
    f = FixedOffset(44, "")
    self.assertRaises(TypeError, dt.astimezone) # not enough args
    self.assertRaises(TypeError, dt.astimezone, f, f) # too many args
    self.assertRaises(TypeError, dt.astimezone, dt) # arg wrong type
    self.assertRaises(ValueError, dt.astimezone, f) # naive
    self.assertRaises(ValueError, dt.astimezone, tz=f) # naive

    # A tzinfo whose utcoffset() is None still leaves dt naive.
    class Bogus(tzinfo):
        def utcoffset(self, dt): return None
        def dst(self, dt): return timedelta(0)
    bog = Bogus()
    self.assertRaises(ValueError, dt.astimezone, bog)   # naive

    # Likewise when dst() is None.
    class AlsoBogus(tzinfo):
        def utcoffset(self, dt): return timedelta(0)
        def dst(self, dt): return None
    alsobog = AlsoBogus()
    self.assertRaises(ValueError, dt.astimezone, alsobog) # also naive
def test_subclass_datetime(self):
    """A datetime subclass with extra state and methods must construct,
    carry its extra attribute, and behave like the base class."""

    class C(self.theclass):
        theAnswer = 42

        def __new__(cls, *args, **kws):
            # Pop the subclass-only keyword before delegating to the base.
            temp = kws.copy()
            extra = temp.pop('extra')
            result = self.theclass.__new__(cls, *args, **temp)
            result.extra = extra
            return result

        def newmeth(self, start):
            return start + self.year + self.month + self.second

    args = 2003, 4, 14, 12, 13, 41

    dt1 = self.theclass(*args)
    dt2 = C(*args, **{'extra': 7})

    self.assertEqual(dt2.__class__, C)
    self.assertEqual(dt2.theAnswer, 42)
    self.assertEqual(dt2.extra, 7)
    self.assertEqual(dt1.toordinal(), dt2.toordinal())
    self.assertEqual(dt2.newmeth(-7), dt1.year + dt1.month +
                                      dt1.second - 7)
class SubclassTime(time):
    """Trivial time subclass, used to verify pickling keeps the subclass."""
    sub_var = 1
class TestTime(HarmlessMixedComparison, unittest.TestCase):
    """Tests for datetime.time without a tzinfo (the naive behavior)."""

    theclass = time

    def test_basic_attributes(self):
        t = self.theclass(12, 0)
        self.assertEqual(t.hour, 12)
        self.assertEqual(t.minute, 0)
        self.assertEqual(t.second, 0)
        self.assertEqual(t.microsecond, 0)

    def test_basic_attributes_nonzero(self):
        # Make sure all attributes are non-zero so bugs in
        # bit-shifting access show up.
        t = self.theclass(12, 59, 59, 8000)
        self.assertEqual(t.hour, 12)
        self.assertEqual(t.minute, 59)
        self.assertEqual(t.second, 59)
        self.assertEqual(t.microsecond, 8000)

    def test_roundtrip(self):
        t = self.theclass(1, 2, 3, 4)

        # Verify t -> string -> time identity.
        s = repr(t)
        self.assertTrue(s.startswith('datetime.'))
        s = s[9:]   # strip the 'datetime.' module prefix before eval
        t2 = eval(s)
        self.assertEqual(t, t2)

        # Verify identity via reconstructing from pieces.
        t2 = self.theclass(t.hour, t.minute, t.second,
                           t.microsecond)
        self.assertEqual(t, t2)

    def test_comparing(self):
        args = [1, 2, 3, 4]
        t1 = self.theclass(*args)
        t2 = self.theclass(*args)
        # Equal values: every equality-flavored comparison holds.
        self.assertTrue(t1 == t2)
        self.assertTrue(t1 <= t2)
        self.assertTrue(t1 >= t2)
        self.assertTrue(not t1 != t2)
        self.assertTrue(not t1 < t2)
        self.assertTrue(not t1 > t2)
        self.assertEqual(cmp(t1, t2), 0)
        self.assertEqual(cmp(t2, t1), 0)

        # Bumping any single field makes t2 larger: comparison is
        # lexicographic over (hour, minute, second, microsecond).
        for i in range(len(args)):
            newargs = args[:]
            newargs[i] = args[i] + 1
            t2 = self.theclass(*newargs)   # this is larger than t1
            self.assertTrue(t1 < t2)
            self.assertTrue(t2 > t1)
            self.assertTrue(t1 <= t2)
            self.assertTrue(t2 >= t1)
            self.assertTrue(t1 != t2)
            self.assertTrue(t2 != t1)
            self.assertTrue(not t1 == t2)
            self.assertTrue(not t2 == t1)
            self.assertTrue(not t1 > t2)
            self.assertTrue(not t2 < t1)
            self.assertTrue(not t1 >= t2)
            self.assertTrue(not t2 <= t1)
            self.assertEqual(cmp(t1, t2), -1)
            self.assertEqual(cmp(t2, t1), 1)

        # Mixed-type comparisons: equality is False, ordering raises.
        for badarg in OTHERSTUFF:
            self.assertEqual(t1 == badarg, False)
            self.assertEqual(t1 != badarg, True)
            self.assertEqual(badarg == t1, False)
            self.assertEqual(badarg != t1, True)

            self.assertRaises(TypeError, lambda: t1 <= badarg)
            self.assertRaises(TypeError, lambda: t1 < badarg)
            self.assertRaises(TypeError, lambda: t1 > badarg)
            self.assertRaises(TypeError, lambda: t1 >= badarg)
            self.assertRaises(TypeError, lambda: badarg <= t1)
            self.assertRaises(TypeError, lambda: badarg < t1)
            self.assertRaises(TypeError, lambda: badarg > t1)
            self.assertRaises(TypeError, lambda: badarg >= t1)

    def test_bad_constructor_arguments(self):
        # Each field accepts exactly its closed range; one past either
        # boundary must raise ValueError.
        # bad hours
        self.theclass(0, 0)    # no exception
        self.theclass(23, 0)   # no exception
        self.assertRaises(ValueError, self.theclass, -1, 0)
        self.assertRaises(ValueError, self.theclass, 24, 0)
        # bad minutes
        self.theclass(23, 0)    # no exception
        self.theclass(23, 59)   # no exception
        self.assertRaises(ValueError, self.theclass, 23, -1)
        self.assertRaises(ValueError, self.theclass, 23, 60)
        # bad seconds
        self.theclass(23, 59, 0)    # no exception
        self.theclass(23, 59, 59)   # no exception
        self.assertRaises(ValueError, self.theclass, 23, 59, -1)
        self.assertRaises(ValueError, self.theclass, 23, 59, 60)
        # bad microseconds
        self.theclass(23, 59, 59, 0)        # no exception
        self.theclass(23, 59, 59, 999999)   # no exception
        self.assertRaises(ValueError, self.theclass, 23, 59, 59, -1)
        self.assertRaises(ValueError, self.theclass, 23, 59, 59, 1000000)

    def test_hash_equality(self):
        # Equal times must hash equal and share a single dict slot.
        d = self.theclass(23, 30, 17)
        e = self.theclass(23, 30, 17)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

        d = self.theclass(0, 5, 17)
        e = self.theclass(0, 5, 17)
        self.assertEqual(d, e)
        self.assertEqual(hash(d), hash(e))

        dic = {d: 1}
        dic[e] = 2
        self.assertEqual(len(dic), 1)
        self.assertEqual(dic[d], 2)
        self.assertEqual(dic[e], 2)

    def test_isoformat(self):
        t = self.theclass(4, 5, 1, 123)
        self.assertEqual(t.isoformat(), "04:05:01.000123")
        self.assertEqual(t.isoformat(), str(t))

        # Zero microseconds suppress the fractional part entirely.
        t = self.theclass()
        self.assertEqual(t.isoformat(), "00:00:00")
        self.assertEqual(t.isoformat(), str(t))

        # Nonzero microseconds are always rendered with six digits.
        t = self.theclass(microsecond=1)
        self.assertEqual(t.isoformat(), "00:00:00.000001")
        self.assertEqual(t.isoformat(), str(t))

        t = self.theclass(microsecond=10)
        self.assertEqual(t.isoformat(), "00:00:00.000010")
        self.assertEqual(t.isoformat(), str(t))

        t = self.theclass(microsecond=100)
        self.assertEqual(t.isoformat(), "00:00:00.000100")
        self.assertEqual(t.isoformat(), str(t))

        t = self.theclass(microsecond=1000)
        self.assertEqual(t.isoformat(), "00:00:00.001000")
        self.assertEqual(t.isoformat(), str(t))

        t = self.theclass(microsecond=10000)
        self.assertEqual(t.isoformat(), "00:00:00.010000")
        self.assertEqual(t.isoformat(), str(t))

        t = self.theclass(microsecond=100000)
        self.assertEqual(t.isoformat(), "00:00:00.100000")
        self.assertEqual(t.isoformat(), str(t))

    def test_1653736(self):
        # verify it doesn't accept extra keyword arguments
        t = self.theclass(second=1)
        self.assertRaises(TypeError, t.isoformat, foo=3)

    def test_strftime(self):
        t = self.theclass(1, 2, 3, 4)
        self.assertEqual(t.strftime('%H %M %S %f'), "01 02 03 000004")
        # A naive object replaces %z and %Z with empty strings.
        self.assertEqual(t.strftime("'%z' '%Z'"), "'' ''")

    def test_format(self):
        t = self.theclass(1, 2, 3, 4)
        # An empty format spec falls back to str().
        self.assertEqual(t.__format__(''), str(t))

        # check that a derived class's __str__() gets called
        class A(self.theclass):
            def __str__(self):
                return 'A'
        a = A(1, 2, 3, 4)
        self.assertEqual(a.__format__(''), 'A')

        # check that a derived class's strftime gets called
        class B(self.theclass):
            def strftime(self, format_spec):
                return 'B'
        b = B(1, 2, 3, 4)
        self.assertEqual(b.__format__(''), str(t))

        for fmt in ['%H %M %S',
                    ]:
            self.assertEqual(t.__format__(fmt), t.strftime(fmt))
            self.assertEqual(a.__format__(fmt), t.strftime(fmt))
            self.assertEqual(b.__format__(fmt), 'B')

    def test_str(self):
        # Trailing microseconds appear only when nonzero; seconds appear
        # only when seconds or microseconds are nonzero.
        self.assertEqual(str(self.theclass(1, 2, 3, 4)), "01:02:03.000004")
        self.assertEqual(str(self.theclass(10, 2, 3, 4000)), "10:02:03.004000")
        self.assertEqual(str(self.theclass(0, 2, 3, 400000)), "00:02:03.400000")
        self.assertEqual(str(self.theclass(12, 2, 3, 0)), "12:02:03")
        self.assertEqual(str(self.theclass(23, 15, 0, 0)), "23:15:00")

    def test_repr(self):
        # repr() omits trailing zero fields (microsecond, then second).
        name = 'datetime.' + self.theclass.__name__
        self.assertEqual(repr(self.theclass(1, 2, 3, 4)),
                         "%s(1, 2, 3, 4)" % name)
        self.assertEqual(repr(self.theclass(10, 2, 3, 4000)),
                         "%s(10, 2, 3, 4000)" % name)
        self.assertEqual(repr(self.theclass(0, 2, 3, 400000)),
                         "%s(0, 2, 3, 400000)" % name)
        self.assertEqual(repr(self.theclass(12, 2, 3, 0)),
                         "%s(12, 2, 3)" % name)
        self.assertEqual(repr(self.theclass(23, 15, 0, 0)),
                         "%s(23, 15)" % name)

    def test_resolution_info(self):
        self.assertIsInstance(self.theclass.min, self.theclass)
        self.assertIsInstance(self.theclass.max, self.theclass)
        self.assertIsInstance(self.theclass.resolution, timedelta)
        self.assertTrue(self.theclass.max > self.theclass.min)

    def test_pickling(self):
        args = 20, 59, 16, 64**2
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_pickling_subclass_time(self):
        # Pickling must preserve the subclass, not decay to plain time.
        args = 20, 59, 16, 64**2
        orig = SubclassTime(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)

    def test_bool(self):
        # A time is falsy only when it equals midnight exactly.
        cls = self.theclass
        self.assertTrue(cls(1))
        self.assertTrue(cls(0, 1))
        self.assertTrue(cls(0, 0, 1))
        self.assertTrue(cls(0, 0, 0, 1))
        self.assertTrue(not cls(0))
        self.assertTrue(not cls())

    def test_replace(self):
        cls = self.theclass
        args = [1, 2, 3, 4]
        base = cls(*args)
        self.assertEqual(base, base.replace())

        i = 0
        for name, newval in (("hour", 5),
                             ("minute", 6),
                             ("second", 7),
                             ("microsecond", 8)):
            newargs = args[:]
            newargs[i] = newval
            expected = cls(*newargs)
            got = base.replace(**{name: newval})
            self.assertEqual(expected, got)
            i += 1

        # Out of bounds.
        base = cls(1)
        self.assertRaises(ValueError, base.replace, hour=24)
        self.assertRaises(ValueError, base.replace, minute=-1)
        self.assertRaises(ValueError, base.replace, second=100)
        self.assertRaises(ValueError, base.replace, microsecond=1000000)

    def test_subclass_time(self):
        # A time subclass with extra state must construct and behave
        # like the base class while keeping its extra attribute.
        class C(self.theclass):
            theAnswer = 42

            def __new__(cls, *args, **kws):
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result

            def newmeth(self, start):
                return start + self.hour + self.second

        args = 4, 5, 6

        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})

        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.isoformat(), dt2.isoformat())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)

    def test_backdoor_resistance(self):
        # see TestDate.test_backdoor_resistance().
        base = '2:59.0'
        for hour_byte in ' ', '9', chr(24), '\xff':
            self.assertRaises(TypeError, self.theclass,
                              hour_byte + base[1:])
# A mixin for classes with a tzinfo= argument.  Subclasses must define
# theclass as a class attribute, and theclass(1, 1, 1, tzinfo=whatever)
# must be legit (which is true for time and datetime).
class TZInfoBase:
    """Shared tzinfo-related tests, mixed into both the time and the
    datetime test cases."""

    def test_argument_passing(self):
        cls = self.theclass
        # A datetime passes itself on, a time passes None.
        class introspective(tzinfo):
            def tzname(self, dt):    return dt and "real" or "none"
            def utcoffset(self, dt):
                return timedelta(minutes = dt and 42 or -42)
            dst = utcoffset

        obj = cls(1, 2, 3, tzinfo=introspective())

        expected = cls is time and "none" or "real"
        self.assertEqual(obj.tzname(), expected)

        expected = timedelta(minutes=(cls is time and -42 or 42))
        self.assertEqual(obj.utcoffset(), expected)
        self.assertEqual(obj.dst(), expected)

    def test_bad_tzinfo_classes(self):
        cls = self.theclass
        # tzinfo must be a tzinfo instance, not just any object ...
        self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=12)

        class NiceTry(object):
            def __init__(self): pass
            def utcoffset(self, dt): pass
        self.assertRaises(TypeError, cls, 1, 1, 1, tzinfo=NiceTry)

        # ... but a genuine tzinfo subclass instance is accepted as-is.
        class BetterTry(tzinfo):
            def __init__(self): pass
            def utcoffset(self, dt): pass
        b = BetterTry()
        t = cls(1, 1, 1, tzinfo=b)
        self.assertTrue(t.tzinfo is b)

    def test_utc_offset_out_of_bounds(self):
        # Offsets must be strictly between -1440 and +1440 minutes.
        class Edgy(tzinfo):
            def __init__(self, offset):
                self.offset = timedelta(minutes=offset)
            def utcoffset(self, dt):
                return self.offset

        cls = self.theclass
        for offset, legit in ((-1440, False),
                              (-1439, True),
                              (1439, True),
                              (1440, False)):
            if cls is time:
                t = cls(1, 2, 3, tzinfo=Edgy(offset))
            elif cls is datetime:
                t = cls(6, 6, 6, 1, 2, 3, tzinfo=Edgy(offset))
            else:
                assert 0, "impossible"
            if legit:
                aofs = abs(offset)
                h, m = divmod(aofs, 60)
                tag = "%c%02d:%02d" % (offset < 0 and '-' or '+', h, m)
                if isinstance(t, datetime):
                    t = t.timetz()
                self.assertEqual(str(t), "01:02:03" + tag)
            else:
                self.assertRaises(ValueError, str, t)

    def test_tzinfo_classes(self):
        cls = self.theclass
        # All three tzinfo hooks may return None (unknown).
        class C1(tzinfo):
            def utcoffset(self, dt): return None
            def dst(self, dt): return None
            def tzname(self, dt): return None
        for t in (cls(1, 1, 1),
                  cls(1, 1, 1, tzinfo=None),
                  cls(1, 1, 1, tzinfo=C1())):
            self.assertTrue(t.utcoffset() is None)
            self.assertTrue(t.dst() is None)
            self.assertTrue(t.tzname() is None)

        # Extreme but legal offsets pass straight through.
        class C3(tzinfo):
            def utcoffset(self, dt): return timedelta(minutes=-1439)
            def dst(self, dt): return timedelta(minutes=1439)
            def tzname(self, dt): return "aname"
        t = cls(1, 1, 1, tzinfo=C3())
        self.assertEqual(t.utcoffset(), timedelta(minutes=-1439))
        self.assertEqual(t.dst(), timedelta(minutes=1439))
        self.assertEqual(t.tzname(), "aname")

        # Wrong types.
        class C4(tzinfo):
            def utcoffset(self, dt): return "aname"
            def dst(self, dt): return 7
            def tzname(self, dt): return 0
        t = cls(1, 1, 1, tzinfo=C4())
        self.assertRaises(TypeError, t.utcoffset)
        self.assertRaises(TypeError, t.dst)
        self.assertRaises(TypeError, t.tzname)

        # Offset out of range.
        class C6(tzinfo):
            def utcoffset(self, dt): return timedelta(hours=-24)
            def dst(self, dt): return timedelta(hours=24)
        t = cls(1, 1, 1, tzinfo=C6())
        self.assertRaises(ValueError, t.utcoffset)
        self.assertRaises(ValueError, t.dst)

        # Not a whole number of minutes.
        class C7(tzinfo):
            def utcoffset(self, dt): return timedelta(seconds=61)
            def dst(self, dt): return timedelta(microseconds=-81)
        t = cls(1, 1, 1, tzinfo=C7())
        self.assertRaises(ValueError, t.utcoffset)
        self.assertRaises(ValueError, t.dst)

    def test_aware_compare(self):
        cls = self.theclass

        # Ensure that utcoffset() gets ignored if the comparands have
        # the same tzinfo member.
        class OperandDependentOffset(tzinfo):
            def utcoffset(self, t):
                if t.minute < 10:
                    # d0 and d1 equal after adjustment
                    return timedelta(minutes=t.minute)
                else:
                    # d2 off in the weeds
                    return timedelta(minutes=59)

        base = cls(8, 9, 10, tzinfo=OperandDependentOffset())
        d0 = base.replace(minute=3)
        d1 = base.replace(minute=9)
        d2 = base.replace(minute=11)
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = cmp(x, y)
                # Same tzinfo member: offsets ignored, raw minutes decide.
                expected = cmp(x.minute, y.minute)
                self.assertEqual(got, expected)

        # However, if they're different members, utcoffset is not ignored.
        # Note that a time can't actually have an operand-dependent offset,
        # though (and time.utcoffset() passes None to tzinfo.utcoffset()),
        # so skip this test for time.
        if cls is not time:
            d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
            d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
            d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
            for x in d0, d1, d2:
                for y in d0, d1, d2:
                    got = cmp(x, y)
                    if (x is d0 or x is d1) and (y is d0 or y is d1):
                        expected = 0
                    elif x is y is d2:
                        expected = 0
                    elif x is d2:
                        expected = -1
                    else:
                        assert y is d2
                        expected = 1
                    self.assertEqual(got, expected)
# Testing time objects with a non-None tzinfo.
class TestTimeTZ(TestTime, TZInfoBase, unittest.TestCase):
theclass = time
def test_empty(self):
    """A default-constructed time is midnight with no tzinfo."""
    t = self.theclass()
    for field in ('hour', 'minute', 'second', 'microsecond'):
        self.assertEqual(getattr(t, field), 0)
    self.assertTrue(t.tzinfo is None)
def test_zones(self):
    """Exercise aware time objects across several fixed offsets:
    utcoffset/tzname/dst accessors, hashing, equality, str/isoformat,
    repr, and strftime %z/%Z handling."""
    est = FixedOffset(-300, "EST", 1)
    utc = FixedOffset(0, "UTC", -2)
    met = FixedOffset(60, "MET", 3)
    # t1, t2 and t3 all name the same instant in different zones.
    t1 = time( 7, 47, tzinfo=est)
    t2 = time(12, 47, tzinfo=utc)
    t3 = time(13, 47, tzinfo=met)
    t4 = time(microsecond=40)                 # naive
    t5 = time(microsecond=40, tzinfo=utc)     # aware
    self.assertEqual(t1.tzinfo, est)
    self.assertEqual(t2.tzinfo, utc)
    self.assertEqual(t3.tzinfo, met)
    self.assertTrue(t4.tzinfo is None)
    self.assertEqual(t5.tzinfo, utc)

    self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
    self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
    self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
    self.assertTrue(t4.utcoffset() is None)
    self.assertRaises(TypeError, t1.utcoffset, "no args")

    self.assertEqual(t1.tzname(), "EST")
    self.assertEqual(t2.tzname(), "UTC")
    self.assertEqual(t3.tzname(), "MET")
    self.assertTrue(t4.tzname() is None)
    self.assertRaises(TypeError, t1.tzname, "no args")

    self.assertEqual(t1.dst(), timedelta(minutes=1))
    self.assertEqual(t2.dst(), timedelta(minutes=-2))
    self.assertEqual(t3.dst(), timedelta(minutes=3))
    self.assertTrue(t4.dst() is None)
    self.assertRaises(TypeError, t1.dst, "no args")

    # Same instant => equal hashes and equal values.
    self.assertEqual(hash(t1), hash(t2))
    self.assertEqual(hash(t1), hash(t3))
    self.assertEqual(hash(t2), hash(t3))

    self.assertEqual(t1, t2)
    self.assertEqual(t1, t3)
    self.assertEqual(t2, t3)
    self.assertRaises(TypeError, lambda: t4 == t5) # mixed tz-aware & naive
    self.assertRaises(TypeError, lambda: t4 < t5) # mixed tz-aware & naive
    self.assertRaises(TypeError, lambda: t5 < t4) # mixed tz-aware & naive

    # Aware str/isoformat append the +HH:MM / -HH:MM offset.
    self.assertEqual(str(t1), "07:47:00-05:00")
    self.assertEqual(str(t2), "12:47:00+00:00")
    self.assertEqual(str(t3), "13:47:00+01:00")
    self.assertEqual(str(t4), "00:00:00.000040")
    self.assertEqual(str(t5), "00:00:00.000040+00:00")

    self.assertEqual(t1.isoformat(), "07:47:00-05:00")
    self.assertEqual(t2.isoformat(), "12:47:00+00:00")
    self.assertEqual(t3.isoformat(), "13:47:00+01:00")
    self.assertEqual(t4.isoformat(), "00:00:00.000040")
    self.assertEqual(t5.isoformat(), "00:00:00.000040+00:00")

    d = 'datetime.time'
    self.assertEqual(repr(t1), d + "(7, 47, tzinfo=est)")
    self.assertEqual(repr(t2), d + "(12, 47, tzinfo=utc)")
    self.assertEqual(repr(t3), d + "(13, 47, tzinfo=met)")
    self.assertEqual(repr(t4), d + "(0, 0, 0, 40)")
    self.assertEqual(repr(t5), d + "(0, 0, 0, 40, tzinfo=utc)")

    self.assertEqual(t1.strftime("%H:%M:%S %%Z=%Z %%z=%z"),
                                 "07:47:00 %Z=EST %z=-0500")
    self.assertEqual(t2.strftime("%H:%M:%S %Z %z"), "12:47:00 UTC +0000")
    self.assertEqual(t3.strftime("%H:%M:%S %Z %z"), "13:47:00 MET +0100")

    # A tzname containing % characters must survive strftime intact.
    yuck = FixedOffset(-1439, "%z %Z %%z%%Z")
    t1 = time(23, 59, tzinfo=yuck)
    self.assertEqual(t1.strftime("%H:%M %%Z='%Z' %%z='%z'"),
                                 "23:59 %Z='%z %Z %%z%%Z' %z='-2359'")

    # Check that an invalid tzname result raises an exception.
    class Badtzname(tzinfo):
        def tzname(self, dt): return 42
    t = time(2, 3, 4, tzinfo=Badtzname())
    self.assertEqual(t.strftime("%H:%M:%S"), "02:03:04")
    self.assertRaises(TypeError, t.strftime, "%Z")
def test_hash_edge_cases(self):
    """Times naming the same UTC instant must hash equal even when the
    offset adjustment wraps past midnight in either direction."""
    # Offsets that overflow a basic time.
    t1 = self.theclass(0, 1, 2, 3, tzinfo=FixedOffset(1439, ""))
    t2 = self.theclass(0, 0, 2, 3, tzinfo=FixedOffset(1438, ""))
    self.assertEqual(hash(t1), hash(t2))

    t1 = self.theclass(23, 58, 6, 100, tzinfo=FixedOffset(-1000, ""))
    t2 = self.theclass(23, 48, 6, 100, tzinfo=FixedOffset(-1010, ""))
    self.assertEqual(hash(t1), hash(t2))
def test_pickling(self):
    """Pickle round-trips for aware times, with and without a tzinfo."""
    # Try one without a tzinfo.
    args = 20, 59, 16, 64**2
    orig = self.theclass(*args)
    for pickler, unpickler, proto in pickle_choices:
        green = pickler.dumps(orig, proto)
        derived = unpickler.loads(green)
        self.assertEqual(orig, derived)

    # Try one with a tzinfo.
    tinfo = PicklableFixedOffset(-300, 'cookie')
    orig = self.theclass(5, 6, 7, tzinfo=tinfo)
    for pickler, unpickler, proto in pickle_choices:
        green = pickler.dumps(orig, proto)
        derived = unpickler.loads(green)
        self.assertEqual(orig, derived)
        # The tzinfo (class, offset, and name) must survive the trip too.
        self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
        self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
        self.assertEqual(derived.tzname(), 'cookie')
def test_more_bool(self):
    """Truth value of an aware time: falsy iff it is midnight after
    subtracting the UTC offset."""
    # Test cases with non-None tzinfo.
    cls = self.theclass
    t = cls(0, tzinfo=FixedOffset(-300, ""))
    self.assertTrue(t)

    t = cls(5, tzinfo=FixedOffset(-300, ""))
    self.assertTrue(t)

    # 5:00 at +5:00 is midnight UTC, hence falsy.
    t = cls(5, tzinfo=FixedOffset(300, ""))
    self.assertTrue(not t)

    t = cls(23, 59, tzinfo=FixedOffset(23*60 + 59, ""))
    self.assertTrue(not t)

    # Mostly ensuring this doesn't overflow internally.
    t = cls(0, tzinfo=FixedOffset(23*60 + 59, ""))
    self.assertTrue(t)

    # But this should yield a value error -- the utcoffset is bogus.
    t = cls(0, tzinfo=FixedOffset(24*60, ""))
    self.assertRaises(ValueError, lambda: bool(t))

    # Likewise.
    t = cls(0, tzinfo=FixedOffset(-24*60, ""))
    self.assertRaises(ValueError, lambda: bool(t))
def test_replace(self):
    """replace() on an aware time must handle every field including
    tzinfo, allow dropping/adding the tzinfo, and still validate."""
    cls = self.theclass
    z100 = FixedOffset(100, "+100")
    zm200 = FixedOffset(timedelta(minutes=-200), "-200")
    args = [1, 2, 3, 4, z100]
    base = cls(*args)
    # No keywords: an identical copy.
    self.assertEqual(base, base.replace())

    replacements = (("hour", 5),
                    ("minute", 6),
                    ("second", 7),
                    ("microsecond", 8),
                    ("tzinfo", zm200))
    for i, (name, newval) in enumerate(replacements):
        newargs = list(args)
        newargs[i] = newval
        expected = cls(*newargs)
        got = base.replace(**{name: newval})
        self.assertEqual(expected, got)

    # Ensure we can get rid of a tzinfo.
    self.assertEqual(base.tzname(), "+100")
    base2 = base.replace(tzinfo=None)
    self.assertTrue(base2.tzinfo is None)
    self.assertTrue(base2.tzname() is None)

    # Ensure we can add one.
    base3 = base2.replace(tzinfo=z100)
    self.assertEqual(base, base3)
    self.assertTrue(base.tzinfo is base3.tzinfo)

    # Out of bounds.
    base = cls(1)
    self.assertRaises(ValueError, base.replace, hour=24)
    self.assertRaises(ValueError, base.replace, minute=-1)
    self.assertRaises(ValueError, base.replace, second=100)
    self.assertRaises(ValueError, base.replace, microsecond=1000000)
def test_mixed_compare(self):
    """Comparing naive and aware times: allowed while both are
    effectively naive, TypeError once one has a real offset; identical
    tzinfo members short-circuit the offset computation."""
    t1 = time(1, 2, 3)
    t2 = time(1, 2, 3)
    self.assertEqual(t1, t2)
    t2 = t2.replace(tzinfo=None)
    self.assertEqual(t1, t2)
    # A tzinfo whose utcoffset() is None still counts as naive.
    t2 = t2.replace(tzinfo=FixedOffset(None, ""))
    self.assertEqual(t1, t2)
    # A real offset makes the comparison naive-vs-aware: TypeError.
    t2 = t2.replace(tzinfo=FixedOffset(0, ""))
    self.assertRaises(TypeError, lambda: t1 == t2)

    # In time w/ identical tzinfo objects, utcoffset is ignored.
    class Varies(tzinfo):
        def __init__(self):
            # Offset grows by one minute each time utcoffset() is called,
            # so two calls on the same object never agree.
            self.offset = timedelta(minutes=22)
        def utcoffset(self, t):
            self.offset += timedelta(minutes=1)
            return self.offset

    v = Varies()
    t1 = t2.replace(tzinfo=v)
    t2 = t2.replace(tzinfo=v)
    self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
    self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
    self.assertEqual(t1, t2)

    # But if they're not identical, it isn't ignored.
    t2 = t2.replace(tzinfo=Varies())
    self.assertTrue(t1 < t2)  # t1's offset counter still going up
def test_subclass_timetz(self):
    """An aware-time subclass with extra state must construct, keep its
    extra attribute, and report the same utcoffset as the base class."""

    class C(self.theclass):
        theAnswer = 42

        def __new__(cls, *args, **kws):
            # Pop the subclass-only keyword before delegating to the base.
            temp = kws.copy()
            extra = temp.pop('extra')
            result = self.theclass.__new__(cls, *args, **temp)
            result.extra = extra
            return result

        def newmeth(self, start):
            return start + self.hour + self.second

    args = 4, 5, 6, 500, FixedOffset(-300, "EST", 1)

    dt1 = self.theclass(*args)
    dt2 = C(*args, **{'extra': 7})

    self.assertEqual(dt2.__class__, C)
    self.assertEqual(dt2.theAnswer, 42)
    self.assertEqual(dt2.extra, 7)
    self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
    self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.second - 7)
# Testing datetime objects with a non-None tzinfo.
class TestDateTimeTZ(TestDateTime, TZInfoBase, unittest.TestCase):
    """Tests for datetime objects with a non-None tzinfo."""
    theclass = datetime  # hook consulted by inherited TestDateTime/TZInfoBase tests
    def test_trivial(self):
        """The constructor stores each field verbatim; tzinfo defaults to None."""
        dt = self.theclass(1, 2, 3, 4, 5, 6, 7)
        self.assertEqual(dt.year, 1)
        self.assertEqual(dt.month, 2)
        self.assertEqual(dt.day, 3)
        self.assertEqual(dt.hour, 4)
        self.assertEqual(dt.minute, 5)
        self.assertEqual(dt.second, 6)
        self.assertEqual(dt.microsecond, 7)
        self.assertEqual(dt.tzinfo, None)
    def test_even_more_compare(self):
        """Aware/aware comparison adjusts by utcoffset() without overflow;
        aware vs naive raises TypeError; out-of-range offsets raise ValueError.
        """
        # The test_compare() and test_more_compare() inherited from TestDate
        # and TestDateTime covered non-tzinfo cases.
        # Smallest possible after UTC adjustment.
        t1 = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
        # Largest possible after UTC adjustment.
        t2 = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
                           tzinfo=FixedOffset(-1439, ""))
        # Make sure those compare correctly, and w/o overflow.
        self.assertTrue(t1 < t2)
        self.assertTrue(t1 != t2)
        self.assertTrue(t2 > t1)
        self.assertTrue(t1 == t1)
        self.assertTrue(t2 == t2)
        # Equal after adjustment.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""))
        t2 = self.theclass(2, 1, 1, 3, 13, tzinfo=FixedOffset(3*60+13+2, ""))
        self.assertEqual(t1, t2)
        # Change t1 not to subtract a minute, and t1 should be larger.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(0, ""))
        self.assertTrue(t1 > t2)
        # Change t1 to subtract 2 minutes, and t1 should be smaller.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(2, ""))
        self.assertTrue(t1 < t2)
        # Back to the original t1, but make seconds resolve it.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
                           second=1)
        self.assertTrue(t1 > t2)
        # Likewise, but make microseconds resolve it.
        t1 = self.theclass(1, 12, 31, 23, 59, tzinfo=FixedOffset(1, ""),
                           microsecond=1)
        self.assertTrue(t1 > t2)
        # Make t2 naive and it should fail.
        t2 = self.theclass.min
        self.assertRaises(TypeError, lambda: t1 == t2)
        self.assertEqual(t2, t2)
        # It's also naive if it has tzinfo but tzinfo.utcoffset() is None.
        class Naive(tzinfo):
            def utcoffset(self, dt): return None
        t2 = self.theclass(5, 6, 7, tzinfo=Naive())
        self.assertRaises(TypeError, lambda: t1 == t2)
        self.assertEqual(t2, t2)
        # OTOH, it's OK to compare two of these mixing the two ways of being
        # naive.
        t1 = self.theclass(5, 6, 7)
        self.assertEqual(t1, t2)
        # Try a bogus utcoffset.
        class Bogus(tzinfo):
            def utcoffset(self, dt):
                return timedelta(minutes=1440)  # out of bounds
        t1 = self.theclass(2, 2, 2, tzinfo=Bogus())
        t2 = self.theclass(2, 2, 2, tzinfo=FixedOffset(0, ""))
        self.assertRaises(ValueError, lambda: t1 == t2)
    def test_pickling(self):
        """Pickle round-trips preserve value — and, when present, the tzinfo
        object's class, offset and name — under every supported protocol."""
        # Try one without a tzinfo.
        args = 6, 7, 23, 20, 59, 1, 64**2
        orig = self.theclass(*args)
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
        # Try one with a tzinfo.
        tinfo = PicklableFixedOffset(-300, 'cookie')
        orig = self.theclass(*args, **{'tzinfo': tinfo})
        derived = self.theclass(1, 1, 1, tzinfo=FixedOffset(0, "", 0))
        for pickler, unpickler, proto in pickle_choices:
            green = pickler.dumps(orig, proto)
            derived = unpickler.loads(green)
            self.assertEqual(orig, derived)
            self.assertIsInstance(derived.tzinfo, PicklableFixedOffset)
            self.assertEqual(derived.utcoffset(), timedelta(minutes=-300))
            self.assertEqual(derived.tzname(), 'cookie')
    def test_extreme_hashes(self):
        """hash() works at the extreme aware values without overflowing, but
        rejects a tzinfo whose offset falls outside the legal range."""
        # If an attempt is made to hash these via subtracting the offset
        # then hashing a datetime object, OverflowError results.  The
        # Python implementation used to blow up here.
        t = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, ""))
        hash(t)
        t = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
                          tzinfo=FixedOffset(-1439, ""))
        hash(t)
        # OTOH, an OOB offset should blow up.
        t = self.theclass(5, 5, 5, tzinfo=FixedOffset(-1440, ""))
        self.assertRaises(ValueError, hash, t)
    def test_zones(self):
        """Three fixed offsets naming the same instant: equal values, equal
        hashes, correct utcoffset/tzname, and offset-suffixed str()/repr()."""
        est = FixedOffset(-300, "EST")
        utc = FixedOffset(0, "UTC")
        met = FixedOffset(60, "MET")
        t1 = datetime(2002, 3, 19, 7, 47, tzinfo=est)
        t2 = datetime(2002, 3, 19, 12, 47, tzinfo=utc)
        t3 = datetime(2002, 3, 19, 13, 47, tzinfo=met)
        self.assertEqual(t1.tzinfo, est)
        self.assertEqual(t2.tzinfo, utc)
        self.assertEqual(t3.tzinfo, met)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=-300))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=0))
        self.assertEqual(t3.utcoffset(), timedelta(minutes=60))
        self.assertEqual(t1.tzname(), "EST")
        self.assertEqual(t2.tzname(), "UTC")
        self.assertEqual(t3.tzname(), "MET")
        self.assertEqual(hash(t1), hash(t2))
        self.assertEqual(hash(t1), hash(t3))
        self.assertEqual(hash(t2), hash(t3))
        self.assertEqual(t1, t2)
        self.assertEqual(t1, t3)
        self.assertEqual(t2, t3)
        self.assertEqual(str(t1), "2002-03-19 07:47:00-05:00")
        self.assertEqual(str(t2), "2002-03-19 12:47:00+00:00")
        self.assertEqual(str(t3), "2002-03-19 13:47:00+01:00")
        d = 'datetime.datetime(2002, 3, 19, '
        self.assertEqual(repr(t1), d + "7, 47, tzinfo=est)")
        self.assertEqual(repr(t2), d + "12, 47, tzinfo=utc)")
        self.assertEqual(repr(t3), d + "13, 47, tzinfo=met)")
def test_combine(self):
met = FixedOffset(60, "MET")
d = date(2002, 3, 4)
tz = time(18, 45, 3, 1234, tzinfo=met)
dt = datetime.combine(d, tz)
self.assertEqual(dt, datetime(2002, 3, 4, 18, 45, 3, 1234,
tzinfo=met))
    def test_extract(self):
        """date()/time() drop the tzinfo; timetz() keeps it."""
        met = FixedOffset(60, "MET")
        dt = self.theclass(2002, 3, 4, 18, 45, 3, 1234, tzinfo=met)
        self.assertEqual(dt.date(), date(2002, 3, 4))
        self.assertEqual(dt.time(), time(18, 45, 3, 1234))
        self.assertEqual(dt.timetz(), time(18, 45, 3, 1234, tzinfo=met))
    def test_tz_aware_arithmetic(self):
        """Arithmetic on aware datetimes: mixing aware and naive raises,
        timedelta addition preserves tzinfo, and subtraction of two aware
        values accounts for their utcoffsets."""
        import random
        now = self.theclass.now()
        tz55 = FixedOffset(-330, "west 5:30")
        timeaware = now.time().replace(tzinfo=tz55)
        nowaware = self.theclass.combine(now.date(), timeaware)
        self.assertTrue(nowaware.tzinfo is tz55)
        self.assertEqual(nowaware.timetz(), timeaware)
        # Can't mix aware and non-aware.
        self.assertRaises(TypeError, lambda: now - nowaware)
        self.assertRaises(TypeError, lambda: nowaware - now)
        # And adding datetime's doesn't make sense, aware or not.
        self.assertRaises(TypeError, lambda: now + nowaware)
        self.assertRaises(TypeError, lambda: nowaware + now)
        self.assertRaises(TypeError, lambda: nowaware + nowaware)
        # Subtracting should yield 0.
        self.assertEqual(now - now, timedelta(0))
        self.assertEqual(nowaware - nowaware, timedelta(0))
        # Adding a delta should preserve tzinfo.
        delta = timedelta(weeks=1, minutes=12, microseconds=5678)
        nowawareplus = nowaware + delta
        self.assertTrue(nowaware.tzinfo is tz55)
        nowawareplus2 = delta + nowaware
        self.assertTrue(nowawareplus2.tzinfo is tz55)
        self.assertEqual(nowawareplus, nowawareplus2)
        # that - delta should be what we started with, and that - what we
        # started with should be delta.
        diff = nowawareplus - delta
        self.assertTrue(diff.tzinfo is tz55)
        self.assertEqual(nowaware, diff)
        self.assertRaises(TypeError, lambda: delta - nowawareplus)
        self.assertEqual(nowawareplus - nowaware, delta)
        # Make up a random timezone.  (Offset range is valid by construction.)
        tzr = FixedOffset(random.randrange(-1439, 1440), "randomtimezone")
        # Attach it to nowawareplus.
        nowawareplus = nowawareplus.replace(tzinfo=tzr)
        self.assertTrue(nowawareplus.tzinfo is tzr)
        # Make sure the difference takes the timezone adjustments into account.
        got = nowaware - nowawareplus
        # Expected:  (nowaware base - nowaware offset) -
        #            (nowawareplus base - nowawareplus offset) =
        #            (nowaware base - nowawareplus base) +
        #            (nowawareplus offset - nowaware offset) =
        #            -delta + nowawareplus offset - nowaware offset
        expected = nowawareplus.utcoffset() - nowaware.utcoffset() - delta
        self.assertEqual(got, expected)
        # Try max possible difference.
        min = self.theclass(1, 1, 1, tzinfo=FixedOffset(1439, "min"))
        max = self.theclass(MAXYEAR, 12, 31, 23, 59, 59, 999999,
                            tzinfo=FixedOffset(-1439, "max"))
        maxdiff = max - min
        self.assertEqual(maxdiff, self.theclass.max - self.theclass.min +
                         timedelta(minutes=2*1439))
    def test_tzinfo_now(self):
        """now() accepts an optional tz (positionally or as tz=), rejects
        non-tzinfo arguments, and actually converts into the given zone."""
        meth = self.theclass.now
        # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
        base = meth()
        # Try with and without naming the keyword.
        off42 = FixedOffset(42, "42")
        another = meth(off42)
        again = meth(tz=off42)
        self.assertTrue(another.tzinfo is again.tzinfo)
        self.assertEqual(another.utcoffset(), timedelta(minutes=42))
        # Bad argument with and w/o naming the keyword.
        self.assertRaises(TypeError, meth, 16)
        self.assertRaises(TypeError, meth, tzinfo=16)
        # Bad keyword name.
        self.assertRaises(TypeError, meth, tinfo=off42)
        # Too many args.
        self.assertRaises(TypeError, meth, off42, off42)
        # We don't know which time zone we're in, and don't have a tzinfo
        # class to represent it, so seeing whether a tz argument actually
        # does a conversion is tricky.
        weirdtz = FixedOffset(timedelta(hours=15, minutes=58), "weirdtz", 0)
        utc = FixedOffset(0, "utc", 0)
        for dummy in range(3):
            now = datetime.now(weirdtz)
            self.assertTrue(now.tzinfo is weirdtz)
            utcnow = datetime.utcnow().replace(tzinfo=utc)
            now2 = utcnow.astimezone(weirdtz)
            if abs(now - now2) < timedelta(seconds=30):
                break
            # Else the code is broken, or more than 30 seconds passed between
            # calls; assuming the latter, just try again.
        else:
            # Three strikes and we're out.
            self.fail("utcnow(), now(tz), or astimezone() may be broken")
    def test_tzinfo_fromtimestamp(self):
        """fromtimestamp() takes an optional tz, validates its type, and
        converts relative to the platform's local-time interpretation."""
        import time
        meth = self.theclass.fromtimestamp
        ts = time.time()
        # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
        base = meth(ts)
        # Try with and without naming the keyword.
        off42 = FixedOffset(42, "42")
        another = meth(ts, off42)
        again = meth(ts, tz=off42)
        self.assertTrue(another.tzinfo is again.tzinfo)
        self.assertEqual(another.utcoffset(), timedelta(minutes=42))
        # Bad argument with and w/o naming the keyword.
        self.assertRaises(TypeError, meth, ts, 16)
        self.assertRaises(TypeError, meth, ts, tzinfo=16)
        # Bad keyword name.
        self.assertRaises(TypeError, meth, ts, tinfo=off42)
        # Too many args.
        self.assertRaises(TypeError, meth, ts, off42, off42)
        # Too few args.
        self.assertRaises(TypeError, meth)
        # Try to make sure tz= actually does some conversion.
        timestamp = 1000000000
        utcdatetime = datetime.utcfromtimestamp(timestamp)
        # In POSIX (epoch 1970), that's 2001-09-09 01:46:40 UTC, give or take.
        # But on some flavor of Mac, it's nowhere near that.  So we can't have
        # any idea here what time that actually is, we can only test that
        # relative changes match.
        utcoffset = timedelta(hours=-15, minutes=39)  # arbitrary, but not zero
        tz = FixedOffset(utcoffset, "tz", 0)
        expected = utcdatetime + utcoffset
        got = datetime.fromtimestamp(timestamp, tz)
        self.assertEqual(expected, got.replace(tzinfo=None))
def test_tzinfo_utcnow(self):
meth = self.theclass.utcnow
# Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
base = meth()
# Try with and without naming the keyword; for whatever reason,
# utcnow() doesn't accept a tzinfo argument.
off42 = FixedOffset(42, "42")
self.assertRaises(TypeError, meth, off42)
self.assertRaises(TypeError, meth, tzinfo=off42)
    def test_tzinfo_utcfromtimestamp(self):
        """utcfromtimestamp() rejects a tz argument, positional or keyword."""
        import time
        meth = self.theclass.utcfromtimestamp
        ts = time.time()
        # Ensure it doesn't require tzinfo (i.e., that this doesn't blow up).
        base = meth(ts)
        # Try with and without naming the keyword; for whatever reason,
        # utcfromtimestamp() doesn't accept a tzinfo argument.
        off42 = FixedOffset(42, "42")
        self.assertRaises(TypeError, meth, ts, off42)
        self.assertRaises(TypeError, meth, ts, tzinfo=off42)
    def test_tzinfo_timetuple(self):
        """timetuple() maps dst() to tm_isdst: 1 for nonzero, 0 for zero,
        -1 for None; a wrong dst() type raises TypeError, out-of-range
        offsets raise ValueError."""
        # TestDateTime tested most of this.  datetime adds a twist to the
        # DST flag.
        class DST(tzinfo):
            def __init__(self, dstvalue):
                # Accept minutes-as-int for convenience.
                if isinstance(dstvalue, int):
                    dstvalue = timedelta(minutes=dstvalue)
                self.dstvalue = dstvalue
            def dst(self, dt):
                return self.dstvalue
        cls = self.theclass
        for dstvalue, flag in (-33, 1), (33, 1), (0, 0), (None, -1):
            d = cls(1, 1, 1, 10, 20, 30, 40, tzinfo=DST(dstvalue))
            t = d.timetuple()
            self.assertEqual(1, t.tm_year)
            self.assertEqual(1, t.tm_mon)
            self.assertEqual(1, t.tm_mday)
            self.assertEqual(10, t.tm_hour)
            self.assertEqual(20, t.tm_min)
            self.assertEqual(30, t.tm_sec)
            self.assertEqual(0, t.tm_wday)
            self.assertEqual(1, t.tm_yday)
            self.assertEqual(flag, t.tm_isdst)
        # dst() returns wrong type.
        self.assertRaises(TypeError, cls(1, 1, 1, tzinfo=DST("x")).timetuple)
        # dst() at the edge.
        self.assertEqual(cls(1,1,1, tzinfo=DST(1439)).timetuple().tm_isdst, 1)
        self.assertEqual(cls(1,1,1, tzinfo=DST(-1439)).timetuple().tm_isdst, 1)
        # dst() out of range.
        self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(1440)).timetuple)
        self.assertRaises(ValueError, cls(1,1,1, tzinfo=DST(-1440)).timetuple)
    def test_utctimetuple(self):
        """utctimetuple() subtracts utcoffset(), always reports tm_isdst=0,
        and normalizes correctly even when the adjustment leaves the
        MINYEAR..MAXYEAR range."""
        class DST(tzinfo):
            def __init__(self, dstvalue):
                if isinstance(dstvalue, int):
                    dstvalue = timedelta(minutes=dstvalue)
                self.dstvalue = dstvalue
            def dst(self, dt):
                return self.dstvalue
        cls = self.theclass
        # This can't work:  DST didn't implement utcoffset.
        self.assertRaises(NotImplementedError,
                          cls(1, 1, 1, tzinfo=DST(0)).utcoffset)
        class UOFS(DST):
            def __init__(self, uofs, dofs=None):
                DST.__init__(self, dofs)
                self.uofs = timedelta(minutes=uofs)
            def utcoffset(self, dt):
                return self.uofs
        # Ensure tm_isdst is 0 regardless of what dst() says:  DST is never
        # in effect for a UTC time.
        for dstvalue in -33, 33, 0, None:
            d = cls(1, 2, 3, 10, 20, 30, 40, tzinfo=UOFS(-53, dstvalue))
            t = d.utctimetuple()
            self.assertEqual(d.year, t.tm_year)
            self.assertEqual(d.month, t.tm_mon)
            self.assertEqual(d.day, t.tm_mday)
            self.assertEqual(11, t.tm_hour)  # 20mm + 53mm = 1hn + 13mm
            self.assertEqual(13, t.tm_min)
            self.assertEqual(d.second, t.tm_sec)
            self.assertEqual(d.weekday(), t.tm_wday)
            self.assertEqual(d.toordinal() - date(1, 1, 1).toordinal() + 1,
                             t.tm_yday)
            self.assertEqual(0, t.tm_isdst)
        # At the edges, UTC adjustment can normalize into years out-of-range
        # for a datetime object.  Ensure that a correct timetuple is
        # created anyway.
        tiny = cls(MINYEAR, 1, 1, 0, 0, 37, tzinfo=UOFS(1439))
        # That goes back 1 minute less than a full day.
        t = tiny.utctimetuple()
        self.assertEqual(t.tm_year, MINYEAR-1)
        self.assertEqual(t.tm_mon, 12)
        self.assertEqual(t.tm_mday, 31)
        self.assertEqual(t.tm_hour, 0)
        self.assertEqual(t.tm_min, 1)
        self.assertEqual(t.tm_sec, 37)
        self.assertEqual(t.tm_yday, 366)  # "year 0" is a leap year
        self.assertEqual(t.tm_isdst, 0)
        huge = cls(MAXYEAR, 12, 31, 23, 59, 37, 999999, tzinfo=UOFS(-1439))
        # That goes forward 1 minute less than a full day.
        t = huge.utctimetuple()
        self.assertEqual(t.tm_year, MAXYEAR+1)
        self.assertEqual(t.tm_mon, 1)
        self.assertEqual(t.tm_mday, 1)
        self.assertEqual(t.tm_hour, 23)
        self.assertEqual(t.tm_min, 58)
        self.assertEqual(t.tm_sec, 37)
        self.assertEqual(t.tm_yday, 1)
        self.assertEqual(t.tm_isdst, 0)
    def test_tzinfo_isoformat(self):
        """isoformat()/str() append tzname() only when utcoffset() is usable,
        honor a custom separator, and include microseconds only if nonzero."""
        zero = FixedOffset(0, "+00:00")
        plus = FixedOffset(220, "+03:40")
        minus = FixedOffset(-231, "-03:51")
        unknown = FixedOffset(None, "")
        cls = self.theclass
        datestr = '0001-02-03'
        for ofs in None, zero, plus, minus, unknown:
            for us in 0, 987001:
                d = cls(1, 2, 3, 4, 5, 59, us, tzinfo=ofs)
                timestr = '04:05:59' + (us and '.987001' or '')
                ofsstr = ofs is not None and d.tzname() or ''
                tailstr = timestr + ofsstr
                iso = d.isoformat()
                self.assertEqual(iso, datestr + 'T' + tailstr)
                self.assertEqual(iso, d.isoformat('T'))
                self.assertEqual(d.isoformat('k'), datestr + 'k' + tailstr)
                self.assertEqual(str(d), datestr + ' ' + tailstr)
def test_replace(self):
cls = self.theclass
z100 = FixedOffset(100, "+100")
zm200 = FixedOffset(timedelta(minutes=-200), "-200")
args = [1, 2, 3, 4, 5, 6, 7, z100]
base = cls(*args)
self.assertEqual(base, base.replace())
i = 0
for name, newval in (("year", 2),
("month", 3),
("day", 4),
("hour", 5),
("minute", 6),
("second", 7),
("microsecond", 8),
("tzinfo", zm200)):
newargs = args[:]
newargs[i] = newval
expected = cls(*newargs)
got = base.replace(**{name: newval})
self.assertEqual(expected, got)
i += 1
# Ensure we can get rid of a tzinfo.
self.assertEqual(base.tzname(), "+100")
base2 = base.replace(tzinfo=None)
self.assertTrue(base2.tzinfo is None)
self.assertTrue(base2.tzname() is None)
# Ensure we can add one.
base3 = base2.replace(tzinfo=z100)
self.assertEqual(base, base3)
self.assertTrue(base.tzinfo is base3.tzinfo)
# Out of bounds.
base = cls(2000, 2, 29)
self.assertRaises(ValueError, base.replace, year=2001)
    def test_more_astimezone(self):
        """astimezone() is an identity for the same tzinfo, adjusts by the
        offset difference for another zone, rejects degenerate (offset-less)
        tzinfos with ValueError and None with TypeError."""
        # The inherited test_astimezone covered some trivial and error cases.
        fnone = FixedOffset(None, "None")
        f44m = FixedOffset(44, "44")
        fm5h = FixedOffset(-timedelta(hours=5), "m300")
        dt = self.theclass.now(tz=f44m)
        self.assertTrue(dt.tzinfo is f44m)
        # Replacing with degenerate tzinfo raises an exception.
        self.assertRaises(ValueError, dt.astimezone, fnone)
        # Ditto with None tz.
        self.assertRaises(TypeError, dt.astimezone, None)
        # Replacing with same tzinfo makes no change.
        x = dt.astimezone(dt.tzinfo)
        self.assertTrue(x.tzinfo is f44m)
        self.assertEqual(x.date(), dt.date())
        self.assertEqual(x.time(), dt.time())
        # Replacing with different tzinfo does adjust.
        got = dt.astimezone(fm5h)
        self.assertTrue(got.tzinfo is fm5h)
        self.assertEqual(got.utcoffset(), timedelta(hours=-5))
        expected = dt - dt.utcoffset()  # in effect, convert to UTC
        expected += fm5h.utcoffset(dt)  # and from there to local time
        expected = expected.replace(tzinfo=fm5h)  # and attach new tzinfo
        self.assertEqual(got.date(), expected.date())
        self.assertEqual(got.time(), expected.time())
        self.assertEqual(got.timetz(), expected.timetz())
        self.assertTrue(got.tzinfo is expected.tzinfo)
        self.assertEqual(got, expected)
    def test_aware_subtract(self):
        """Subtracting aware datetimes ignores utcoffset() when both operands
        share the very same tzinfo object, but applies it when the tzinfo
        objects are distinct (even if of the same class)."""
        cls = self.theclass
        # Ensure that utcoffset() is ignored when the operands have the
        # same tzinfo member.
        class OperandDependentOffset(tzinfo):
            def utcoffset(self, t):
                if t.minute < 10:
                    # d0 and d1 equal after adjustment
                    return timedelta(minutes=t.minute)
                else:
                    # d2 off in the weeds
                    return timedelta(minutes=59)
        base = cls(8, 9, 10, 11, 12, 13, 14, tzinfo=OperandDependentOffset())
        d0 = base.replace(minute=3)
        d1 = base.replace(minute=9)
        d2 = base.replace(minute=11)
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = x - y
                expected = timedelta(minutes=x.minute - y.minute)
                self.assertEqual(got, expected)
        # OTOH, if the tzinfo members are distinct, utcoffsets aren't
        # ignored.
        base = cls(8, 9, 10, 11, 12, 13, 14)
        d0 = base.replace(minute=3, tzinfo=OperandDependentOffset())
        d1 = base.replace(minute=9, tzinfo=OperandDependentOffset())
        d2 = base.replace(minute=11, tzinfo=OperandDependentOffset())
        for x in d0, d1, d2:
            for y in d0, d1, d2:
                got = x - y
                if (x is d0 or x is d1) and (y is d0 or y is d1):
                    expected = timedelta(0)
                elif x is y is d2:
                    expected = timedelta(0)
                elif x is d2:
                    expected = timedelta(minutes=(11-59)-0)
                else:
                    assert y is d2
                    expected = timedelta(minutes=0-(11-59))
                self.assertEqual(got, expected)
    def test_mixed_compare(self):
        """datetime analogue of the time test: naive vs aware comparison
        rules, and utcoffset() being bypassed for identical tzinfo objects."""
        t1 = datetime(1, 2, 3, 4, 5, 6, 7)
        t2 = datetime(1, 2, 3, 4, 5, 6, 7)
        self.assertEqual(t1, t2)
        t2 = t2.replace(tzinfo=None)
        self.assertEqual(t1, t2)
        # A tzinfo whose utcoffset() returns None still counts as naive.
        t2 = t2.replace(tzinfo=FixedOffset(None, ""))
        self.assertEqual(t1, t2)
        # Now t2 is genuinely aware; mixed comparison must fail.
        t2 = t2.replace(tzinfo=FixedOffset(0, ""))
        self.assertRaises(TypeError, lambda: t1 == t2)
        # In datetime w/ identical tzinfo objects, utcoffset is ignored.
        class Varies(tzinfo):
            # Stateful: each utcoffset() call returns one minute more, proving
            # offsets are not consulted when the tzinfo objects are identical.
            def __init__(self):
                self.offset = timedelta(minutes=22)
            def utcoffset(self, t):
                self.offset += timedelta(minutes=1)
                return self.offset
        v = Varies()
        t1 = t2.replace(tzinfo=v)
        t2 = t2.replace(tzinfo=v)
        self.assertEqual(t1.utcoffset(), timedelta(minutes=23))
        self.assertEqual(t2.utcoffset(), timedelta(minutes=24))
        self.assertEqual(t1, t2)
        # But if they're not identical, it isn't ignored.
        t2 = t2.replace(tzinfo=Varies())
        self.assertTrue(t1 < t2)  # t1's offset counter still going up
    def test_subclass_datetimetz(self):
        """A datetime subclass carrying extra state must construct through
        the tz-aware base __new__ and keep utcoffset() behavior intact."""
        class C(self.theclass):
            theAnswer = 42
            def __new__(cls, *args, **kws):
                # Strip our private keyword before delegating to the base type.
                temp = kws.copy()
                extra = temp.pop('extra')
                result = self.theclass.__new__(cls, *args, **temp)
                result.extra = extra
                return result
            def newmeth(self, start):
                return start + self.hour + self.year
        args = 2002, 12, 31, 4, 5, 6, 500, FixedOffset(-300, "EST", 1)
        dt1 = self.theclass(*args)
        dt2 = C(*args, **{'extra': 7})
        self.assertEqual(dt2.__class__, C)
        self.assertEqual(dt2.theAnswer, 42)
        self.assertEqual(dt2.extra, 7)
        self.assertEqual(dt1.utcoffset(), dt2.utcoffset())
        self.assertEqual(dt2.newmeth(-7), dt1.hour + dt1.year - 7)
# Pain to set up DST-aware tzinfo classes.
def first_sunday_on_or_after(dt):
    """Return *dt* advanced to the first Sunday on or after it.

    If dt already falls on a Sunday it is returned unchanged; the
    time-of-day fields are never touched.
    """
    # weekday(): Monday == 0 ... Sunday == 6.
    days_ahead = 6 - dt.weekday()
    if days_ahead == 0:
        return dt
    return dt + timedelta(days_ahead)
# Handy deltas reused throughout the conversion tests.
ZERO = timedelta(0)
HOUR = timedelta(hours=1)
DAY = timedelta(days=1)
# In the US, DST starts at 2am (standard time) on the first Sunday in April.
DSTSTART = datetime(1, 4, 1, 2)
# and ends at 2am (DST time; 1am standard time) on the last Sunday of Oct,
# which is the first Sunday on or after Oct 25.  Because we view 1:MM as
# being standard time on that day, there is no spelling in local time of
# the last hour of DST (that's 1:MM DST, but 1:MM is taken as standard time).
DSTEND = datetime(1, 10, 25, 1)
class USTimeZone(tzinfo):
    """A US-style DST-aware tzinfo: fixed standard offset plus a one-hour
    DST bump between the first Sunday in April and the last Sunday in
    October (pre-2007 US rules)."""
    def __init__(self, hours, reprname, stdname, dstname):
        self.stdoffset = timedelta(hours=hours)
        self.reprname = reprname
        self.stdname = stdname
        self.dstname = dstname
    def __repr__(self):
        return self.reprname
    def tzname(self, dt):
        # Name depends on whether DST is in effect for dt.
        if self.dst(dt):
            return self.dstname
        else:
            return self.stdname
    def utcoffset(self, dt):
        return self.stdoffset + self.dst(dt)
    def dst(self, dt):
        """Return HOUR while DST is in effect for dt, else ZERO."""
        if dt is None or dt.tzinfo is None:
            # An exception instead may be sensible here, in one or more of
            # the cases.
            return ZERO
        assert dt.tzinfo is self
        # Find first Sunday in April.
        start = first_sunday_on_or_after(DSTSTART.replace(year=dt.year))
        assert start.weekday() == 6 and start.month == 4 and start.day <= 7
        # Find last Sunday in October.
        end = first_sunday_on_or_after(DSTEND.replace(year=dt.year))
        assert end.weekday() == 6 and end.month == 10 and end.day >= 25
        # Can't compare naive to aware objects, so strip the timezone from
        # dt first.
        if start <= dt.replace(tzinfo=None) < end:
            return HOUR
        else:
            return ZERO
# The four mainland-US zones used by the conversion tests below.
Eastern  = USTimeZone(-5, "Eastern",  "EST", "EDT")
Central  = USTimeZone(-6, "Central",  "CST", "CDT")
Mountain = USTimeZone(-7, "Mountain", "MST", "MDT")
Pacific  = USTimeZone(-8, "Pacific",  "PST", "PDT")
utc_real = FixedOffset(0, "UTC", 0)
# For better test coverage, we want another flavor of UTC that's west of
# the Eastern and Pacific timezones.
utc_fake = FixedOffset(-12*60, "UTCfake", 0)
class TestTimezoneConversions(unittest.TestCase):
    """Round-trip conversion tests across DST boundaries (2002 US rules)."""
    # The DST switch times for 2002, in std time.
    dston = datetime(2002, 4, 7, 2)
    dstoff = datetime(2002, 10, 27, 1)
    theclass = datetime
    # Check a time that's inside DST.
    def checkinside(self, dt, tz, utc, dston, dstoff):
        """Assert that a DST-time dt converts sanely to utc and back,
        accounting for the unspellable/redundant hours at the switches."""
        self.assertEqual(dt.dst(), HOUR)
        # Conversion to our own timezone is always an identity.
        self.assertEqual(dt.astimezone(tz), dt)
        asutc = dt.astimezone(utc)
        there_and_back = asutc.astimezone(tz)
        # Conversion to UTC and back isn't always an identity here,
        # because there are redundant spellings (in local time) of
        # UTC time when DST begins:  the clock jumps from 1:59:59
        # to 3:00:00, and a local time of 2:MM:SS doesn't really
        # make sense then.  The classes above treat 2:MM:SS as
        # daylight time then (it's "after 2am"), really an alias
        # for 1:MM:SS standard time.  The latter form is what
        # conversion back from UTC produces.
        if dt.date() == dston.date() and dt.hour == 2:
            # We're in the redundant hour, and coming back from
            # UTC gives the 1:MM:SS standard-time spelling.
            self.assertEqual(there_and_back + HOUR, dt)
            # Although during was considered to be in daylight
            # time, there_and_back is not.
            self.assertEqual(there_and_back.dst(), ZERO)
            # They're the same times in UTC.
            self.assertEqual(there_and_back.astimezone(utc),
                             dt.astimezone(utc))
        else:
            # We're not in the redundant hour.
            self.assertEqual(dt, there_and_back)
        # Because we have a redundant spelling when DST begins, there is
        # (unfortunately) an hour when DST ends that can't be spelled at all in
        # local time.  When DST ends, the clock jumps from 1:59 back to 1:00
        # again.  The hour 1:MM DST has no spelling then:  1:MM is taken to be
        # standard time.  1:MM DST == 0:MM EST, but 0:MM is taken to be
        # daylight time.  The hour 1:MM daylight == 0:MM standard can't be
        # expressed in local time.  Nevertheless, we want conversion back
        # from UTC to mimic the local clock's "repeat an hour" behavior.
        nexthour_utc = asutc + HOUR
        nexthour_tz = nexthour_utc.astimezone(tz)
        if dt.date() == dstoff.date() and dt.hour == 0:
            # We're in the hour before the last DST hour.  The last DST hour
            # is ineffable.  We want the conversion back to repeat 1:MM.
            self.assertEqual(nexthour_tz, dt.replace(hour=1))
            nexthour_utc += HOUR
            nexthour_tz = nexthour_utc.astimezone(tz)
            self.assertEqual(nexthour_tz, dt.replace(hour=1))
        else:
            self.assertEqual(nexthour_tz - dt, HOUR)
    # Check a time that's outside DST.
    def checkoutside(self, dt, tz, utc):
        """Assert that a standard-time dt round-trips exactly through utc."""
        self.assertEqual(dt.dst(), ZERO)
        # Conversion to our own timezone is always an identity.
        self.assertEqual(dt.astimezone(tz), dt)
        # Converting to UTC and back is an identity too.
        asutc = dt.astimezone(utc)
        there_and_back = asutc.astimezone(tz)
        self.assertEqual(dt, there_and_back)
    def convert_between_tz_and_utc(self, tz, utc):
        """Probe conversions at, just inside, and just outside both DST
        switch points, over a range of deltas from microseconds to weeks."""
        dston = self.dston.replace(tzinfo=tz)
        # Because 1:MM on the day DST ends is taken as being standard time,
        # there is no spelling in tz for the last hour of daylight time.
        # For purposes of the test, the last hour of DST is 0:MM, which is
        # taken as being daylight time (and 1:MM is taken as being standard
        # time).
        dstoff = self.dstoff.replace(tzinfo=tz)
        for delta in (timedelta(weeks=13),
                      DAY,
                      HOUR,
                      timedelta(minutes=1),
                      timedelta(microseconds=1)):
            self.checkinside(dston, tz, utc, dston, dstoff)
            for during in dston + delta, dstoff - delta:
                self.checkinside(during, tz, utc, dston, dstoff)
            self.checkoutside(dstoff, tz, utc)
            for outside in dston - delta, dstoff + delta:
                self.checkoutside(outside, tz, utc)
    def test_easy(self):
        """Exercise the full conversion matrix for zone pairs whose 'problem
        hours' never collide."""
        # Despite the name of this test, the endcases are excruciating.
        self.convert_between_tz_and_utc(Eastern, utc_real)
        self.convert_between_tz_and_utc(Pacific, utc_real)
        self.convert_between_tz_and_utc(Eastern, utc_fake)
        self.convert_between_tz_and_utc(Pacific, utc_fake)
        # The next is really dancing near the edge.  It works because
        # Pacific and Eastern are far enough apart that their "problem
        # hours" don't overlap.
        self.convert_between_tz_and_utc(Eastern, Pacific)
        self.convert_between_tz_and_utc(Pacific, Eastern)
        # OTOH, these fail!  Don't enable them.  The difficulty is that
        # the edge case tests assume that every hour is representable in
        # the "utc" class.  This is always true for a fixed-offset tzinfo
        # class (like utc_real and utc_fake), but not for Eastern or Central.
        # For these adjacent DST-aware time zones, the range of time offsets
        # tested ends up creating hours in the one that aren't representable
        # in the other.  For the same reason, we would see failures in the
        # Eastern vs Pacific tests too if we added 3*HOUR to the list of
        # offset deltas in convert_between_tz_and_utc().
        #
        # self.convert_between_tz_and_utc(Eastern, Central)  # can't work
        # self.convert_between_tz_and_utc(Central, Eastern)  # can't work
    def test_tricky(self):
        """Spot-check the redundant 2:MM hour at DST start and the repeated
        1:MM hour at DST end, converting from distant fixed offsets."""
        # 22:00 on day before daylight starts.
        fourback = self.dston - timedelta(hours=4)
        ninewest = FixedOffset(-9*60, "-0900", 0)
        fourback = fourback.replace(tzinfo=ninewest)
        # 22:00-0900 is 7:00 UTC == 2:00 EST == 3:00 DST.  Since it's "after
        # 2", we should get the 3 spelling.
        # If we plug 22:00 the day before into Eastern, it "looks like std
        # time", so its offset is returned as -5, and -5 - -9 = 4.  Adding 4
        # to 22:00 lands on 2:00, which makes no sense in local time (the
        # local clock jumps from 1 to 3).  The point here is to make sure we
        # get the 3 spelling.
        expected = self.dston.replace(hour=3)
        got = fourback.astimezone(Eastern).replace(tzinfo=None)
        self.assertEqual(expected, got)
        # Similar, but map to 6:00 UTC == 1:00 EST == 2:00 DST.  In that
        # case we want the 1:00 spelling.
        sixutc = self.dston.replace(hour=6, tzinfo=utc_real)
        # Now 6:00 "looks like daylight", so the offset wrt Eastern is -4,
        # and adding -4-0 == -4 gives the 2:00 spelling.  We want the 1:00 EST
        # spelling.
        expected = self.dston.replace(hour=1)
        got = sixutc.astimezone(Eastern).replace(tzinfo=None)
        self.assertEqual(expected, got)
        # Now on the day DST ends, we want "repeat an hour" behavior.
        #       UTC  4:MM  5:MM  6:MM  7:MM  checking these
        #       EST 23:MM  0:MM  1:MM  2:MM
        #       EDT  0:MM  1:MM  2:MM  3:MM
        #      wall  0:MM  1:MM  1:MM  2:MM  against these
        for utc in utc_real, utc_fake:
            for tz in Eastern, Pacific:
                first_std_hour = self.dstoff - timedelta(hours=2)  # 23:MM
                # Convert that to UTC.
                first_std_hour -= tz.utcoffset(None)
                # Adjust for possibly fake UTC.
                asutc = first_std_hour + utc.utcoffset(None)
                # First UTC hour to convert; this is 4:00 when utc=utc_real &
                # tz=Eastern.
                asutcbase = asutc.replace(tzinfo=utc)
                for tzhour in (0, 1, 1, 2):
                    expectedbase = self.dstoff.replace(hour=tzhour)
                    for minute in 0, 30, 59:
                        expected = expectedbase.replace(minute=minute)
                        asutc = asutcbase.replace(minute=minute)
                        astz = asutc.astimezone(tz)
                        self.assertEqual(astz.replace(tzinfo=None), expected)
                    asutcbase += HOUR
def test_bogus_dst(self):
class ok(tzinfo):
def utcoffset(self, dt): return HOUR
def dst(self, dt): return HOUR
now = self.theclass.now().replace(tzinfo=utc_real)
# Doesn't blow up.
now.astimezone(ok())
# Does blow up.
class notok(ok):
def dst(self, dt): return None
self.assertRaises(ValueError, now.astimezone, notok())
    def test_fromutc(self):
        """fromutc() argument checking, and verification that astimezone()
        delegates its UTC->local mapping to the target zone's fromutc()."""
        self.assertRaises(TypeError, Eastern.fromutc)  # not enough args
        now = datetime.utcnow().replace(tzinfo=utc_real)
        self.assertRaises(ValueError, Eastern.fromutc, now)  # wrong tzinfo
        now = now.replace(tzinfo=Eastern)  # insert correct tzinfo
        enow = Eastern.fromutc(now)  # doesn't blow up
        self.assertEqual(enow.tzinfo, Eastern)  # has right tzinfo member
        self.assertRaises(TypeError, Eastern.fromutc, now, now)  # too many args
        self.assertRaises(TypeError, Eastern.fromutc, date.today())  # wrong type
        # Always converts UTC to standard time.
        class FauxUSTimeZone(USTimeZone):
            def fromutc(self, dt):
                return dt + self.stdoffset
        FEastern = FauxUSTimeZone(-5, "FEastern", "FEST", "FEDT")
        #  UTC  4:MM  5:MM  6:MM  7:MM  8:MM  9:MM
        #  EST 23:MM  0:MM  1:MM  2:MM  3:MM  4:MM
        #  EDT  0:MM  1:MM  2:MM  3:MM  4:MM  5:MM
        # Check around DST start.
        start = self.dston.replace(hour=4, tzinfo=Eastern)
        fstart = start.replace(tzinfo=FEastern)
        for wall in 23, 0, 1, 3, 4, 5:
            expected = start.replace(hour=wall)
            if wall == 23:
                # The 23:MM wall time falls on the previous day.
                expected -= timedelta(days=1)
            got = Eastern.fromutc(start)
            self.assertEqual(expected, got)
            expected = fstart + FEastern.stdoffset
            got = FEastern.fromutc(fstart)
            self.assertEqual(expected, got)
            # Ensure astimezone() calls fromutc() too.
            got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
            self.assertEqual(expected, got)
            start += HOUR
            fstart += HOUR
        # Check around DST end.
        start = self.dstoff.replace(hour=4, tzinfo=Eastern)
        fstart = start.replace(tzinfo=FEastern)
        for wall in 0, 1, 1, 2, 3, 4:
            expected = start.replace(hour=wall)
            got = Eastern.fromutc(start)
            self.assertEqual(expected, got)
            expected = fstart + FEastern.stdoffset
            got = FEastern.fromutc(fstart)
            self.assertEqual(expected, got)
            # Ensure astimezone() calls fromutc() too.
            got = fstart.replace(tzinfo=utc_real).astimezone(FEastern)
            self.assertEqual(expected, got)
            start += HOUR
            fstart += HOUR
#############################################################################
# oddballs
class Oddballs(unittest.TestCase):
    """Edge cases that don't fit the per-class suites."""
    def test_bug_1028306(self):
        """date vs datetime comparison is mixed-type (never equal, ordering
        raises TypeError) even though datetime subclasses date; the base
        date projection can still be compared via date.__eq__ explicitly."""
        # Trying to compare a date to a datetime should act like a mixed-
        # type comparison, despite that datetime is a subclass of date.
        as_date = date.today()
        as_datetime = datetime.combine(as_date, time())
        self.assertTrue(as_date != as_datetime)
        self.assertTrue(as_datetime != as_date)
        self.assertTrue(not as_date == as_datetime)
        self.assertTrue(not as_datetime == as_date)
        self.assertRaises(TypeError, lambda: as_date < as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime < as_date)
        self.assertRaises(TypeError, lambda: as_date <= as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime <= as_date)
        self.assertRaises(TypeError, lambda: as_date > as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime > as_date)
        self.assertRaises(TypeError, lambda: as_date >= as_datetime)
        self.assertRaises(TypeError, lambda: as_datetime >= as_date)
        # Nevertheless, comparison should work with the base-class (date)
        # projection if use of a date method is forced.
        self.assertTrue(as_date.__eq__(as_datetime))
        different_day = (as_date.day + 1) % 20 + 1
        self.assertTrue(not as_date.__eq__(as_datetime.replace(day=
                                                     different_day)))
        # And date should compare with other subclasses of date.  If a
        # subclass wants to stop this, it's up to the subclass to do so.
        date_sc = SubclassDate(as_date.year, as_date.month, as_date.day)
        self.assertEqual(as_date, date_sc)
        self.assertEqual(date_sc, as_date)
        # Ditto for datetimes.
        datetime_sc = SubclassDatetime(as_datetime.year, as_datetime.month,
                                       as_date.day, 0, 0, 0)
        self.assertEqual(as_datetime, datetime_sc)
        self.assertEqual(datetime_sc, as_datetime)
def test_main():
    """Run every test case defined in this module through the regrtest helper."""
    test_support.run_unittest(__name__)
# Allow the test module to be executed directly from the command line.
if __name__ == "__main__":
    test_main()
| gpl-2.0 |
klahnakoski/TestFailures | pyLibrary/queries/containers/list_usingSQLite.py | 1 | 85283 | # encoding: utf-8
#
#
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this file,
# You can obtain one at http:# mozilla.org/MPL/2.0/.
#
# Author: Kyle Lahnakoski (kyle@lahnakoski.com)
#
from __future__ import absolute_import
from __future__ import division
from __future__ import unicode_literals
import itertools
import re
from collections import Mapping, OrderedDict
from copy import copy
from pyLibrary import convert
from pyLibrary.collections import UNION
from pyLibrary.collections.matrix import Matrix, index_to_coordinate
from pyLibrary.debugs.logs import Log
from pyLibrary.dot import listwrap, coalesce, Dict, wrap, Null, unwraplist, split_field, join_field, startswith_field, literal_field, unwrap, \
relative_field
from pyLibrary.maths import Math
from pyLibrary.maths.randoms import Random
from pyLibrary.meta import use_settings, DataClass
from pyLibrary.queries import jx
from pyLibrary.queries.containers import Container, STRUCT
from pyLibrary.queries.domains import SimpleSetDomain, DefaultDomain
from pyLibrary.queries.expressions import jx_expression, Variable, sql_type_to_json_type, TupleOp, LeavesOp
from pyLibrary.queries.meta import Column
from pyLibrary.queries.query import QueryOp
from pyLibrary.sql.sqlite import Sqlite
from pyLibrary.strings import expand_template
from pyLibrary.times.dates import Date
_containers = None
UID = "__id__" # will not be quoted
GUID = "__guid__"
ORDER = "__order__"
PARENT = "__parent__"
COLUMN = "__column"
ALL_TYPES="bns"
def late_import():
    """Bind the pyLibrary.queries.containers module on first use.

    Deferred to avoid a circular import between this module and the
    containers package; rebinds the module-level `_containers`.
    """
    global _containers
    from pyLibrary.queries import containers as _containers

    _ = _containers  # keep a local reference so linters see the import used
class Table_usingSQLite(Container):
@use_settings
def __init__(self, name, db=None, uid=GUID, exists=False, settings=None):
    """
    :param name: NAME FOR THIS TABLE
    :param db: THE DB TO USE (a new Sqlite instance is created when None)
    :param uid: THE UNIQUE INDEX FOR THIS TABLE
    :param exists: True to attach to an existing table instead of CREATE TABLE
    :param settings: injected by @use_settings
    :return: HANDLE FOR TABLE IN db
    """
    global _containers

    Container.__init__(self, frum=None)
    if db:
        self.db = db
    else:
        self.db = db = Sqlite()
    self.name = name
    self.uid = listwrap(uid)
    self._next_uid = 1
    self._make_digits_table()

    late_import()
    # Register this db as the default container config if none exists yet.
    if not _containers.config.default:
        _containers.config.default = {
            "type": "sqlite",
            "settings": {"db": db}
        }
    self.uid_accessor = jx.get(self.uid)
    self.nested_tables = OrderedDict()  # MAP FROM NESTED PATH TO Table OBJECT, PARENTS PROCEED CHILDREN
    self.nested_tables["."] = self
    self.columns = {".": set()}  # MAP FROM DOCUMENT ABS PROPERTY NAME TO THE SET OF SQL COLUMNS IT REPRESENTS (ONE FOR EACH REALIZED DATATYPE)

    if not exists:
        # Register one schema column per uid component (GUID gets no typed column).
        for u in self.uid:
            if u == GUID:
                if self.columns.get(u) is None:
                    self.columns[u] = set()
            else:
                c = Column(name=u, table=name, type="string", es_column=typed_column(u, "string"), es_index=name)
                add_column_to_schema(self.columns, c)

        # CREATE TABLE with the uid columns as primary key.
        command = "CREATE TABLE " + quote_table(name) + "(" + \
                  (",".join(
                      [quoted_UID + " INTEGER"] +
                      [_quote_column(c) + " " + sql_types[c.type] for u, cs in self.columns.items() for c in cs]
                  )) + \
                  ", PRIMARY KEY (" + \
                  (", ".join(
                      [quoted_UID] +
                      [_quote_column(c) for u in self.uid for c in self.columns[u]]
                  )) + \
                  "))"
        self.db.execute(command)
    else:
        # LOAD THE COLUMNS from the existing table's metadata
        command = "PRAGMA table_info(" + quote_table(name) + ")"
        details = self.db.query(command)
        for r in details:
            # r[1] is the column name, r[2] the declared SQL type
            cname = untyped_column(r[1])
            ctype = r[2].lower()
            column = Column(
                name=cname,
                table=name,
                type=ctype,
                es_column=typed_column(cname, ctype),
                es_index=name
            )
            add_column_to_schema(self.columns, column)
        # TODO: FOR ALL TABLES, FIND THE MAX ID
def _make_digits_table(self):
    """Create the single-column __digits__ helper table (values 0..9), if missing.

    Used elsewhere to synthesize numeric ranges via self-joins.
    """
    info = self.db.query("PRAGMA table_info(__digits__)")
    if not info.data:
        self.db.execute("CREATE TABLE __digits__(value INTEGER)")
        rows = "\nUNION ALL ".join("SELECT " + unicode(d) for d in range(10))
        self.db.execute("INSERT INTO __digits__ " + rows)
def next_uid(self):
    """Return the current unique id, then advance the counter by one."""
    uid = self._next_uid
    self._next_uid = uid + 1
    return uid
def __del__(self):
    # Drop the backing table when this object is garbage collected.
    # NOTE(review): __del__ is not guaranteed to run at interpreter exit;
    # an explicit close/drop method would be more reliable.
    self.db.execute("DROP TABLE " + quote_table(self.name))
def add(self, doc):
    """Insert a single document (convenience wrapper over insert())."""
    self.insert([doc])
def get_leaves(self, table_name=None):
    """Return one generic Column per property name, skipping STRUCT columns.

    Each returned column has type "value" to hide the per-datatype split;
    only the first non-struct column of each name is reported.
    """
    leaves = []
    for cols in self.columns.values():
        for col in cols:
            if col.type in STRUCT:
                continue
            generic = col.__copy__()
            generic.type = "value"  # MULTI-VALUED, SO HIDE THE TYPE IN THIS GENERIC NAME
            leaves.append(generic)
            break  # one representative per property name
    return leaves
def _get_sql_schema(self, frum):
    """
    :param frum: the absolute path to the (possibly nested) sub-table
    :return: relative schema for the sub-table; change `es_index` to sql alias
    """
    # Path of frum relative to this table's name.
    nest_path = split_field(frum)[len(split_field(self.name)):]
    nest = join_field(nest_path)

    # WE MUST HAVE THE ALIAS NAMES FOR THE TABLES
    # Aliases are __a__, __b__, ... in nested-table insertion order.
    nest_to_alias = {
        nested_path: "__" + unichr(ord('a') + i) + "__"
        for i, (nested_path, sub_table) in enumerate(self.nested_tables.items())
    }

    def paths(field):
        # Yield every prefix of `field`, from "." up to the full path.
        path = split_field(field)
        for i in range(len(path)+1):
            yield join_field(path[0:i])

    # For every prefix of every known column name, collect the non-struct
    # columns under it, with es_index rewritten to the table alias.
    columns = Dict()
    for k in set(kk for k in self.columns.keys() for kk in paths(k)):
        for j, c in ((j, cc) for j, c in self.columns.items() for cc in c):
            if startswith_field(j, k):
                if c.type in STRUCT:
                    continue
                c = copy(c)  # do not mutate the shared schema column
                c.es_index = nest_to_alias[c.nested_path[0]]
                columns[literal_field(k)] += [c]
    return unwrap(columns)
def insert(self, docs):
    """Flatten `docs` into per-table row collections and write them out."""
    self._insert(self.flatten_many(docs))
def add_column(self, column):
    """
    ADD COLUMN, IF IT DOES NOT EXIST ALREADY

    Nested columns get their own child Table; scalar columns are added to
    the backing SQLite table with ALTER TABLE.
    """
    # Register the column in the in-memory schema (one set per name,
    # at most one entry per datatype).
    if column.name not in self.columns:
        self.columns[column.name] = {column}
    elif column.type not in [c.type for c in self.columns[column.name]]:
        self.columns[column.name].add(column)

    if column.type == "nested":
        nested_table_name = join_field(split_field(self.name) + split_field(column.name))
        # MAKE THE TABLE
        table = Table_usingSQLite(nested_table_name, self.db, exists=False)
        self.nested_tables[column.name] = table
    else:
        # BUG FIX: translate the JSON type through sql_types, as __init__
        # does for CREATE TABLE; previously the raw JSON type name was
        # emitted (SQLite tolerates it, but affinity was inconsistent).
        self.db.execute(
            "ALTER TABLE " + quote_table(self.name) + " ADD COLUMN " + _quote_column(column) + " " + sql_types[column.type]
        )
def __len__(self):
    """Number of rows currently in the backing table."""
    return self.db.query("SELECT COUNT(*) FROM " + quote_table(self.name))[0][0]
def __nonzero__(self):
    """Truthiness (Python 2 protocol): True when at least one row exists."""
    return bool(self.db.query("SELECT COUNT(*) FROM " + quote_table(self.name))[0][0])
def __getattr__(self, item):
    # Unknown attributes are resolved as column lookups.
    # NOTE(review): if __getitem__ ever touches a missing instance
    # attribute this recurses; verify self.columns is always set first.
    return self.__getitem__(item)
def __getitem__(self, item):
    """Return all values stored under column name `item`, or [Null] if unknown."""
    cols = self.columns.get(item, None)
    if not cols:
        return [Null]

    # One SELECT per realized datatype, unioned together.
    sql = " UNION ALL ".join(
        "SELECT " + _quote_column(c) + " FROM " + quote_table(c.es_index)
        for c in cols
    )
    return [row[0] for row in self.db.query(sql)]
def __iter__(self):
    # Yield one Dict per row, covering every non-struct column.
    columns = [c for c, cs in self.columns.items() for c in cs if c.type not in STRUCT]
    command = "SELECT " + \
              ",\n".join(_quote_column(c) for c in columns) + \
              " FROM " + quote_table(self.name)

    rows = self.db.query(command)
    for r in rows:
        output = Dict()
        # NOTE(review): `columns` holds Column objects, yet they are
        # unpacked here as (k, t) pairs — this only works if Column is
        # iterable as a 2-tuple; otherwise this raises. Confirm against
        # the Column/DataClass definition.
        for (k, t), v in zip(columns, r):
            output[k] = v
        yield output
def delete(self, where):
    # Delete all rows matching `where`.
    # NOTE(review): `where` is used as an already-parsed jx expression
    # (has .to_sql()), unlike where()/query() which call jx_expression()
    # on raw filters — confirm callers pass expressions here.
    filter = where.to_sql()
    self.db.execute("DELETE FROM " + quote_table(self.name) + " WHERE " + filter)
def vars(self):
    """The set of all column names known to this container."""
    return set(self.columns)
def map(self, map_):
    # Renaming is a no-op for this container; it is returned unchanged.
    return self
def update(self, command):
    """
    APPLY A SET/CLEAR UPDATE TO ALL ROWS MATCHING A FILTER

    :param command: EXPECTING dict WITH {"set": s, "clear": c, "where": w} FORMAT

    Nested values in `set` replace the matching rows of the child table;
    scalar values become a single UPDATE statement.
    """
    command = wrap(command)

    # REJECT DEEP UPDATES
    touched_columns = command.set.keys() | set(listwrap(command['clear']))
    for c in self.get_leaves():
        if c.name in touched_columns and c.nested_path and len(c.name) > len(c.nested_path[0]):
            Log.error("Deep update not supported")

    # ADD NEW COLUMNS
    where = jx_expression(command.where)
    _vars = where.vars()
    # Map filter variables to their concrete (non-struct) SQL columns.
    _map = {
        v: c.es_column
        for v in _vars
        for c in self.columns.get(v, Null)
        if c.type not in STRUCT
    }
    where_sql = where.map(_map).to_sql()
    new_columns = set(command.set.keys()) - set(self.columns.keys())
    for new_column_name in new_columns:
        nested_value = command.set[new_column_name]
        ctype = get_type(nested_value)
        column = Column(
            name=new_column_name,
            type=ctype,
            table=self.name,
            es_index=self.name,
            es_column=typed_column(new_column_name, ctype)
        )
        self.add_column(column)

    # UPDATE THE NESTED VALUES
    for nested_column_name, nested_value in command.set.items():
        if get_type(nested_value) == "nested":
            nested_table_name = join_field(split_field(self.name)+split_field(nested_column_name))
            # BUG FIX: was `nested_tables[...]` (undefined name, NameError
            # at runtime); the nested-table map lives on self.
            nested_table = self.nested_tables[nested_column_name]
            self_primary_key = ",".join(quote_table(c.es_column) for u in self.uid for c in self.columns[u])
            extra_key_name = UID_PREFIX+"id"+unicode(len(self.uid))
            extra_key = [e for e in nested_table.columns[extra_key_name]][0]

            # Remove the existing child rows of every matched parent.
            sql_command = "DELETE FROM " + quote_table(nested_table.name) + \
                          "\nWHERE EXISTS (" + \
                          "\nSELECT 1 " + \
                          "\nFROM " + quote_table(nested_table.name) + " n" + \
                          "\nJOIN (" + \
                          "\nSELECT " + self_primary_key + \
                          "\nFROM " + quote_table(self.name) + \
                          "\nWHERE " + where_sql + \
                          "\n) t ON " + \
                          " AND ".join(
                              "t." + quote_table(c.es_column) + " = n." + quote_table(c.es_column)
                              for u in self.uid
                              for c in self.columns[u]
                          ) + \
                          ")"
            self.db.execute(sql_command)

            # INSERT NEW RECORDS
            if not nested_value:
                continue

            doc_collection = {}
            for d in listwrap(nested_value):
                nested_table.flatten(d, Dict(), doc_collection, path=nested_column_name)

            prefix = "INSERT INTO " + quote_table(nested_table.name) + \
                     "(" + \
                     self_primary_key + "," + \
                     _quote_column(extra_key) + "," + \
                     ",".join(
                         quote_table(c.es_column)
                         for c in doc_collection.get(".", Null).active_columns
                     ) + ")"

            # BUILD THE PARENT TABLES
            parent = "\nSELECT " + \
                     self_primary_key + \
                     "\nFROM " + quote_table(self.name) + \
                     "\nWHERE " + jx_expression(command.where).to_sql()

            # BUILD THE RECORDS
            children = " UNION ALL ".join(
                "\nSELECT " +
                quote_value(i) + " " + quote_table(extra_key.es_column) + "," +
                ",".join(
                    quote_value(row[c.name]) + " " + quote_table(c.es_column)
                    for c in doc_collection.get(".", Null).active_columns
                )
                for i, row in enumerate(doc_collection.get(".", Null).rows)
            )

            # Cross-join parents with the new child rows and insert.
            sql_command = prefix + \
                          "\nSELECT " + \
                          ",".join(
                              "p." + quote_table(c.es_column)
                              for u in self.uid for c in self.columns[u]
                          ) + "," + \
                          "c." + _quote_column(extra_key) + "," + \
                          ",".join(
                              "c." + quote_table(c.es_column)
                              for c in doc_collection.get(".", Null).active_columns
                          ) + \
                          "\nFROM (" + parent + ") p " + \
                          "\nJOIN (" + children + \
                          "\n) c on 1=1"
            self.db.execute(sql_command)

            # THE CHILD COLUMNS COULD HAVE EXPANDED
            # ADD COLUMNS TO SELF
            for n, cs in nested_table.columns.items():
                for c in cs:
                    column = Column(
                        name=c.name,
                        type=c.type,
                        table=self.name,
                        es_index=c.es_index,
                        es_column=c.es_column,
                        nested_path=[nested_column_name]+c.nested_path
                    )
                    if c.name not in self.columns:
                        self.columns[column.name] = {column}
                    elif c.type not in [c.type for c in self.columns[c.name]]:
                        self.columns[column.name].add(column)

    # Apply the scalar sets and clears in one UPDATE.
    command = "UPDATE " + quote_table(self.name) + " SET " + \
              ",\n".join(
                  [
                      _quote_column(c) + "=" + quote_value(get_if_type(v, c.type))
                      for k, v in command.set.items()
                      if get_type(v) != "nested"
                      for c in self.columns[k]
                      if c.type != "nested" and len(c.nested_path) == 1
                  ] +
                  [
                      _quote_column(c) + "=NULL"
                      for k in listwrap(command['clear'])
                      if k in self.columns
                      for c in self.columns[k]
                      if c.type != "nested" and len(c.nested_path) == 1
                  ]
              ) + \
              " WHERE " + where_sql
    self.db.execute(command)
def upsert(self, doc, where):
    """Insert `doc`, first deleting any rows that match `where`."""
    existing = self.filter(where)
    if len(existing) != 0:
        self.delete(where)
    self.insert(doc)
def where(self, filter):
    """
    WILL NOT PULL WHOLE OBJECT, JUST TOP-LEVEL PROPERTIES
    :param filter: jx_expression filter
    :return: list of objects that match
    """
    select = []
    column_names = []
    for cname, cs in self.columns.items():
        cs = [c for c in cs if c.type not in STRUCT and len(c.nested_path) == 1]
        if len(cs) == 0:
            continue
        column_names.append(cname)
        if len(cs) == 1:
            # BUG FIX: refer to the single column explicitly; the old code
            # used `c`, which only existed via the Python-2-only leak of
            # the list-comprehension variable above (broken on Python 3).
            c = cs[0]
            select.append(quote_table(c.es_column) + " " + quote_table(c.name))
        else:
            # Multiple realized datatypes for one name: coalesce them.
            # BUG FIX: alias by `cname` instead of the leaked `c.name`.
            select.append(
                "coalesce(" +
                ",".join(quote_table(c.es_column) for c in cs) +
                ") " + quote_table(cname)
            )

    result = self.db.query(
        " SELECT " + "\n,".join(select) +
        " FROM " + quote_table(self.name) +
        " WHERE " + jx_expression(filter).to_sql()
    )
    return wrap([{c: v for c, v in zip(column_names, r)} for r in result.data])
def query(self, query):
    """
    :param query: JSON Query Expression, SET `format="container"` TO MAKE NEW TABLE OF RESULT
    :return: Dict result in "cube", "table", "list" or "value" format, or a
             new Table_usingSQLite when format=="container"
    """
    if not startswith_field(query['from'], self.name):
        Log.error("Expecting table, or some nested table")
    frum, query['from'] = query['from'], self
    query = QueryOp.wrap(query, self.columns)

    # TYPE CONFLICTS MUST NOW BE RESOLVED DURING
    # TYPE-SPECIFIC QUERY NORMALIZATION
    # vars_ = query.vars(exclude_select=True)
    # type_map = {
    #     v: c.es_column
    #     for v in vars_
    #     if v in self.columns and len([c for c in self.columns[v] if c.type != "nested"]) == 1
    #     for c in self.columns[v]
    #     if c.type != "nested"
    # }
    #
    # sql_query = query.map(type_map)
    query = query  # no-op left over from the disabled mapping above

    new_table = "temp_"+unique_name()

    if query.format == "container":
        create_table = "CREATE TABLE " + quote_table(new_table) + " AS "
    else:
        create_table = ""

    # Dispatch to the SQL generator for the query shape.
    if query.groupby:
        op, index_to_columns = self._groupby_op(query, frum)
        command = create_table + op
    elif query.edges or any(a!="none" for a in listwrap(query.select).aggregate):
        op, index_to_columns = self._edges_op(query, frum)
        command = create_table + op
    else:
        # NOTE: the plain set-op path returns immediately; the sort and
        # format handling below never applies to it.
        op = self._set_op(query, frum)
        return op

    if query.sort:
        command += "\nORDER BY " + ",\n".join(
            s.value.to_sql() + (" DESC" if s.sort == -1 else "")
            for s in query.sort
        )

    result = self.db.query(command)

    column_names = query.edges.name + query.groupby.name + listwrap(query.select).name
    if query.format == "container":
        output = Table_usingSQLite(new_table, db=self.db, uid=self.uid, exists=True)
    elif query.format == "cube" or (not query.format and query.edges):
        if len(query.edges) == 0 and len(query.groupby) == 0:
            # Scalar aggregate: a cube with no dimensions.
            data = {n: Dict() for n in column_names}
            for s in index_to_columns.values():
                data[s.push_name][s.push_child] = unwrap(s.pull(result.data[0]))
            return Dict(
                data=unwrap(data),
                meta={"format": "cube"}
            )

        if not result.data:
            # Empty result: still emit the full edge/select skeleton.
            edges = []
            dims = []
            for i, e in enumerate(query.edges+query.groupby):
                allowNulls = coalesce(e.allowNulls, True)

                if e.domain.type == "set" and e.domain.partitions:
                    domain = SimpleSetDomain(partitions=e.domain.partitions.name)
                elif e.domain.type == "range":
                    domain = e.domain
                elif isinstance(e.value, TupleOp):
                    pulls = jx.sort([c for c in index_to_columns.values() if c.push_name==e.name], "push_child").pull
                    parts = [tuple(p(d) for p in pulls) for d in result.data]
                    domain = SimpleSetDomain(partitions=jx.sort(set(parts)))
                else:
                    domain = SimpleSetDomain(partitions=[])

                dims.append(1 if allowNulls else 0)
                edges.append(Dict(
                    name=e.name,
                    allowNulls=allowNulls,
                    domain=domain
                ))

            # "count" aggregates default to 0, everything else to Dict (null).
            zeros = [
                0 if s.aggregate == "count" and index_to_columns[si].push_child == "." else Dict
                for si, s in enumerate(listwrap(query.select))
            ]
            data = {s.name: Matrix(dims=dims, zeros=zeros[si]) for si, s in enumerate(listwrap(query.select))}

            if isinstance(query.select, list):
                select = [{"name": s.name} for s in query.select]
            else:
                select = {"name": query.select.name}

            return Dict(
                meta={"format": "cube"},
                edges=edges,
                select=select,
                data={k: v.cube for k, v in data.items()}
            )

        columns = None

        # Build the edge domains from the returned rows.
        edges = []
        dims = []
        for i, e in enumerate(query.edges+query.groupby):
            allowNulls = coalesce(e.allowNulls, True)

            if e.domain.type == "set" and e.domain.partitions:
                domain=SimpleSetDomain(partitions=e.domain.partitions.name)
            elif e.domain.type == "range":
                domain = e.domain
            elif isinstance(e.value, TupleOp):
                pulls = jx.sort([c for c in index_to_columns.values() if c.push_name==e.name], "push_child").pull
                parts = [tuple(p(d) for p in pulls) for d in result.data]
                domain = SimpleSetDomain(partitions=jx.sort(set(parts)))
            else:
                # Derive the domain from the distinct values in column i.
                if not columns:
                    columns = zip(*result.data)
                parts = set(columns[i]) - {None}
                domain = SimpleSetDomain(partitions=jx.sort(parts))

            dims.append(len(domain.partitions)+(1 if allowNulls else 0))
            edges.append(Dict(
                name=e.name,
                allowNulls=allowNulls,
                domain=domain
            ))

        zeros = [
            0 if s.aggregate == "count" and index_to_columns[si].push_child == "." else Dict
            for si, s in enumerate(listwrap(query.select))
        ]
        data = {s.name: Matrix(dims=dims, zeros=zeros[si]) for si, s in enumerate(listwrap(query.select))}

        r2c = index_to_coordinate(dims)  # WORKS BECAUSE THE DATABASE SORTED THE EDGES TO CONFORM
        for rownum, row in enumerate(result.data):
            coord = r2c(rownum)

            for i, s in enumerate(index_to_columns.values()):
                if s.is_edge:
                    continue
                if s.push_child == ".":
                    data[s.push_name][coord] = s.pull(row)
                else:
                    data[s.push_name][coord][s.push_child] = s.pull(row)

        if isinstance(query.select, list):
            select = [{"name": s.name} for s in query.select]
        else:
            select = {"name": query.select.name}

        return Dict(
            meta={"format": "cube"},
            edges=edges,
            select=select,
            data={k: v.cube for k, v in data.items()}
        )
    elif query.format == "table" or (not query.format and query.groupby):
        data = []
        for d in result.data:
            row = [None for _ in column_names]
            for s in index_to_columns.values():
                if s.push_child == ".":
                    row[s.push_column] = s.pull(d)
                elif s.num_push_columns:
                    # Tuple-valued edge: fill one slot of the tuple.
                    tuple_value = row[s.push_column]
                    if tuple_value == None:
                        tuple_value = row[s.push_column] = [None] * s.num_push_columns
                    tuple_value[s.push_child] = s.pull(d)
                elif row[s.push_column] == None:
                    row[s.push_column] = Dict()
                    row[s.push_column][s.push_child] = s.pull(d)
                else:
                    row[s.push_column][s.push_child] = s.pull(d)
            data.append(tuple(unwrap(r) for r in row))

        output = Dict(
            meta={"format": "table"},
            header=column_names,
            data=data
        )
    elif query.format == "list" or (not query.edges and not query.groupby):
        if not query.edges and not query.groupby and any(listwrap(query.select).aggregate):
            # Pure aggregates: a single "value" result.
            if isinstance(query.select, list):
                data = Dict()
                for c in index_to_columns.values():
                    if c.push_child==".":
                        data[c.push_name] = c.pull(result.data[0])
                    else:
                        data[c.push_name][c.push_child] = c.pull(result.data[0])

                output = Dict(
                    meta={"format": "value"},
                    data=data
                )
            else:
                data = Dict()
                for s in index_to_columns.values():
                    data[s.push_child] = s.pull(result.data[0])
                output = Dict(
                    meta={"format": "value"},
                    data=unwrap(data)
                )
        else:
            data = []
            for rownum in result.data:
                row = Dict()
                for c in index_to_columns.values():
                    if c.push_child == ".":
                        row[c.push_name] = c.pull(rownum)
                    elif c.num_push_columns:
                        tuple_value = row[c.push_name]
                        if not tuple_value:
                            tuple_value = row[c.push_name] = [None] * c.num_push_columns
                        tuple_value[c.push_child] = c.pull(rownum)
                    else:
                        row[c.push_name][c.push_child] = c.pull(rownum)
                data.append(row)

            output = Dict(
                meta={"format": "list"},
                data=data
            )
    else:
        Log.error("unknown format {{format}}", format=query.format)

    return output
def _edges_op(self, query, frum):
    """Build the SQL for an edges (grouped/aggregated) query.

    Returns (command, index_to_column): the SQL string and a map from
    result-column index to the Dict describing how to push that column
    into the output structure.
    """
    index_to_column = {}  # MAP FROM INDEX TO COLUMN (OR SELECT CLAUSE)
    outer_selects = []  # EVERY SELECT CLAUSE (NOT TO BE USED ON ALL TABLES, OF COURSE)
    tables = []
    base_table = split_field(frum)[0]
    path = join_field(split_field(frum)[1:])
    nest_to_alias = {nested_path: "__" + unichr(ord('a') + i) + "__" for i, (nested_path, sub_table) in enumerate(self.nested_tables.items())}

    columns = self._get_sql_schema(frum)

    # LEFT JOIN each nested table along the query path, child to parent.
    tables = []
    for n, a in nest_to_alias.items():
        if startswith_field(path, n):
            tables.append({"nest": n, "alias": a})
    tables = jx.sort(tables, {"value": {"length": "nest"}})

    from_sql = join_field([base_table] + split_field(tables[0].nest)) + " " + tables[0].alias
    previous = tables[0]
    for t in tables[1::]:
        from_sql += "\nLEFT JOIN\n" + join_field([base_table] + split_field(t.nest)) + " " + t.alias + " ON " + t.alias + "." + PARENT + " = " + previous.alias + "." + GUID

    # SHIFT THE COLUMN DEFINITIONS BASED ON THE NESTED QUERY DEPTH
    ons = []
    groupby = []
    orderby = []
    domains = []
    select_clause = ["1 __exists__"]  # USED TO DISTINGUISH BETWEEN NULL-BECAUSE-LEFT-JOIN OR NULL-BECAUSE-NULL-VALUE

    for edge_index, query_edge in enumerate(query.edges):
        edge_alias = "e" + unicode(edge_index)

        # Translate the edge into one or more (json_type, sql) pairs.
        if query_edge.value:
            edge_values = [p for c in query_edge.value.to_sql(columns).sql for p in c.items()]
        elif not query_edge.value and any(query_edge.domain.partitions.where):
            # Partition-by-filter edge: a CASE expression picks the index.
            case = "CASE "
            for pp, p in enumerate(query_edge.domain.partitions):
                w = p.where.to_sql(columns)[0].sql.b
                t = quote_value(pp)
                case += " WHEN " + w + " THEN " + t
            case += " ELSE NULL END "
            edge_values = [("n", case)]
        elif query_edge.range:
            edge_values = query_edge.range.min.to_sql(columns)[0].sql.items() + query_edge.range.max.to_sql(columns)[0].sql.items()
        else:
            Log.error("Do not know how to handle")

        edge_names = []
        for column_index, (json_type, sql) in enumerate(edge_values):
            sql_name = "e"+unicode(edge_index)+"c"+unicode(column_index)
            edge_names.append(sql_name)

            num_sql_columns = len(index_to_column)
            if not query_edge.value and any(query_edge.domain.partitions.where):
                # Closure factory to bind parts/index now, not at call time.
                def __(parts, num_sql_columns):
                    def _get(row):
                        return parts[row[num_sql_columns]].name
                    return _get
                pull = __(query_edge.domain.partitions, num_sql_columns)
            else:
                pull = get_column(num_sql_columns)

            if isinstance(query_edge.value, TupleOp):
                query_edge.allowNulls = False
                push_child = column_index
                num_push_columns=len(query_edge.value.terms)
            else:
                push_child = "."
                num_push_columns = None

            index_to_column[num_sql_columns] = Dict(
                is_edge=True,
                push_name=query_edge.name,
                push_column=edge_index,
                num_push_columns=num_push_columns,
                push_child=push_child,  # CAN NOT HANDLE TUPLES IN COLUMN
                pull=pull,
                sql=sql,
                type=sql_type_to_json_type[json_type]
            )

        vals = [g[1] for g in edge_values]
        # Build the domain sub-query and join condition per domain type.
        if query_edge.domain.type == "set":
            domain_name = "d"+unicode(edge_index)+"c"+unicode(column_index)
            domain_names =[domain_name]
            if len(edge_names) > 1:
                Log.error("Do not know how to handle")
            if query_edge.value:
                # Explicit partitions: enumerate them, plus a NULL bucket.
                domain = "\nUNION ALL\n".join(
                    "SELECT " +quote_value(coalesce(p.dataIndex, i))+" AS rownum, " + quote_value(p.value) + " AS " + domain_name
                    for i, p in enumerate(query_edge.domain.partitions)
                )
                domain += "\nUNION ALL\nSELECT "+quote_value(len(query_edge.domain.partitions))+" AS rownum, NULL AS " + domain_name
                on_clause = (
                    " OR ".join(
                        edge_alias + "." + k + " = " + v
                        for k, (t, v) in zip(domain_names, edge_values)
                    ) + " OR (" + (
                        " AND ".join(edge_alias + "." + dn + " IS NULL" for dn in domain_names) +
                        " AND (" + edge_values[0][1] + " IS NULL OR " + edge_values[0][1] + " NOT IN (" + ",".join(
                            map(quote_value, query_edge.domain.partitions.value)
                        ) + "))"
                    ) +
                    ")"
                )
            else:
                domain = "\nUNION ALL\n".join(
                    "SELECT " + quote_value(pp) + " AS " + domain_name for pp, p in enumerate(query_edge.domain.partitions)
                )
                limit = Math.min(query.limit, query_edge.domain.limit)
                domain += "\nORDER BY \n" + ",\n".join("COUNT(" + g + ") DESC" for g in vals) + \
                          "\nLIMIT\n"+unicode(limit)
                on_clause = " AND ".join(
                    edge_alias + "." + k + " = " + sql
                    for k, (t, sql) in zip(domain_names, edge_values)
                )
        elif query_edge.domain.type == "range":
            domain_name = "d"+unicode(edge_index)+"c0"
            domain_names = [domain_name]  # ONLY EVER SEEN ONE DOMAIN VALUE, DOMAIN TUPLES CERTAINLY EXIST
            d = query_edge.domain
            if d.max == None or d.min == None or d.min == d.max:
                Log.error("Invalid range: {{range|json}}", range=d)
            if len(edge_names) == 1:
                domain = self._make_range_domain(domain=d, column_name=domain_name)
                limit = Math.min(query.limit, query_edge.domain.limit)
                domain += "\nORDER BY \n" + ",\n".join("COUNT(" + g + ") DESC" for g in vals) + \
                          "\nLIMIT\n"+unicode(limit)
                on_clause = " AND ".join(
                    edge_alias + "." + k + " <= " + v + " AND " + v + "< (" + edge_alias + "." + k + " + " + unicode(
                        d.interval) + ")"
                    for k, (t, v) in zip(domain_names, edge_values)
                )
            elif query_edge.range:
                # Edge given as min/max expressions: overlap test.
                domain = self._make_range_domain(domain=d, column_name=domain_name)
                limit = Math.min(query.limit, query_edge.domain.limit)
                domain += "\nORDER BY \n" + ",\n".join("COUNT(" + g + ") DESC" for g in vals) + \
                          "\nLIMIT\n"+unicode(limit)
                on_clause = edge_alias + "." + domain_name + " < " + edge_values[1][1] + " AND " + \
                            edge_values[0][1] + " < (" + edge_alias + "." + domain_name + " + " + unicode(d.interval) + ")"
            else:
                Log.error("do not know how to handle")
            # select_clause.extend(v[0] + " " + k for k, v in zip(domain_names, edge_values))
        elif len(edge_names) > 1:
            # Multi-column (tuple) edge: derive the domain from the data.
            domain_names = ["d" + unicode(edge_index) + "c" + unicode(i) for i, _ in enumerate(edge_names)]
            query_edge.allowNulls = False
            domain = "\nSELECT " + ",\n".join(g + " AS " + n for n, g in zip(domain_names, vals)) + \
                     "\nFROM\n" + quote_table(self.name) + " " + nest_to_alias["."] + \
                     "\nGROUP BY\n" + ",\n".join(vals)
            limit = Math.min(query.limit, query_edge.domain.limit)
            domain += "\nORDER BY \n" + ",\n".join("COUNT(" + g + ") DESC" for g in vals) + \
                      "\nLIMIT\n"+unicode(limit)
            on_clause = " AND ".join(
                "((" + edge_alias + "." + k + " IS NULL AND " + v + " IS NULL) OR " + edge_alias + "." + k + " = " + v + ")"
                for k, v in zip(domain_names, vals)
            )
        elif isinstance(query_edge.domain, DefaultDomain):
            # Unknown domain: top values by frequency, plus a NULL bucket.
            domain_names = ["d"+unicode(edge_index)+"c"+unicode(i) for i, _ in enumerate(edge_names)]
            domain = (
                "\nSELECT " + ",".join(domain_names) + " FROM ("
                "\nSELECT " + ",\n".join(g + " AS " + n for n, g in zip(domain_names, vals)) +
                "\nFROM\n" + quote_table(self.name) + " " + nest_to_alias["."] +
                "\nWHERE\n" + " AND ".join(g + " IS NOT NULL" for g in vals) +
                "\nGROUP BY\n" + ",\n".join(g for g in vals)
            )
            limit = Math.min(query.limit, query_edge.domain.limit)
            domain += (
                "\nORDER BY \n" + ",\n".join("COUNT(" + g + ") DESC" for g in vals) +
                "\nLIMIT\n" + unicode(limit) +
                ")"
            )
            domain += "\nUNION ALL SELECT " + ",\n".join("NULL AS " + dn for dn in domain_names)
            on_clause = (
                " OR ".join(
                    edge_alias + "." + k + " = " + v
                    for k, v in zip(domain_names, vals)
                ) + " OR (" + (
                    " AND ".join(edge_alias + "." + dn + " IS NULL" for dn in domain_names) + " AND " +
                    " AND ".join(v + " IS NULL" for v in vals)
                ) +
                ")"
            )
        else:
            Log.note("not handled")

        domains.append(domain)
        ons.append(on_clause)
        for d in domain_names:
            groupby.append(edge_alias + "." + d)

        for k in domain_names:
            outer_selects.append(edge_alias + "." + k + " AS " + k)
            orderby.append(k + " IS NULL")
            orderby.append(k)

    # Select columns appear after the edge columns in the result.
    offset = len(query.edges)
    for ssi, s in enumerate(listwrap(query.select)):
        si = ssi+offset
        if isinstance(s.value, Variable) and s.value.var == "." and s.aggregate == "count":
            # COUNT RECORDS, NOT ANY ONE VALUE
            sql = "COUNT(__exists__) AS " + quote_table(s.name)
            column_number = len(outer_selects)
            outer_selects.append(sql)
            index_to_column[column_number] = Dict(
                push_name=s.name,
                push_column=si,
                push_child=".",
                pull=get_column(column_number),
                sql=sql,
                type=sql_type_to_json_type["n"]
            )
        elif s.aggregate == "percentile":
            if not isinstance(s.percentile, (int, float)):
                Log.error("Expecting percentile to be a float between 0 and 1")
            Log.error("not implemented")
        elif s.aggregate == "cardinality":
            for details in s.value.to_sql(columns):
                for json_type, sql in details.sql.items():
                    column_number = len(outer_selects)
                    count_sql = "COUNT(DISTINCT(" + sql + ")) AS " + _make_column_name(column_number)
                    outer_selects.append(count_sql)
                    index_to_column[column_number] = Dict(
                        push_name=s.name,
                        push_column=si,
                        push_child=".",
                        pull=get_column(column_number),
                        sql=count_sql,
                        type=sql_type_to_json_type[json_type]
                    )
        elif s.aggregate == "union":
            for details in s.value.to_sql(columns):
                concat_sql = []
                column_number = len(outer_selects)
                for json_type, sql in details.sql.items():
                    concat_sql.append("GROUP_CONCAT(QUOTE(DISTINCT("+sql+")))")
                if len(concat_sql)>1:
                    concat_sql = "CONCAT(" + ",".join(concat_sql) + ") AS " + _make_column_name(column_number)
                else:
                    concat_sql = concat_sql[0] + " AS " + _make_column_name(column_number)
                outer_selects.append(concat_sql)
                index_to_column[column_number] = Dict(
                    push_name=s.name,
                    push_column=si,
                    push_child=".",
                    pull=sql_text_array_to_set(column_number),
                    sql=concat_sql,
                    type=sql_type_to_json_type[json_type]
                )
        elif s.aggregate == "stats":  # THE STATS OBJECT
            for details in s.value.to_sql(columns):
                sql = details.sql["n"]
                for name, code in STATS.items():
                    full_sql = expand_template(code, {"value": sql})
                    column_number = len(outer_selects)
                    outer_selects.append(full_sql + " AS " + _make_column_name(column_number))
                    index_to_column[column_number] = Dict(
                        push_name=s.name,
                        push_column=si,
                        push_child=name,
                        pull=get_column(column_number),
                        sql=full_sql,
                        type="number"
                    )
        else:  # STANDARD AGGREGATES
            for details in s.value.to_sql(columns):
                for sql_type, sql in details.sql.items():
                    column_number = len(outer_selects)
                    sql = sql_aggs[s.aggregate] + "(" + sql + ")"
                    if s.default != None:
                        sql = "COALESCE(" + sql + ", " + quote_value(s.default) + ")"
                    outer_selects.append(sql+" AS " + _make_column_name(column_number))
                    index_to_column[column_number] = Dict(
                        push_name=s.name,
                        push_column=si,
                        push_child=".",  # join_field(split_field(details.name)[1::]),
                        pull=get_column(column_number),
                        sql=sql,
                        type=sql_type_to_json_type[sql_type]
                    )

    for w in query.window:
        outer_selects.append(self._window_op(self, query, w))

    main_filter = query.where.to_sql(columns)[0].sql.b

    # Assemble: fact source filtered by WHERE, joined to each edge domain.
    all_parts = []
    sources = [
        "(" +
        "\nSELECT\n" + ",\n".join(select_clause) + ",\n" + "*" +
        "\nFROM " + from_sql +
        "\nWHERE " + main_filter +
        ") " + nest_to_alias["."]
    ]
    joins = []
    join_types = []
    where_clause = []

    for edge_index, query_edge in enumerate(query.edges):
        edge_alias = "e" + unicode(edge_index)
        domain = domains[edge_index]
        sources.insert(0, "(" + domain + ") "+edge_alias)
        if ons:
            # All accumulated ON conditions attach to the first join.
            join_types.insert(0, "LEFT JOIN")
            joins.insert(0, "\nAND\n".join("(" + o + ")" for o in ons))
            ons = []
        else:
            join_types.insert(0, "JOIN")
            joins.insert(0, "1=1")

    part = "SELECT " + (",\n".join(outer_selects)) + "\nFROM\n" + sources[0]
    for join_type, s, j in zip(join_types, sources[1:], joins):
        part += "\n"+join_type+"\n" + s + "\nON\n" + j
    if where_clause:
        part += "\nWHERE\n" + "\nAND\n".join(where_clause)
    if groupby:
        part += "\nGROUP BY\n" + ",\n".join(groupby)
    all_parts.append(part)

    command = "SELECT * FROM (\n"+"\nUNION ALL\n".join(all_parts)+"\n)"
    if orderby:
        command += "\nORDER BY\n" + ",\n".join(orderby)

    return command, index_to_column
def _make_range_domain(self, domain, column_name):
    """Build a SELECT producing every value of a range domain.

    Cross-joins the __digits__ (0..9) helper table enough times to cover
    `width` buckets, then scales/offsets by interval and min.
    """
    width = (domain.max - domain.min) / domain.interval
    digits = Math.floor(Math.log10(width-1))
    if digits == 0:
        value = "a.value"
    else:
        # Sum of 1*a.value + 10*b.value + 100*c.value ... over the joins.
        value = "+".join("1" + ("0" * j) + "*" + unicode(chr(ord(b'a') + j)) + ".value" for j in range(digits + 1))

    if domain.interval == 1:
        if domain.min == 0:
            domain = "SELECT " + value + " " + column_name + \
                     "\nFROM __digits__ a"
        else:
            domain = "SELECT (" + value + ") - " + quote_value(domain.min) + " " + column_name + \
                     "\nFROM __digits__ a"
    else:
        if domain.min == 0:
            domain = "SELECT " + value + " * " + unicode(domain.interval) + " " + column_name + \
                     "\nFROM __digits__ a"
        else:
            domain = "SELECT (" + value + " * " + unicode(domain.interval) + ") - " + quote_value(
                domain.min) + " " + column_name + \
                "\nFROM __digits__ a"

    # One extra cross-join per additional decimal digit of width.
    for j in range(digits):
        domain += "\nJOIN __digits__ " + unicode(chr(ord(b'a') + j + 1)) + " ON 1=1"
    domain += "\nWHERE " + value + " < " + quote_value(width)
    return domain
def _groupby_op(self, query, frum):
    """
    TRANSLATE AN AGGREGATION QUERY (query.groupby EDGES PLUS AGGREGATED
    query.select CLAUSES) INTO A SINGLE GROUP-BY SELECT OVER THIS TABLE

    :param query: jx query object (groupby/select/window/where)
    :param frum: table path the query is FROM
    :return: (SQL command text, map from output column number to column details)
    """
    columns = self._get_sql_schema(frum)
    index_to_column = {}
    nest_to_alias = {nested_path: "__" + unichr(ord('a') + i) + "__" for i, (nested_path, sub_table) in enumerate(self.nested_tables.items())}

    selects = []
    groupby = []
    # ONE SELECT CLAUSE (AND MATCHING GROUP BY TERM) PER groupby EDGE
    for i, e in enumerate(query.groupby):
        column_number = len(selects)
        sql_type, sql = e.value.to_sql(columns)[0].sql.items()[0]
        groupby.append(sql)
        selects.append(sql + " AS " + e.name)
        index_to_column[column_number] = Dict(
            push_name=e.name,
            push_column=column_number,
            push_child=".",
            pull=get_column(column_number),
            sql=sql,
            type=sql_type_to_json_type[sql_type]
        )

    # ONE AGGREGATED SELECT CLAUSE PER jx SELECT
    for s in listwrap(query.select):
        column_number = len(selects)
        sql_type, sql = s.value.to_sql(columns)[0].sql.items()[0]
        if s.value == "." and s.aggregate == "count":
            # COUNT OF WHOLE DOCUMENTS
            selects.append("COUNT(1) AS " + quote_table(s.name))
        else:
            selects.append(sql_aggs[s.aggregate] + "(" + sql + ") AS " + quote_table(s.name))
        index_to_column[column_number] = Dict(
            push_name=s.name,
            push_column=column_number,
            push_child=".",
            pull=get_column(column_number),
            sql=sql,
            type=sql_type_to_json_type[sql_type]
        )

    for w in query.window:
        # BUGFIX: _window_op IS A BOUND METHOD; THE OLD CODE CALLED
        # self._window_op(self, query, w), PASSING self TWICE AND RAISING
        # TypeError (4 args for a 3-arg method)
        selects.append(self._window_op(query, w))

    where = query.where.to_sql(columns)[0].sql.b

    command = "SELECT\n" + (",\n".join(selects)) + \
              "\nFROM\n" + quote_table(self.name) + " " + nest_to_alias["."] + \
              "\nWHERE\n" + where + \
              "\nGROUP BY\n" + ",\n".join(groupby)

    return command, index_to_column
def _set_op(self, query, frum):
    """
    EXECUTE A "set" QUERY (PLAIN SELECT, NO AGGREGATION) OVER THIS TABLE AND
    ITS NESTED SUB-TABLES, THEN SHAPE THE FLAT RESULT ROWS INTO THE REQUESTED
    query.format: "cube" (column-major), "table" (header + rows) OR "list"
    (re-assembled nested documents, the default)

    :param query: jx query object (select/where/sort/limit/format)
    :param frum: dot-delimited table path; segments after the first name the
                 nested level the query starts from
    :return: Dict holding the formatted result
    """
    # GET LIST OF COLUMNS
    primary_nested_path = join_field(split_field(frum)[1:])
    vars_ = UNION([s.value.vars() for s in listwrap(query.select)])
    nest_to_alias = {nested_path: "__" + unichr(ord('a') + i) + "__" for i, (nested_path, sub_table) in enumerate(self.nested_tables.items())}
    # columns = self._get_sql_schema(frum)

    # COLLECT THE NON-STRUCT COLUMNS THE QUERY MENTIONS, GROUPED BY THE
    # NESTED PATH (SUB-TABLE) EACH LIVES IN
    active_columns = {}
    for cname, cols in self.columns.items():
        if any(startswith_field(cname, v) for v in vars_):
            for c in cols:
                if c.type in STRUCT:
                    continue
                nest = c.nested_path[0]
                active = active_columns.get(nest)
                if not active:
                    active = active_columns[nest] = []
                active.append(c)

    # EVERY COLUMN, AND THE INDEX IT TAKES UP
    index_to_column = {}  # MAP FROM INDEX TO COLUMN (OR SELECT CLAUSE)
    index_to_uid = {}  # FROM NESTED PATH TO THE INDEX OF UID
    sql_selects = []  # EVERY SELECT CLAUSE (NOT TO BE USED ON ALL TABLES, OF COURSE)
    nest_to_alias = {nested_path: "__" + unichr(ord('a') + i) + "__" for i, (nested_path, sub_table) in enumerate(self.nested_tables.items())}
    # WE MUST HAVE THE ALIAS NAMES FOR THE TABLES

    def copy_cols(cols):
        # COPY EACH COLUMN, REBINDING es_index TO THE SQL TABLE ALIAS
        output = set()
        for c in cols:
            c = copy(c)
            c.es_index = nest_to_alias[c.nested_path[0]]
            output.add(c)
        return output

    columns = {k: copy_cols(v) for k, v in self.columns.items()}

    # TRANSLATE query.sort INTO EXTRA SELECT ALIASES PLUS ORDER-BY TERMS
    # (NULLS SORT LAST FOR ASCENDING, FIRST FOR DESCENDING)
    sorts = []
    if query.sort:
        for s in query.sort:
            col = s.value.to_sql(columns)[0]
            for t, sql in col.sql.items():
                json_type = sql_type_to_json_type[t]
                if json_type in STRUCT:
                    continue
                column_number = len(sql_selects)
                # SQL HAS ABS TABLE REFERENCE
                column_alias = _make_column_name(column_number)
                sql_selects.append(sql + " AS " + column_alias)
                if s.sort == -1:
                    sorts.append(column_alias + " IS NOT NULL")
                    sorts.append(column_alias + " DESC")
                else:
                    sorts.append(column_alias + " IS NULL")
                    sorts.append(column_alias)

    primary_doc_details = Dict()
    # EVERY SELECT STATEMENT THAT WILL BE REQUIRED, NO MATTER THE DEPTH
    # WE WILL CREATE THEM ACCORDING TO THE DEPTH REQUIRED
    for nested_path, sub_table in self.nested_tables.items():
        nested_doc_details = {
            "sub_table": sub_table,
            "children": [],
            "index_to_column": {},
            "nested_path": [nested_path]  # fake the real nested path, we only look at [0] anyway
        }

        # INSERT INTO TREE
        if not primary_doc_details:
            primary_doc_details = nested_doc_details
        else:
            def place(parent_doc_details):
                # NOTE(review): there is no `return True` after the append, so
                # place() always returns None and every matching ancestor level
                # appends this node — verify duplicates cannot occur here
                if startswith_field(nested_path, parent_doc_details['nested_path'][0]):
                    for c in parent_doc_details['children']:
                        if place(c):
                            return True
                    parent_doc_details['children'].append(nested_doc_details)

            place(primary_doc_details)

        alias = nested_doc_details['alias'] = nest_to_alias[nested_path]

        # WE ALWAYS ADD THE UID AND ORDER
        column_number = index_to_uid[nested_path] = nested_doc_details['id_coord'] = len(sql_selects)
        column_alias = _make_column_name(column_number)
        sql_selects.append(alias + "." + quoted_UID + " AS " + column_alias)
        if nested_path != ".":
            column_alias = _make_column_name(column_number + 1)
            sql_selects.append(alias + "." + quote_table(ORDER) + " AS " + column_alias)

        # ALWAYS ADD SORTS

        # WE DO NOT NEED DATA FROM TABLES WE REQUEST NOTHING FROM
        if nested_path not in active_columns:
            continue

        if primary_nested_path == nested_path:
            # ADD SQL SELECT COLUMNS FOR EACH jx SELECT CLAUSE
            si = 0
            for s in listwrap(query.select):
                try:
                    column_number = len(sql_selects)
                    s.pull = get_column(column_number)
                    db_columns = s.value.to_sql(columns)

                    if isinstance(s.value, LeavesOp):
                        # LEAVES: ONE OUTPUT COLUMN PER LEAF PROPERTY
                        for column in db_columns:
                            for t, unsorted_sql in column.sql.items():
                                json_type = sql_type_to_json_type[t]
                                if json_type in STRUCT:
                                    continue
                                column_number = len(sql_selects)
                                # SQL HAS ABS TABLE REFERENCE
                                column_alias = _make_column_name(column_number)
                                sql_selects.append(unsorted_sql + " AS " + column_alias)
                                index_to_column[column_number] = nested_doc_details['index_to_column'][column_number] = Dict(
                                    push_name=join_field(split_field(s.name) + split_field(column.name)),
                                    push_column=si,
                                    push_child=".",
                                    pull=get_column(column_number),
                                    sql=unsorted_sql,
                                    type=json_type,
                                    nested_path=[nested_path]  # fake the real nested path, we only look at [0] anyway
                                )
                                si += 1
                    else:
                        for column in db_columns:
                            for t, unsorted_sql in column.sql.items():
                                json_type = sql_type_to_json_type[t]
                                if json_type in STRUCT:
                                    continue
                                column_number = len(sql_selects)
                                # SQL HAS ABS TABLE REFERENCE
                                column_alias = _make_column_name(column_number)
                                sql_selects.append(unsorted_sql + " AS " + column_alias)
                                index_to_column[column_number] = nested_doc_details['index_to_column'][column_number] = Dict(
                                    push_name=s.name,
                                    push_column=si,
                                    push_child=column.name,
                                    pull=get_column(column_number),
                                    sql=unsorted_sql,
                                    type=json_type,
                                    nested_path=[nested_path]  # fake the real nested path, we only look at [0] anyway
                                )
                finally:
                    si += 1
        elif startswith_field(nested_path, primary_nested_path):
            # ADD REQUIRED COLUMNS, FOR DEEP STUFF
            for ci, c in enumerate(active_columns[nested_path]):
                if c.type in STRUCT:
                    continue
                column_number = len(sql_selects)
                # NOTE(review): this rebinds the OUTER loop variable
                # `nested_path` to the column's own path — confirm intended
                nested_path = c.nested_path
                unsorted_sql = nest_to_alias[nested_path[0]] + "." + quote_table(c.es_column)
                column_alias = _make_column_name(column_number)
                sql_selects.append(unsorted_sql + " AS " + column_alias)
                index_to_column[column_number] = nested_doc_details['index_to_column'][column_number] = Dict(
                    push_name=c.name,
                    push_column=ci,
                    push_child=".",
                    pull=get_column(column_number),
                    sql=unsorted_sql,
                    type=c.type,
                    nested_path=nested_path
                )

    where_clause = query.where.to_sql(columns, boolean=True)[0].sql.b
    unsorted_sql = self._make_sql_for_one_nest_in_set_op(
        ".",
        sql_selects,
        where_clause,
        active_columns,
        index_to_column
    )

    # SORT BY THE UID COLUMNS SO PARENT/CHILD ROWS ARRIVE ADJACENT,
    # AS _accumulate_nested() EXPECTS
    for n, _ in self.nested_tables.items():
        sorts.append(COLUMN + unicode(index_to_uid[n]))

    ordered_sql = (
        "SELECT * FROM (\n" +
        unsorted_sql +
        "\n)" +
        "\nORDER BY\n" + ",\n".join(sorts) +
        "\nLIMIT\n" + quote_value(query.limit)
    )
    result = self.db.query(ordered_sql)

    def _accumulate_nested(rows, row, nested_doc_details, parent_doc_id, parent_id_coord):
        """
        CONSUME ROWS FROM THE STACK, RE-ASSEMBLING ONE LEVEL OF NESTED
        DOCUMENTS; RECURSES FOR EACH CHILD NESTED PATH

        :param rows: REVERSED STACK OF ROWS (WITH push() AND pop())
        :param row: CURRENT ROW BEING EXTRACTED
        :param nested_doc_details: {
                "nested_path": wrap_nested_path(nested_path),
                "index_to_column": map from column number to column details
                "children": all possible direct decedents' nested_doc_details
             }
        :param parent_doc_id: the id of the parent doc (for detecting when to step out of loop)
        :param parent_id_coord: the column number for the parent id (so we ca extract from each row)
        :return: the nested property (usually an array)
        """
        previous_doc_id = None
        doc = Dict()
        output = []
        id_coord = nested_doc_details['id_coord']

        while True:
            doc_id = row[id_coord]

            # STOP WHEN THIS LEVEL HAS NO ID, OR THE ROW BELONGS TO A
            # DIFFERENT PARENT; PUSH THE ROW BACK FOR THE CALLER
            if doc_id == None or (parent_id_coord is not None and row[parent_id_coord] != parent_doc_id):
                rows.append(row)  # UNDO
                output = unwraplist(output)
                return output if output else None

            if doc_id != previous_doc_id:
                previous_doc_id = doc_id
                doc = Dict()
                curr_nested_path = nested_doc_details['nested_path'][0]
                if isinstance(query.select, list) or isinstance(query.select.value, LeavesOp):
                    # ASSIGN INNER PROPERTIES
                    for i, c in nested_doc_details['index_to_column'].items():
                        value = row[i]
                        if value == None:
                            continue
                        if value == '':
                            continue
                        relative_path = relative_field(join_field(split_field(c.push_name) + [c.push_child]), curr_nested_path)
                        if relative_path == ".":
                            doc = value
                        else:
                            doc[relative_path] = value
                else:
                    # ASSIGN INNER PROPERTIES
                    for i, c in nested_doc_details['index_to_column'].items():
                        value = row[i]
                        if value is not None:
                            relative_path = relative_field(c.push_child, curr_nested_path)
                            if relative_path == ".":
                                doc = value
                            else:
                                doc[relative_path] = value
                output.append(doc)

            # ASSIGN NESTED ARRAYS
            for child_details in nested_doc_details['children']:
                child_id = row[child_details['id_coord']]
                if child_id is not None:
                    nested_value = _accumulate_nested(rows, row, child_details, doc_id, id_coord)
                    if nested_value is not None:
                        path = child_details['nested_path'][0]
                        doc[path] = nested_value

            try:
                row = rows.pop()
            except IndexError:
                # RAN OUT OF ROWS: FLUSH WHAT WE HAVE
                output = unwraplist(output)
                return output if output else None

    cols = tuple(index_to_column.values())

    if query.format == "cube":
        # COLUMN-MAJOR: ONE ARRAY PER OUTPUT COLUMN
        num_rows = len(result.data)
        num_cols = Math.MAX([c.push_column for c in cols]) + 1
        map_index_to_name = {c.push_column: c.push_name for c in cols}
        temp_data = [[None] * num_rows for _ in range(num_cols)]
        for rownum, d in enumerate(result.data):
            for c in cols:
                if c.push_child == ".":
                    temp_data[c.push_column][rownum] = c.pull(d)
                else:
                    column = temp_data[c.push_column][rownum]
                    if column is None:
                        column = temp_data[c.push_column][rownum] = {}
                    Dict(column)[c.push_child] = c.pull(d)
        output = Dict(
            meta={"format": "cube"},
            data={n: temp_data[c] for c, n in map_index_to_name.items()},
            edges=[{
                "name": "rownum",
                "domain": {
                    "type": "rownum",
                    "min": 0,
                    "max": num_rows,
                    "interval": 1
                }
            }]
        )
        return output
    elif query.format == "table":
        # ROW-MAJOR: header LIST PLUS ONE LIST PER ROW
        num_column = Math.MAX([c.push_column for c in cols]) + 1
        header = [None] * num_column
        for c in cols:
            header[c.push_column] = c.push_name
        output_data = []
        for d in result.data:
            row = [None] * num_column
            for c in cols:
                set_column(row, c.push_column, c.push_child, c.pull(d))
            output_data.append(row)
        return Dict(
            meta={"format": "table"},
            header=header,
            data=output_data
        )
    else:
        # "list" (DEFAULT): RE-ASSEMBLE NESTED DOCUMENTS FROM THE FLAT ROWS
        rows = list(reversed(unwrap(result.data)))
        row = rows.pop()
        output = Dict(
            meta={"format": "list"},
            data=listwrap(_accumulate_nested(rows, row, primary_doc_details, None, None))
        )
        return output
def _make_sql_for_one_nest_in_set_op(
    self,
    primary_nested_path,
    selects,  # EVERY SELECT CLAUSE (NOT TO BE USED ON ALL TABLES, OF COURSE
    where_clause,
    active_columns,
    index_to_sql_select  # MAP FROM INDEX TO COLUMN (OR SELECT CLAUSE)
):
    """
    FOR EACH NESTED LEVEL, WE MAKE A QUERY THAT PULLS THE VALUES/COLUMNS REQUIRED
    WE `UNION ALL` THEM WHEN DONE
    :param primary_nested_path:
    :param selects:
    :param where_clause:
    :param active_columns:
    :param index_to_sql_select:
    :return: SQL FOR ONE NESTED LEVEL
    """
    parent_alias = "a"
    from_clause = ""
    select_clause = []
    children_sql = []
    done = []

    # STATEMENT FOR EACH NESTED PATH
    for i, (nested_path, sub_table) in enumerate(self.nested_tables.items()):
        # SKIP PATHS ALREADY HANDLED BY A RECURSIVE CALL
        if any(startswith_field(nested_path, d) for d in done):
            continue

        alias = "__" + unichr(ord('a') + i) + "__"

        if primary_nested_path == nested_path:
            # THIS IS THE LEVEL WE ARE EMITTING: REAL SELECT CLAUSES
            select_clause = []
            # ADD SELECT CLAUSE HERE
            for select_index, s in enumerate(selects):
                sql_select = index_to_sql_select.get(select_index)
                if not sql_select:
                    select_clause.append(s)
                    continue
                if startswith_field(sql_select.nested_path[0], nested_path):
                    select_clause.append(sql_select.sql + " AS " + _make_column_name(select_index))
                else:
                    # DO NOT INCLUDE DEEP STUFF AT THIS LEVEL
                    select_clause.append("NULL AS " + _make_column_name(select_index))

            if nested_path == ".":
                from_clause += "\nFROM " + quote_table(self.name) + " " + alias + "\n"
            else:
                from_clause += "\nLEFT JOIN " + quote_table(sub_table.name) + " " + alias + "\n" \
                               " ON " + alias + "." + quoted_PARENT + " = " + parent_alias + "." + quoted_UID + "\n"
                where_clause = "(" + where_clause + ") AND " + alias + "." + quote_table(ORDER) + " > 0\n"
        elif startswith_field(primary_nested_path, nested_path):
            # PARENT TABLE
            # NO NEED TO INCLUDE COLUMNS, BUT WILL INCLUDE ID AND ORDER
            if nested_path == ".":
                from_clause += "\nFROM " + quote_table(self.name) + " " + alias + "\n"
            else:
                parent_alias = alias = unichr(ord('a') + i - 1)
                from_clause += "\nLEFT JOIN " + quote_table(sub_table.name) + " " + alias + \
                               " ON " + alias + "." + quoted_PARENT + " = " + parent_alias + "." + quoted_UID
                where_clause = "(" + where_clause + ") AND " + parent_alias + "." + quote_table(ORDER) + " > 0\n"
        elif startswith_field(nested_path, primary_nested_path):
            # CHILD TABLE
            # GET FIRST ROW FOR EACH NESTED TABLE
            from_clause += "\nLEFT JOIN " + quote_table(sub_table.name) + " " + alias + \
                           " ON " + alias + "." + quoted_PARENT + " = " + parent_alias + "." + quoted_UID + \
                           " AND " + alias + "." + quote_table(ORDER) + " = 0\n"

            # IMMEDIATE CHILDREN ONLY
            done.append(nested_path)
            # NESTED TABLES WILL USE RECURSION
            children_sql.append(self._make_sql_for_one_nest_in_set_op(
                nested_path,
                selects,  # EVERY SELECT CLAUSE (NOT TO BE USED ON ALL TABLES, OF COURSE
                where_clause,
                active_columns,
                index_to_sql_select  # MAP FROM INDEX TO COLUMN (OR SELECT CLAUSE)
            ))
        else:
            # SIBLING PATHS ARE IGNORED
            continue

        parent_alias = alias

    sql = "\nUNION ALL\n".join(
        ["SELECT\n" + ",\n".join(select_clause) + from_clause + "\nWHERE\n" + where_clause] +
        children_sql
    )

    return sql
def _window_op(self, query, window):
    """
    TRANSLATE ONE jx window CLAUSE INTO A SQL WINDOW-FUNCTION SELECT CLAUSE

    :param query: jx query object (not used here; kept for interface parity)
    :param window: window clause (name, value, edges, aggregate, range)
    :return: SQL text for one select clause
    """
    # http://www2.sqlite.org/cvstrac/wiki?p=UnsupportedSqlAnalyticalFunctions
    if window.value == "rownum":
        # BUGFIX: ARITHMETIC MUST APPLY TO THE WHOLE WINDOW EXPRESSION;
        # "ROW_NUMBER()-1 OVER (...)" IS A SQL SYNTAX ERROR — THE "- 1"
        # BELONGS AFTER THE OVER CLAUSE
        return "ROW_NUMBER() OVER (" + \
               " PARTITION BY " + (", ".join(window.edges.values)) + \
               " ORDER BY " + (", ".join(window.edges.sort)) + \
               ") - 1 AS " + quote_table(window.name)

    # "UNBOUNDED" WHEN NO EXPLICIT RANGE LIMIT IS GIVEN
    range_min = unicode(coalesce(window.range.min, "UNBOUNDED"))
    range_max = unicode(coalesce(window.range.max, "UNBOUNDED"))

    return sql_aggs[window.aggregate] + "(" + window.value.to_sql() + ") OVER (" + \
           " PARTITION BY " + (", ".join(window.edges.values)) + \
           " ORDER BY " + (", ".join(window.edges.sort)) + \
           " ROWS BETWEEN " + range_min + " PRECEDING AND " + range_max + " FOLLOWING " + \
           ") AS " + quote_table(window.name)
def _normalize_select(self, select):
    """
    EXPAND A SELECT-ALL ("." ) CLAUSE INTO ONE SELECT PER KNOWN COLUMN NAME

    :param select: a single jx select clause
    :return: list of concrete select clauses
    """
    output = []
    if select.value == ".":
        for cname, cs in self.columns.items():
            for c in cs:
                if c.type in STRUCT:
                    continue
                new_select = select.copy()
                new_select.name = cname
                new_select.value = Variable(cname)
                output.append(new_select)
                # ONLY THE FIRST NON-STRUCT COLUMN PER NAME IS USED
                break
    elif select.value.endswith(".*"):
        # PREFIX WILDCARDS ARE NOT IMPLEMENTED YET
        Log.error("not done")
    else:
        # SINGLE-VARIABLE SELECTS ARE NOT IMPLEMENTED YET
        Log.error("not done")
    return output
def change_schema(self, required_changes):
    """
    APPLY A LIST OF SCHEMA CHANGES PRODUCED BY flatten_many()

    :param required_changes: list of {"add": column} or
                             {"nest": column, "new_path": path} instructions
    """
    required_changes = wrap(required_changes)
    for required_change in required_changes:
        if required_change.add:
            column = required_change.add
            if column.type == "nested":
                # WE ARE ALSO NESTING
                self._nest_column(column, column.name)

            table = join_field([self.name] + split_field(column.nested_path[0]))

            self.db.execute(
                "ALTER TABLE " + quote_table(table) + " ADD COLUMN " + _quote_column(column) + " " + sql_types[column.type]
            )

            # RECORD THE NEW COLUMN IN THE IN-MEMORY SCHEMA
            cols = self.columns.get(column.name)
            if cols is None:
                cols = self.columns[column.name] = set()
            cols.add(column)

        elif required_change.nest:
            column = required_change.nest
            new_path = required_change.new_path
            self._nest_column(column, new_path)
            # REMOVE KNOWLEDGE OF PARENT COLUMNS (DONE AUTOMATICALLY)
            # TODO: DELETE PARENT COLUMNS?
def _nest_column(self, column, new_path):
    """
    MOVE column (AND EVERY COLUMN BELOW IT) INTO A NEW NESTED SUB-TABLE AT
    new_path, THEN COPY THE EXISTING DATA OVER AS SINGLE-ELEMENT ARRAYS

    :param column: the column being promoted to a nested array
    :param new_path: dot-delimited path of the new nesting level
    """
    destination_table = join_field([self.name] + split_field(new_path))
    existing_table = join_field([self.name] + split_field(column.nested_path[0]))

    # FIND THE INNER COLUMNS WE WILL BE MOVING
    new_columns = {}
    for cname, cols in self.columns.items():
        if startswith_field(cname, column.name):
            new_columns[cname] = set()
            for col in cols:
                new_columns[cname].add(col)
                col.nested_path = [new_path] + col.nested_path

    # TODO: IF THERE ARE CHILD TABLES, WE MUST UPDATE THEIR RELATIONS TOO?

    # DEFINE A NEW TABLE?
    # LOAD THE COLUMNS
    command = "PRAGMA table_info(" + quote_table(destination_table) + ")"
    details = self.db.query(command)
    if details.data:
        # THE DESTINATION MUST NOT EXIST YET
        raise Log.error("not expected, new nesting!")
    self.nested_tables[new_path] = sub_table = Table_usingSQLite(destination_table, self.db, exists=False)

    # EVERY NESTED TABLE CARRIES PARENT AND ORDER BOOKKEEPING COLUMNS
    self.db.execute(
        "ALTER TABLE " + quote_table(sub_table.name) + " ADD COLUMN " + quoted_PARENT + " INTEGER"
    )
    self.db.execute(
        "ALTER TABLE " + quote_table(sub_table.name) + " ADD COLUMN " + quote_table(ORDER) + " INTEGER"
    )
    for cname, cols in new_columns.items():
        for c in cols:
            sub_table.add_column(c)

    # TEST IF THERE IS ANY DATA IN THE NEW NESTED ARRAY
    all_cols = [c for _, cols in sub_table.columns.items() for c in cols]
    if not all_cols:
        has_nested_data = "0"
    elif len(all_cols) == 1:
        has_nested_data = _quote_column(all_cols[0]) + " is NOT NULL"
    else:
        has_nested_data = "COALESCE(" + \
                          ",".join(_quote_column(c) for c in all_cols) + \
                          ") IS NOT NULL"

    # FILL TABLE WITH EXISTING COLUMN DATA
    # (UID DOUBLES AS PARENT ID; ORDER IS 0 FOR THE SINGLE ELEMENT)
    command = "INSERT INTO " + quote_table(destination_table) + "(\n" + \
              ",\n".join(
                  [quoted_UID, quoted_PARENT, quote_table(ORDER)] +
                  [_quote_column(c) for _, cols in sub_table.columns.items() for c in cols]
              ) + \
              "\n)\n" + \
              "\nSELECT\n" + ",".join(
                  [quoted_UID, quoted_UID, "0"] +
                  [_quote_column(c) for _, cols in sub_table.columns.items() for c in cols]
              ) + \
              "\nFROM\n" + quote_table(existing_table) + \
              "\nWHERE\n" + has_nested_data
    self.db.execute(command)
def flatten_many(self, docs, path="."):
"""
:param doc: THE JSON DOCUMENT
:param path: FULL PATH TO THIS (INNER/NESTED) DOCUMENT
:return: TUPLE (success, command, doc_collection) WHERE
success: BOOLEAN INDICATING PROPER PARSING
command: SCHEMA CHANGES REQUIRED TO BE SUCCESSFUL NEXT TIME
doc_collection: MAP FROM NESTED PATH TO INSERTION PARAMETERS:
{"active_columns": list, "rows": list of objects}
"""
# TODO: COMMAND TO ADD COLUMNS
# TODO: COMMAND TO NEST EXISTING COLUMNS
# COLLECT AS MANY doc THAT DO NOT REQUIRE SCHEMA CHANGE
required_changes = []
_insertion = Dict(
active_columns=set(),
rows=[]
)
doc_collection = {".": _insertion}
nested_tables = copy(self.nested_tables) # KEEP TRACK OF WHAT TABLE WILL BE MADE (SHORTLY)
columns = copy(self.columns)
def _flatten(data, uid, parent_id, order, full_path, nested_path, row=None):
"""
:param data: the data we are pulling apart
:param uid: the uid we are giving this doc
:param parent_id: the parent id of this (sub)doc
:param order: the number of siblings before this one
:param full_path: path to this (sub)doc
:param nested_path: list of paths, deepest first
:param row: we will be filling this
:return:
"""
insertion = doc_collection[nested_path[0]]
if not row:
row = {UID: uid, PARENT: parent_id, ORDER: order}
insertion.rows.append(row)
if isinstance(data, Mapping):
for k, v in data.items():
cname = join_field(split_field(full_path) + [k])
value_type = get_type(v)
if value_type is None:
continue
if value_type in STRUCT:
c = unwraplist(
[cc for cc in columns.get(cname, Null) if cc.type in STRUCT]
)
else:
c = unwraplist(
[cc for cc in columns.get(cname, Null) if cc.type == value_type]
)
if not c:
# WHAT IS THE NESTING LEVEL FOR THIS PATH?
deeper_nested_path = "."
for path, _ in nested_tables.items():
if startswith_field(cname, path) and len(deeper_nested_path) < len(path):
deeper_nested_path = path
if deeper_nested_path != nested_path[0]:
# I HIGHLY SUSPECT, THROUGH CALLING _flatten() AGAIN THE REST OF THIS BLOCK IS NOT NEEDED
nested_column = unwraplist(
[cc for cc in columns.get(deeper_nested_path, Null) if cc.type in STRUCT]
)
insertion.active_columns.add(nested_column)
row[nested_column.es_column] = "."
nested_path = [deeper_nested_path] + nested_path
insertion = doc_collection.get(nested_path[0], None)
if not insertion:
insertion = doc_collection[nested_path[0]] = Dict(
active_columns=set(),
rows=[]
)
uid, parent_id, order = self.next_uid(), uid, 0
row = {UID: uid, PARENT: parent_id, ORDER: order}
insertion.rows.append(row)
c = Column(
name=cname,
table=self.name,
type=value_type,
es_column=typed_column(cname, value_type),
es_index=self.name, # THIS MAY BE THE WRONG TABLE, IF THIS PATH IS A NESTED DOC
nested_path=nested_path
)
add_column_to_schema(columns, c)
if value_type == "nested":
nested_tables[cname] = "fake table"
required_changes.append({"add": c})
# INSIDE IF BLOCK BECAUSE WE DO NOT WANT IT TO ADD WHAT WE columns.get() ALREADY
insertion.active_columns.add(c)
# BE SURE TO NEST VALUES, IF NEEDED
if value_type == "nested":
row[c.es_column] = "."
deeper_nested_path = [cname] + nested_path
insertion = doc_collection.get(cname, None)
if not insertion:
insertion = doc_collection[cname] = Dict(
active_columns=set(),
rows=[]
)
for i, r in enumerate(v):
child_uid = self.next_uid()
_flatten(r, child_uid, uid, i, cname, deeper_nested_path)
elif value_type == "object":
row[c.es_column] = "."
_flatten(v, uid, parent_id, order, cname, nested_path, row=row)
elif c.type:
row[c.es_column] = v
else:
k = "."
v = data
cname = join_field(split_field(full_path) + [k])
value_type = get_type(v)
if value_type is None:
return
if value_type in STRUCT:
c = unwraplist([c for c in columns.get(cname, Null) if c.type in STRUCT])
else:
try:
c = unwraplist([c for c in columns.get(cname, Null) if c.type == value_type])
except Exception, e:
Log.error("not expected", cause=e)
if not c:
c = Column(
name=cname,
table=self.name,
type=value_type,
es_column=typed_column(cname, value_type),
es_index=self.name,
nested_path=nested_path
)
add_column_to_schema(columns, c)
if value_type == "nested":
nested_tables[cname] = "fake table"
required_changes.append({"add": c})
insertion.active_columns.add(c)
if value_type == "nested":
if c.type == "object":
# WE CAN FIX THIS,
Log.error("fix this")
row[c.es_column] = "."
deeper_nested_path = [cname] + nested_path
insertion = doc_collection.get(cname, None)
if not insertion:
doc_collection[cname] = Dict(
active_columns=set(),
rows=[]
)
for i, r in enumerate(v):
child_uid = self.next_uid()
_flatten(r, child_uid, uid, i, cname, deeper_nested_path)
elif value_type == "object":
if c.type == "nested":
# MOVE TO SINGLE-VALUED LIST
child_uid = self.next_uid()
row[c.es_column] = "."
deeper_nested_path = [cname] + nested_path
_flatten(v, child_uid, uid, 0, cname, deeper_nested_path)
else:
row[c.es_column] = "."
_flatten(v, uid, parent_id, order, nested_path, row=row)
elif c.type:
row[c.es_column] = v
for doc in docs:
_flatten(doc, self.next_uid(), 0, 0, full_path=path, nested_path=["."])
if required_changes:
self.change_schema(required_changes)
required_changes = []
return doc_collection
def _insert(self, collection):
    """
    WRITE A FLATTENED DOC COLLECTION (FROM flatten_many) TO THE DATABASE,
    ONE INSERT STATEMENT PER (NESTED) TABLE

    :param collection: MAP FROM NESTED PATH TO
                       {"active_columns": set of Column, "rows": list of dict}
    """
    for nested_path, details in collection.items():
        active_columns = wrap(list(details.active_columns))
        rows = details.rows
        if not rows:
            # ROBUSTNESS: NOTHING TO INSERT FOR THIS TABLE; THE OLD CODE
            # WOULD EMIT A MALFORMED "INSERT INTO t(...)" WITH NO SELECT
            continue
        table_name = join_field(split_field(self.name) + split_field(nested_path))

        if table_name == self.name:
            # DO NOT REQUIRE PARENT OR ORDER COLUMNS
            meta_columns = [UID]
        else:
            meta_columns = [UID, PARENT, ORDER]

        all_columns = meta_columns + active_columns.es_column

        prefix = "INSERT INTO " + quote_table(table_name) + \
                 "(" + ",".join(map(quote_table, all_columns)) + ")"

        # BUILD THE RECORDS (SQLite-PORTABLE MULTI-ROW INSERT VIA UNION ALL)
        records = " UNION ALL ".join(
            "\nSELECT " + ",".join(quote_value(row.get(c)) for c in all_columns)
            for row in unwrap(rows)
        )

        self.db.execute(prefix + records)
def add_column_to_schema(schema, column):
    """
    REGISTER column IN schema UNDER ITS OWN NAME, AND CROSS-LINK IT WITH
    EVERY ANCESTOR/DESCENDANT NAME ALREADY PRESENT

    :param schema: map from column name to set of Column objects
    :param column: the Column being added
    """
    own_entry = schema.setdefault(column.name, set())

    for var_name, db_columns in schema.items():
        # column IS BELOW var_name: var_name's ENTRY ALSO COVERS column
        if startswith_field(column.name, var_name):
            db_columns.add(column)
        # var_name IS BELOW column.name: column's ENTRY COVERS var_name TOO
        if startswith_field(var_name, column.name):
            own_entry.add(column)
# IDENTIFIERS THAT ARE SAFE TO LEAVE UNQUOTED (SIMPLE WORDS)
_do_not_quote = re.compile(r"^\w+$", re.UNICODE)


def quote_table(column):
    # QUOTE A TABLE/COLUMN IDENTIFIER, UNLESS IT IS A SIMPLE WORD
    # NOTE(review): convert.string2quote() presumably emits a single-quoted
    # string literal; SQL identifiers are normally double-quoted — verify
    # that SQLite's tolerance for this is relied upon deliberately
    if _do_not_quote.match(column):
        return column
    return convert.string2quote(column)
def _quote_column(column):
    # QUOTE THE PHYSICAL (TYPED) COLUMN NAME OF A Column OBJECT
    return convert.string2quote(column.es_column)
def quote_value(value):
    """
    CONVERT A PYTHON VALUE TO A SQL LITERAL

    STRUCTURES (dicts/lists) COLLAPSE TO ".", None BECOMES NULL, BOOLEANS
    BECOME 1/0, STRINGS ARE SINGLE-QUOTED WITH EMBEDDED QUOTES DOUBLED
    (STANDARD SQL ESCAPING), EVERYTHING ELSE IS RENDERED WITH unicode()
    """
    if isinstance(value, (Mapping, list)):
        return "."
    elif isinstance(value, basestring):
        return "'" + value.replace("'", "''") + "'"
    elif value == None:
        # "== None" (NOT "is None") ALSO CATCHES pyLibrary Null OBJECTS
        return "NULL"
    elif value is True:
        return "1"
    elif value is False:
        return "0"
    else:
        return unicode(value)
def unique_name():
    # RANDOM 20-CHARACTER NAME (Random is the pyLibrary helper, not stdlib random)
    return Random.string(20)
def column_key(k, v):
    """
    RETURN THE (name, type) PAIR USED TO CATEGORIZE VALUE v UNDER NAME k,
    OR None WHEN v IS NULL-LIKE
    """
    if v == None:
        return None
    elif isinstance(v, basestring):
        return k, "string"
    elif isinstance(v, list):
        # LISTS HAVE NO SINGLE TYPE
        return k, None
    elif isinstance(v, Mapping):
        return k, "object"
    elif isinstance(v, Date):
        # NOTE: redundant with the final branch (both return "number"),
        # kept explicit for readability
        return k, "number"
    else:
        return k, "number"
def get_type(v):
    """
    MAP A VALUE TO ITS JSON TYPE NAME ("string", "object", "number",
    "nested"), OR None FOR NULL-LIKE OR UNRECOGNIZED VALUES
    """
    if v == None:
        return None
    # CHECK ORDER MATTERS: Mapping BEFORE list, NUMBERS BEFORE list
    for kinds, type_name in (
        (basestring, "string"),
        (Mapping, "object"),
        ((int, float, Date), "number"),
        (list, "nested"),
    ):
        if isinstance(v, kinds):
            return type_name
    return None
def get_document_value(document, column):
    """
    RETURN DOCUMENT VALUE IF MATCHES THE column (name, type)
    :param document: THE DOCUMENT
    :param column: A (name, type) PAIR
    :return: VALUE, IF IT IS THE SAME NAME AND TYPE
    """
    # ONLY THE FIRST SEGMENT OF THE COLUMN NAME IS LOOKED UP
    v = document.get(split_field(column.name)[0], None)
    return get_if_type(v, column.type)
def get_if_type(value, type):
    # RETURN value ONLY IF IT MATCHES type; OBJECTS COLLAPSE TO "." AND
    # Date VALUES ARE CONVERTED TO UNIX SECONDS
    if is_type(value, type):
        if type == "object":
            return "."
        if isinstance(value, Date):
            return value.unix
        return value
    return None
def is_type(value, type):
    # TEST IF value IS OF THE GIVEN JSON type
    # NOTE(review): the string branch returns the VALUE itself (falsy for ""),
    # not True, so callers testing the result for truth treat the empty string
    # as NOT a string — presumably intentional; verify
    if value == None:
        return False
    elif isinstance(value, basestring) and type == "string":
        return value
    elif isinstance(value, list):
        return False
    elif isinstance(value, Mapping) and type == "object":
        return True
    elif isinstance(value, (int, float, Date)) and type == "number":
        return True
    return False
def typed_column(name, type_):
    """
    RETURN THE PHYSICAL COLUMN NAME FOR (name, type): THE LOGICAL NAME WITH
    A "$type" SEGMENT APPENDED; NESTED ARRAYS ARE STORED LIKE OBJECTS
    """
    storage_type = "object" if type_ == "nested" else type_
    return join_field(split_field(name) + ["$" + storage_type])
def untyped_column(column_name):
    """
    STRIP THE TRAILING "$type" SEGMENT FROM A PHYSICAL COLUMN NAME,
    RETURNING THE LOGICAL NAME; NAMES WITHOUT "$" ARE RETURNED UNCHANGED
    """
    if "$" not in column_name:
        return column_name
    return join_field(split_field(column_name)[:-1])
    # return column_name.split(".$")[0]
def _make_column_name(number):
    # ALIAS FOR THE number-TH SELECT CLAUSE (COLUMN is the module-level prefix)
    return COLUMN + unicode(number)
# MAP FROM jx AGGREGATE NAME TO THE SQL AGGREGATE FUNCTION
# NOTE(review): FIRST_VALUE/LAST_VALUE/MEDIAN are not built into stock
# SQLite — presumably supplied by an extension; verify
sql_aggs = {
    "avg": "AVG",
    "average": "AVG",
    "count": "COUNT",
    "first": "FIRST_VALUE",
    "last": "LAST_VALUE",
    "max": "MAX",
    "maximum": "MAX",
    "median": "MEDIAN",
    "min": "MIN",
    "minimum": "MIN",
    "sum": "SUM"
}

# MAP FROM JSON TYPE TO THE SQLITE STORAGE TYPE
# (OBJECTS AND NESTED ARRAYS ARE STORED AS TEXT MARKERS)
sql_types = {
    "string": "TEXT",
    "integer": "INTEGER",
    "number": "REAL",
    "boolean": "INTEGER",
    "object": "TEXT",
    "nested": "TEXT"
}

# SQL TEMPLATES ({{value}} IS EXPANDED) FOR THE SUPPORTED STATISTICS;
# std/var APPLY THE BESSEL CORRECTION FACTOR (1 - 1/COUNT)
STATS = {
    "count": "COUNT({{value}})",
    "std": "SQRT((1-1.0/COUNT({{value}}))*VARIANCE({{value}}))",
    "min": "MIN({{value}})",
    "max": "MAX({{value}})",
    "sum": "SUM({{value}})",
    "median": "MEDIAN({{value}})",
    "sos": "SUM({{value}}*{{value}})",
    "var": "(1-1.0/COUNT({{value}}))*VARIANCE({{value}})",
    "avg": "AVG({{value}})"
}
# PRE-QUOTED IDENTIFIERS FOR THE BOOKKEEPING COLUMNS (UID/ORDER/PARENT
# are module-level constants defined elsewhere in this file)
quoted_UID = quote_table(UID)
quoted_ORDER = quote_table(ORDER)
quoted_PARENT = quote_table(PARENT)
def sql_text_array_to_set(column):
    """
    RETURN A CONVERTER THAT TURNS A SQL TEXT-ARRAY FIELD (e.g. "'a','b'")
    OF A RESULT ROW INTO A set OF STRINGS

    :param column: index (or key) of the field within each row
    :return: function row -> set
    """
    import ast  # LOCAL IMPORT: ONLY NEEDED BY THIS CONVERTER

    def _convert(row):
        text = row[column]
        # SQL ESCAPES EMBEDDED QUOTES BY DOUBLING THEM (''); RE-ESCAPE AS \'
        # SO THE PYTHON LITERAL PARSER ACCEPTS THEM.
        # BUGFIX: THE OLD .replace("''", "'") PRODUCED AN UNTERMINATED
        # LITERAL FOR ANY VALUE CONTAINING A QUOTE.
        # literal_eval REPLACES eval: DATABASE TEXT IS UNTRUSTED INPUT.
        return set(ast.literal_eval('[' + text.replace("''", "\\'") + ']'))

    return _convert
def get_column(column):
    """
    :param column: The column you want extracted
    :return: a function that can pull the given column out of sql resultset
    """
    return lambda row: row[column]
def set_column(row, col, child, value):
    """
    STORE value AT row[col]; WHEN child IS A PATH OTHER THAN ".", STORE IT
    INSIDE A DICT HELD AT THAT SLOT (CREATING THE DICT IF NEEDED)
    """
    if child == ".":
        row[col] = value
        return
    target = row[col]
    if target is None:
        target = row[col] = {}
    Dict(target)[child] = value
| mpl-2.0 |
ThomasZh/legend-club-wxpub | foo/wx/xml_parser.py | 1 | 5757 | #!/usr/bin/env python
# _*_ coding: utf-8_*_
#
# Copyright 2016 planc2c.com
# thomas@time2box.com
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from xml.etree import ElementTree
#def print_node(node):
# '''打印结点基本信息'''
# print "=============================================="
# print "node.attrib:%s" % node.attrib
# if node.attrib.has_key("age") > 0 :
# print "node.attrib['age']:%s" % node.attrib['age']
# print "node.tag:%s" % node.tag
# print "node.text:%s" % node.text
#<xml>
# <return_code><![CDATA[SUCCESS]]></return_code>\n
# <return_msg><![CDATA[OK]]></return_msg>\n
# <appid><![CDATA[wxaa328c83d3132bfb]]></appid>\n
# <mch_id><![CDATA[1340430801]]></mch_id>\n
# <nonce_str><![CDATA[lYeiS0ISsMakYRGu]]></nonce_str>\n
# <sign><![CDATA[4A80190EFDDA2B22A46535BF77CC3C7D]]></sign>\n
# <result_code><![CDATA[SUCCESS]]></result_code>\n
# <prepay_id><![CDATA[wx2016051011051929643983670302291635]]></prepay_id>\n
# <trade_type><![CDATA[JSAPI]]></trade_type>\n
#</xml>
def parseWxOrderReturn(xml):
    """Parse the WeChat unified-order response XML into a dict.

    Only tags actually present in the document become keys; a missing tag
    leaves its key absent (same contract as the original per-tag loops).
    When a tag appears more than once, the last occurrence wins.

    :param xml: XML text, e.g. "<xml><return_code>SUCCESS</return_code>...</xml>"
    :return: dict mapping tag name -> text content
    """
    root = ElementTree.fromstring(xml)
    order_return = {}
    # Element.iter() replaces getiterator(), which was deprecated and then
    # removed in Python 3.9; one loop replaces nine copy-pasted blocks.
    for tag in ("return_code", "return_msg", "appid", "mch_id", "nonce_str",
                "sign", "result_code", "prepay_id", "trade_type"):
        for node in root.iter(tag):
            order_return[tag] = node.text
    return order_return
#<xml>
# <appid><![CDATA[wxaa328c83d3132bfb]]></appid>\n
# <attach><![CDATA[Aplan]]></attach>\n
# <bank_type><![CDATA[CFT]]></bank_type>\n
# <cash_fee><![CDATA[1]]></cash_fee>\n
# <fee_type><![CDATA[CNY]]></fee_type>\n
# <is_subscribe><![CDATA[Y]]></is_subscribe>\n
# <mch_id><![CDATA[1340430801]]></mch_id>\n
# <nonce_str><![CDATA[jOhHjqDfx9VQGmU]]></nonce_str>\n
# <openid><![CDATA[oy0Kxt7zNpZFEldQmHwFF-RSLNV0]]></openid>\n
# <out_trade_no><![CDATA[e358738e30fe11e69a7e00163e007b3e]]></out_trade_no>\n
# <result_code><![CDATA[SUCCESS]]></result_code>\n
# <return_code><![CDATA[SUCCESS]]></return_code>\n
# <sign><![CDATA[6291D73149D05F09D18C432E986C4DEB]]></sign>\n
# <time_end><![CDATA[20160613083651]]></time_end>\n
# <total_fee>1</total_fee>\n
# <trade_type><![CDATA[JSAPI]]></trade_type>\n
# <transaction_id><![CDATA[4007652001201606137183943151]]></transaction_id>\n
#</xml>
def parseWxPayReturn(xml):
    """Parse the WeChat payment-notification XML into a dict.

    Only tags actually present in the document become keys; a missing tag
    leaves its key absent (same contract as the original per-tag loops).
    When a tag appears more than once, the last occurrence wins.

    :param xml: XML text of the pay-result notification
    :return: dict mapping tag name -> text content
    """
    root = ElementTree.fromstring(xml)
    pay_return = {}
    # Element.iter() replaces getiterator(), which was deprecated and then
    # removed in Python 3.9; one loop replaces thirteen copy-pasted blocks.
    # "prepay_id" is kept for parity with the original, although the
    # notification XML normally does not carry it.
    for tag in ("return_code", "return_msg", "appid", "mch_id", "nonce_str",
                "sign", "result_code", "prepay_id", "trade_type", "time_end",
                "total_fee", "transaction_id", "out_trade_no"):
        for node in root.iter(tag):
            pay_return[tag] = node.text
    return pay_return
| apache-2.0 |
ClarkYan/msc-thesis | code/query_provider/get_key.py | 3 | 1185 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# -*- Author: ClarkYAN -*-
from flask import Flask, request
from werkzeug.utils import secure_filename
import os
app = Flask(__name__)
# Directory (relative to the app root) where uploaded key files are stored.
app.config['UPLOAD_FOLDER'] = 'keypair/'
# Whitelist of accepted upload extensions; checked by allowed_file() below.
app.config['ALLOWED_EXTENSIONS'] = set(['png', 'jpg', 'jpeg', 'gif', 'xls', 'xlsx', 'pdf', 'p'])
# For a given file, return whether it's an allowed type or not
def allowed_file(filename):
    """Return True if *filename* has an extension from ALLOWED_EXTENSIONS.

    The comparison is now case-insensitive (the whitelist is stored in
    lower case), so e.g. 'KEY.PDF' is accepted where it previously was not.
    """
    return '.' in filename and \
        filename.rsplit('.', 1)[1].lower() in app.config['ALLOWED_EXTENSIONS']
@app.route('/', methods=['POST'])
def connect():
    # Trivial reachability check: clients POST here to verify the service
    # is up before attempting an upload.
    return 'success'
@app.route('/upload', methods=['POST'])
def upload():
    """Accept a key-pair file from the 'dataset' form field and store it
    under UPLOAD_FOLDER; reply with a greeting plus success/failed."""
    incoming = request.files['dataset']
    greeting = 'hello, ' + request.form.get('name', 'little apple')
    # Guard clause: reject missing files and disallowed extensions early.
    if not (incoming and allowed_file(incoming.filename)):
        return greeting + '. failed'
    safe_name = secure_filename(incoming.filename)
    target = os.path.join(app.root_path, app.config['UPLOAD_FOLDER'], safe_name)
    incoming.save(target)
    return greeting + '. success'
if __name__ == '__main__':
    # get key hosted in 6000
    # Bound to localhost only; debug=True enables the reloader and the
    # interactive debugger, so this entry point is for development use.
    app.run(host='127.0.0.1', port=6000, debug=True)
| apache-2.0 |
EmreAtes/spack | var/spack/repos/builtin/packages/xscope/package.py | 5 | 1727 | ##############################################################################
# Copyright (c) 2013-2018, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class Xscope(AutotoolsPackage):
    """XSCOPE -- a program to monitor X11/Client conversations."""
    homepage = "http://cgit.freedesktop.org/xorg/app/xscope"
    url = "https://www.x.org/archive/individual/app/xscope-1.4.1.tar.gz"
    # Second argument is the md5 checksum of the release tarball.
    version('1.4.1', 'c476fb73b354f4a5c388f3814052ce0d')
    # Build-only dependencies: X11 protocol headers, the network transport
    # library, pkg-config and the X.org autotools macros used by configure.
    depends_on('xproto@7.0.17:', type='build')
    depends_on('xtrans', type='build')
    depends_on('pkgconfig', type='build')
    depends_on('util-macros', type='build')
| lgpl-2.1 |
thaumos/ansible | hacking/aws_config/build_iam_policy_framework.py | 25 | 11861 | # Requires pandas, bs4, html5lib, and lxml
#
# Call script with the output from aws_resource_actions callback, e.g.
# python build_iam_policy_framework.py ['ec2:AuthorizeSecurityGroupEgress', 'ec2:AuthorizeSecurityGroupIngress', 'sts:GetCallerIdentity']
#
# The sample output:
# {
# "Version": "2012-10-17",
# "Statement": [
# {
# "Sid": "AnsibleEditor0",
# "Effect": "Allow",
# "Action": [
# "ec2:AuthorizeSecurityGroupEgress",
# "ec2:AuthorizeSecurityGroupIngress"
# ],
# "Resource": "arn:aws:ec2:${Region}:${Account}:security-group/${SecurityGroupId}"
# },
# {
# "Sid": "AnsibleEditor1",
# "Effect": "Allow",
# "Action": [
# "sts:GetCallerIdentity"
# ],
# "Resource": "*"
# }
# ]
# }
#
# Policy troubleshooting:
# - If there are more actions in the policy than you provided, AWS has documented dependencies for some of your actions and
# those have been added to the policy.
# - If there are fewer actions in the policy than you provided, some of your actions are not in the IAM table of actions for
# that service. For example, the API call s3:DeleteObjects does not actually correlate to the permission needed in a policy.
# In this case s3:DeleteObject is the permission required to allow both the s3:DeleteObjects action and the s3:DeleteObject action.
# - The policies output are only as accurate as the AWS documentation. If the policy does not permit the
# necessary actions, look for undocumented dependencies. For example, redshift:CreateCluster requires ec2:DescribeVpcs,
# ec2:DescribeSubnets, ec2:DescribeSecurityGroups, and ec2:DescribeInternetGateways, but AWS does not document this.
#
import json
import requests
import sys
# Probe the optional third-party libraries up front and record what is
# missing instead of dying on the first ImportError, so __main__ can report
# every absent dependency at once.  bs4/html5lib/lxml are not used directly
# here; pandas.read_html() needs them as HTML parser backends.
missing_dependencies = []
try:
    import pandas as pd
except ImportError:
    missing_dependencies.append('pandas')
try:
    import bs4
except ImportError:
    missing_dependencies.append('bs4')
try:
    import html5lib
except ImportError:
    missing_dependencies.append('html5lib')
try:
    import lxml
except ImportError:
    missing_dependencies.append('lxml')
# Maps an IAM action prefix (the 'sts' in 'sts:GetCallerIdentity') to the
# slug used in the AWS IAM documentation URL when the two differ; consumed
# by get_docs_by_prefix() below.
irregular_service_names = {
    'a4b': 'alexaforbusiness',
    'appstream': 'appstream2.0',
    'acm': 'certificatemanager',
    'acm-pca': 'certificatemanagerprivatecertificateauthority',
    'aws-marketplace-management': 'marketplacemanagementportal',
    'ce': 'costexplorerservice',
    'cognito-identity': 'cognitoidentity',
    'cognito-sync': 'cognitosync',
    'cognito-idp': 'cognitouserpools',
    'cur': 'costandusagereport',
    'dax': 'dynamodbacceleratordax',
    'dlm': 'datalifecyclemanager',
    'dms': 'databasemigrationservice',
    'ds': 'directoryservice',
    'ec2messages': 'messagedeliveryservice',
    'ecr': 'ec2containerregistry',
    'ecs': 'elasticcontainerservice',
    'eks': 'elasticcontainerserviceforkubernetes',
    'efs': 'elasticfilesystem',
    'es': 'elasticsearchservice',
    'events': 'cloudwatchevents',
    'firehose': 'kinesisfirehose',
    'fms': 'firewallmanager',
    'health': 'healthapisandnotifications',
    'importexport': 'importexportdiskservice',
    'iot1click': 'iot1-click',
    'kafka': 'managedstreamingforkafka',
    'kinesisvideo': 'kinesisvideostreams',
    'kms': 'keymanagementservice',
    'license-manager': 'licensemanager',
    'logs': 'cloudwatchlogs',
    'opsworks-cm': 'opsworksconfigurationmanagement',
    'mediaconnect': 'elementalmediaconnect',
    'mediaconvert': 'elementalmediaconvert',
    'medialive': 'elementalmedialive',
    'mediapackage': 'elementalmediapackage',
    'mediastore': 'elementalmediastore',
    'mgh': 'migrationhub',
    'mobiletargeting': 'pinpoint',
    'pi': 'performanceinsights',
    'pricing': 'pricelist',
    'ram': 'resourceaccessmanager',
    'resource-groups': 'resourcegroups',
    'sdb': 'simpledb',
    'servicediscovery': 'cloudmap',
    'serverlessrepo': 'serverlessapplicationrepository',
    'sms': 'servermigrationservice',
    'sms-voice': 'pinpointsmsandvoiceservice',
    'sso-directory': 'ssodirectory',
    'ssm': 'systemsmanager',
    'ssmmessages': 'sessionmanagermessagegatewayservice',
    'states': 'stepfunctions',
    'sts': 'securitytokenservice',
    'swf': 'simpleworkflowservice',
    'tag': 'resourcegrouptaggingapi',
    'transfer': 'transferforsftp',
    'waf-regional': 'wafregional',
    'wam': 'workspacesapplicationmanager',
    'xray': 'x-ray'
}
# Prefixes whose documentation cannot be derived from the usual URL pattern
# at all: maps the action prefix to the explicit list of doc pages to scrape
# (some prefixes span several pages).  Checked first by get_docs_by_prefix().
irregular_service_links = {
    'apigateway': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_manageamazonapigateway.html'
    ],
    'aws-marketplace': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplace.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsmarketplacemeteringservice.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_awsprivatemarketplace.html'
    ],
    'discovery': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_applicationdiscovery.html'
    ],
    'elasticloadbalancing': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancing.html',
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_elasticloadbalancingv2.html'
    ],
    'globalaccelerator': [
        'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_globalaccelerator.html'
    ]
}
def get_docs_by_prefix(prefix):
    """Return the list of IAM documentation URL(s) for an action prefix."""
    # Services with hand-maintained page lists win outright; everything
    # else follows the list_amazon<slug>.html / list_aws<slug>.html pattern,
    # with the slug corrected via irregular_service_names when needed.
    if prefix in irregular_service_links:
        return irregular_service_links[prefix]
    slug = irregular_service_names.get(prefix, prefix)
    base = 'https://docs.aws.amazon.com/IAM/latest/UserGuide/list_{0}{1}.html'
    return [base.format('amazon', slug), base.format('aws', slug)]
def get_html(links):
    """Fetch each URL in *links* and parse any HTML tables via pandas.

    Returns a list with one entry per page that contained tables; pages
    without tables are silently skipped.
    """
    parsed_pages = []
    for url in links:
        content = requests.get(url).content
        try:
            parsed_pages.append(pd.read_html(content))
        except ValueError as e:
            # pandas raises ValueError('No tables found') for table-less
            # pages; that is expected for one of the candidate URLs.
            if 'No tables found' not in str(e):
                raise e
    return parsed_pages
def get_tables(service):
    """Scrape the IAM docs for *service* and classify its tables.

    Returns (action_tables, arn_tables), each a list of table bodies with
    the header row removed.
    Action table indices:
    0: Action, 1: Description, 2: Access level, 3: Resource type,
    4: Condition keys, 5: Dependent actions
    ARN table indices:
    0: Resource type, 1: ARN template, 2: Condition keys
    """
    action_tables = []
    arn_tables = []
    for page in get_html(get_docs_by_prefix(service)):
        for frame in page:
            rows = json.loads(frame.to_json(orient='split'))['data']
            header = rows[0]
            if 'Actions' in header and 'Resource Types (*required)' in header:
                action_tables.append(rows[1:])
            elif 'Resource Types' in header and 'ARN' in header:
                arn_tables.append(rows[1:])
    return action_tables, arn_tables
def add_dependent_action(resources, dependency):
    """Record *dependency* ('service:Action') in the resources mapping.

    @param resources: dict mapping service prefix -> list of action names;
        mutated in place.
    @param dependency: action string in 'service:Action' form.
    @return: the same *resources* dict, for call-chaining convenience.
    """
    resource, action = dependency.split(':')
    # setdefault replaces the explicit membership test of the original.
    resources.setdefault(resource, []).append(action)
    return resources
def get_dependent_actions(resources):
    """Expand *resources* with the dependent actions the AWS docs list.

    For every action already present, column 5 of its documentation row
    names actions that must also be granted; those are merged in.

    @param resources: dict {service: [action, ...]}; mutated and returned.
    """
    for service in dict(resources):
        action_tables, arn_tables = get_tables(service)
        for table in action_tables:
            for row in table:
                if row is None:
                    continue
                if row[0] in resources[service] and row[5]:
                    # str.split() always returns a list, so the original
                    # isinstance(list) check (and its else branch) was
                    # dead code and has been removed.
                    for dependency in row[5].split():
                        resources = add_dependent_action(resources, dependency)
    return resources
def get_actions_by_service(resources):
    """Build {service: {action: ARN template or None}} from the IAM docs.

    The resource-type tables give each resource type's ARN template; the
    action tables link each action to a resource type.  Actions whose
    resource type has no (truthy) ARN template map to None, which callers
    translate into Resource: '*'.  (The unused local 'dependencies' from
    the original has been removed.)
    """
    service_action_dict = {}
    for service in resources:
        action_tables, arn_tables = get_tables(service)
        # Map "<resource type>*" (the form the action table references) to
        # the resource type's ARN template.
        arn_dict = {}
        for found_arn_table in arn_tables:
            for arn_row in found_arn_table:
                arn_dict["{0}*".format(arn_row[0])] = arn_row[1]
        # Map each action to the ARN template of its resource type.
        action_dict = {}
        for found_action_table in action_tables:
            for action_row in found_action_table:
                if action_row[0] is None:
                    continue
                # 'or None' preserves the original truthiness semantics:
                # missing OR falsy templates both become None.
                action_dict[action_row[0]] = arn_dict.get(action_row[3]) or None
        service_action_dict[service] = action_dict
    return service_action_dict
def get_resource_arns(aws_actions, action_dict):
    """Group *aws_actions* by the concrete resource ARN they apply to.

    Actions not present in *action_dict* are dropped; a None template
    means the action is unrestricted and groups under '*'.
    """
    resource_arns = {}
    for resource_action in aws_actions:
        _, action = resource_action.split(':')
        if action not in action_dict:
            continue
        template = action_dict[action]
        if template is None:
            arn = "*"
        else:
            arn = template.replace("${Partition}", "aws")
        resource_arns.setdefault(arn, []).append(resource_action)
    return resource_arns
def get_resources(actions):
    """Group 'service:Action' strings into {service: [action, ...]}."""
    grouped = {}
    for entry in actions:
        service, action = entry.split(':')
        grouped.setdefault(service, []).append(action)
    return grouped
def combine_arn_actions(resources, service_action_arn_dict):
    """Merge per-service {ARN: [actions]} groupings into one dict.

    NOTE: despite its name, ``resources`` receives the flat set of
    'service:Action' strings (see get_actions_restricted_by_arn).  The
    original body ignored this parameter and read the module-global
    ``aws_actions`` instead, which only exists when the file is run as a
    script; it now uses the parameter, so the function also works when
    imported as a library.  (The parameter name is kept for interface
    stability.)
    """
    arn_actions = {}
    for service in service_action_arn_dict:
        service_arn_actions = get_resource_arns(resources, service_action_arn_dict[service])
        for arn in service_arn_actions:
            arn_actions.setdefault(arn, []).extend(service_arn_actions[arn])
    return arn_actions
def combine_actions_and_dependent_actions(resources):
    """Flatten {service: [action, ...]} back into a set of 'service:Action'."""
    return {
        '{0}:{1}'.format(service, action)
        for service, actions in resources.items()
        for action in actions
    }
def get_actions_restricted_by_arn(aws_actions):
    """Resolve *aws_actions* (plus documented dependencies) to {ARN: [actions]}."""
    expanded = get_dependent_actions(get_resources(aws_actions))
    service_action_arn_dict = get_actions_by_service(expanded)
    all_actions = combine_actions_and_dependent_actions(expanded)
    return combine_arn_actions(all_actions, service_action_arn_dict)
def main(aws_actions):
    """Print an IAM policy document covering *aws_actions* to stdout."""
    arn_actions = get_actions_restricted_by_arn(aws_actions)
    # One statement per distinct resource ARN; Sids are numbered in
    # insertion order, exactly as the original running counter produced.
    statement = [
        {
            "Sid": "AnsibleEditor{0}".format(index),
            "Effect": "Allow",
            "Action": actions,
            "Resource": arn,
        }
        for index, (arn, actions) in enumerate(arn_actions.items())
    ]
    policy = {"Version": "2012-10-17", "Statement": statement}
    print(json.dumps(policy, indent=4))
if __name__ == '__main__':
    # Refuse to run without the libraries pandas.read_html() depends on.
    if missing_dependencies:
        sys.exit('Missing Python libraries: {0}'.format(', '.join(missing_dependencies)))
    # Actions may arrive as separate argv entries or as one comma-separated
    # string (e.g. pasted straight from the aws_resource_actions callback,
    # brackets and quotes included) -- strip that punctuation off.
    actions = sys.argv[1:]
    if len(actions) == 1:
        actions = sys.argv[1].split(',')
    aws_actions = [action.strip('[], "\'') for action in actions]
    main(aws_actions)
| gpl-3.0 |
lrocheWB/navitia | source/navitiacommon/navitiacommon/launch_exec.py | 12 | 3684 | # Copyright (c) 2001-2014, Canal TP and/or its affiliates. All rights reserved.
#
# This file is part of Navitia,
# the software to build cool stuff with public transport.
#
# Hope you'll enjoy and contribute to this project,
# powered by Canal TP (www.canaltp.fr).
# Help us simplify mobility and open public transport:
# a non ending quest to the responsive locomotion way of traveling!
#
# LICENCE: This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
# Stay tuned using
# twitter @navitia
# IRC #navitia on freenode
# https://groups.google.com/d/forum/navitia
# www.navitia.io
"""
Function to launch a bin
"""
import subprocess
import os
import select
import re
import fcntl
import errno
class LogLine(object):
    """One line of child-process output mapped to a stdlib logging level."""

    def __init__(self, line):
        # Derive the numeric logging level from the line's prefix; empty
        # lines count as debug (10) and unknown prefixes as error (40).
        if not line or line.startswith(('DEBUG', 'TRACE', 'NOTICE')):
            self.level = 10
        elif line.startswith('INFO'):
            self.level = 20
        elif line.startswith('WARN'):
            self.level = 30
        else:
            self.level = 40
        # Strip the "<LEVEL> - " prefix when one appears near the start.
        separator = line.find(' - ')
        self.msg = line[separator + 3:] if 0 < separator < 10 else line
def parse_log(buff):
    """Split *buff* into parsed LogLines plus the trailing partial line.

    Returns (logs, remainder): every newline-terminated line becomes a
    LogLine, and whatever follows the last newline (possibly '') is
    returned so the caller can prepend it to the next read.

    Bug fix: the previous partition-based loop ('while sep and buff')
    dropped the final line whenever the buffer ended exactly on a
    newline -- e.g. 'a\\n' parsed to no log lines at all.
    """
    lines = buff.split('\n')
    # The element after the last newline is the unterminated remainder
    # ('' when the buffer ended on a newline).
    buff = lines.pop()
    logs = [LogLine(line) for line in lines]
    return (logs, buff)
#from: http://stackoverflow.com/questions/7729336/how-can-i-print-and-display-subprocess-stdout-and-stderr-output-without-distorti/7730201#7730201
def make_async(fd):
    """Switch the file object's descriptor into non-blocking mode."""
    flags = fcntl.fcntl(fd, fcntl.F_GETFL)
    fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
# Helper function to read some data from a file descriptor, ignoring EAGAIN errors
# (those errors mean that there are no data available for the moment)
def read_async(fd):
    """Read whatever is available from a non-blocking file object.

    Returns '' instead of raising when no data is ready (EAGAIN); any
    other IOError propagates.
    """
    try:
        return fd.read()
    # 'except IOError as e' (rather than the old 'except IOError, e') is
    # valid on Python 2.6+ and required on Python 3, where the comma form
    # is a SyntaxError.
    except IOError as e:
        if e.errno != errno.EAGAIN:
            raise e
        return ''
def launch_exec(exec_name, args, logger):
    """Run *exec_name* with *args*, streaming its output into *logger*.

    stdout/stderr are switched to non-blocking mode and drained via
    select(); each complete output line is parsed by parse_log() and
    re-logged at the level its prefix declares.

    :param exec_name: path or name of the executable to run
    :param args: list of command-line arguments; NOTE: mutated in place
        (exec_name is inserted at the front), so callers should not reuse
        the list.
    :param logger: a logging.Logger that receives the child's output
    :return: the child's exit code
    """
    log = 'Launching ' + exec_name + ' ' + ' '.join(args)
    #we hide the password in logs
    # NOTE(review): the pattern is a plain (non-raw) string; '\w' works
    # today but a raw string r'password=\w+' would be cleaner.
    logger.info(re.sub('password=\w+', 'password=xxxxxxxxx', log))
    args.insert(0, exec_name)
    proc = subprocess.Popen(args,
                            stderr=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            close_fds=True)
    try:
        # Non-blocking pipes so one quiet stream cannot stall the loop.
        make_async(proc.stderr)
        make_async(proc.stdout)
        while True:
            # Block until at least one of the two pipes has data.
            select.select([proc.stdout, proc.stderr], [], [])
            for pipe in proc.stdout, proc.stderr:
                log_pipe = read_async(pipe)
                # NOTE(review): the trailing partial line returned by
                # parse_log() is discarded here, so a line split across
                # two reads may be logged in pieces -- confirm intended.
                logs, line = parse_log(log_pipe)
                for l in logs:
                    logger.log(l.level, l.msg)
            if proc.poll() is not None:
                break
    finally:
        proc.stdout.close()
        proc.stderr.close()
    return proc.returncode
| agpl-3.0 |
petermat/phantomjs | src/qt/qtwebkit/Tools/Scripts/webkitpy/layout_tests/controllers/layout_test_finder.py | 123 | 7521 | # Copyright (C) 2012 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import errno
import logging
import re
from webkitpy.layout_tests.models import test_expectations
_log = logging.getLogger(__name__)
class LayoutTestFinder(object):
    """Locates layout tests on disk and filters/partitions them for a run.

    Fixes over the original: the missing-file log message interpolated the
    builtin ``file`` instead of the filename; ``commentIndex is -1`` relied
    on CPython small-int caching (now ``==``); ``except IOError, e`` and
    true division were Python-2-only (now the 2.6+/3.x-compatible forms).
    """

    def __init__(self, port, options):
        self._port = port
        self._options = options
        self._filesystem = self._port.host.filesystem
        self.LAYOUT_TESTS_DIRECTORY = 'LayoutTests'

    def find_tests(self, options, args):
        """Return (normalized paths, matching test files) for CLI args,
        including any names read from --test-list files."""
        paths = self._strip_test_dir_prefixes(args)
        if options.test_list:
            paths += self._strip_test_dir_prefixes(self._read_test_names_from_file(options.test_list, self._port.TEST_PATH_SEPARATOR))
        test_files = self._port.tests(paths)
        return (paths, test_files)

    def _strip_test_dir_prefixes(self, paths):
        # Empty strings are dropped as well as having their prefix removed.
        return [self._strip_test_dir_prefix(path) for path in paths if path]

    def _strip_test_dir_prefix(self, path):
        # Handle both "LayoutTests/foo/bar.html" and "LayoutTests\foo\bar.html" if
        # the filesystem uses '\\' as a directory separator.
        if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):
            return path[len(self.LAYOUT_TESTS_DIRECTORY + self._port.TEST_PATH_SEPARATOR):]
        if path.startswith(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):
            return path[len(self.LAYOUT_TESTS_DIRECTORY + self._filesystem.sep):]
        return path

    def _read_test_names_from_file(self, filenames, test_path_separator):
        """Read test names (one per line, '//' comments allowed) from the
        given files, normalizing path separators to the filesystem's."""
        fs = self._filesystem
        tests = []
        for filename in filenames:
            try:
                if test_path_separator != fs.sep:
                    filename = filename.replace(test_path_separator, fs.sep)
                file_contents = fs.read_text_file(filename).split('\n')
                for line in file_contents:
                    line = self._strip_comments(line)
                    if line:
                        tests.append(line)
            except IOError as e:
                if e.errno == errno.ENOENT:
                    _log.critical('')
                    # Bug fix: the message previously interpolated the
                    # builtin 'file' instead of the missing filename.
                    _log.critical('--test-list file "%s" not found' % filename)
                raise
        return tests

    @staticmethod
    def _strip_comments(line):
        """Strip a '//' comment and collapse whitespace; None if empty."""
        commentIndex = line.find('//')
        if commentIndex == -1:  # '==', not an identity test on an int
            commentIndex = len(line)

        line = re.sub(r'\s+', ' ', line[:commentIndex].strip())
        if line == '':
            return None
        else:
            return line

    def skip_tests(self, paths, all_tests_list, expectations, http_tests):
        """Compute the set of tests to skip for this run, honouring the
        expectations file, --skip-failing-tests, --skipped and --http."""
        all_tests = set(all_tests_list)

        tests_to_skip = expectations.get_tests_with_result_type(test_expectations.SKIP)
        if self._options.skip_failing_tests:
            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FAIL))
            tests_to_skip.update(expectations.get_tests_with_result_type(test_expectations.FLAKY))

        if self._options.skipped == 'only':
            tests_to_skip = all_tests - tests_to_skip
        elif self._options.skipped == 'ignore':
            tests_to_skip = set()
        elif self._options.skipped != 'always':
            # make sure we're explicitly running any tests passed on the command line; equivalent to 'default'.
            tests_to_skip -= set(paths)

        # unless of course we don't want to run the HTTP tests :)
        if not self._options.http:
            tests_to_skip.update(set(http_tests))

        return tests_to_skip

    def split_into_chunks(self, test_names):
        """split into a list to run and a set to skip, based on --run-chunk and --run-part."""
        if not self._options.run_chunk and not self._options.run_part:
            return test_names, set()

        # If the user specifies they just want to run a subset of the tests,
        # just grab a subset of the non-skipped tests.
        chunk_value = self._options.run_chunk or self._options.run_part
        try:
            (chunk_num, chunk_len) = chunk_value.split(":")
            chunk_num = int(chunk_num)
            assert(chunk_num >= 0)
            test_size = int(chunk_len)
            assert(test_size > 0)
        except AssertionError:
            _log.critical("invalid chunk '%s'" % chunk_value)
            return (None, None)

        # Get the number of tests
        num_tests = len(test_names)

        # Get the start offset of the slice.
        if self._options.run_chunk:
            chunk_len = test_size
            # In this case chunk_num can be really large. We need
            # to make the slave fit in the current number of tests.
            slice_start = (chunk_num * chunk_len) % num_tests
        else:
            # Validate the data.
            assert(test_size <= num_tests)
            assert(chunk_num <= test_size)

            # To count the chunk_len, and make sure we don't skip
            # some tests, we round to the next value that fits exactly
            # all the parts.
            rounded_tests = num_tests
            if rounded_tests % test_size != 0:
                rounded_tests = (num_tests + test_size - (num_tests % test_size))

            # Floor division: identical on Python 2 ints, and avoids a
            # float (which would break the slicing below) on Python 3.
            chunk_len = rounded_tests // test_size
            slice_start = chunk_len * (chunk_num - 1)
            # It does not mind if we go over test_size.

        # Get the end offset of the slice.
        slice_end = min(num_tests, slice_start + chunk_len)

        tests_to_run = test_names[slice_start:slice_end]

        _log.debug('chunk slice [%d:%d] of %d is %d tests' % (slice_start, slice_end, num_tests, (slice_end - slice_start)))

        # If we reached the end and we don't have enough tests, we run some
        # from the beginning.
        if slice_end - slice_start < chunk_len:
            extra = chunk_len - (slice_end - slice_start)
            _log.debug('  last chunk is partial, appending [0:%d]' % extra)
            tests_to_run.extend(test_names[0:extra])

        return (tests_to_run, set(test_names) - set(tests_to_run))
dmS0Zq/ganeti_webmgr | ganeti_webmgr/django_test_tools/views.py | 3 | 9453 | # Copyright (C) 2011 Oregon State University et al.
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301,
# USA.
from django.test.client import Client
from ganeti_webmgr.django_test_tools.users import UserTestMixin
class ViewTestMixin():
    """
    Helper for testing standard things on a view like anonymous users,
    unauthorized users, and permission tests
    this works with predefined users with password=secret and permission
    defined as needed for the specific test.
    """

    def assert_404(self, url, args, data=dict(), method='get'):
        """
        Verifies that invalid url args will result in 404
        @param url - url to test
        @param args - args for the url string
        @param data - dictionary of data to be passed to the request
        @param method - http method to be used
        """
        c = Client()
        superuser = UserTestMixin.create_user('superuser', is_superuser=True)
        method = getattr(c, method)

        # test 404s - replace each argument one at a time
        # with a nonexistent value
        self.assertTrue(c.login(username=superuser.username,
                                password='secret'))
        for i in range(len(args)):
            temp_args = [arg for arg in args]
            temp_args[i] = 'DOES.NOT.EXIST.WILL.FAIL'
            response = method(url % tuple(temp_args), data)
            self.assertEqual(404, response.status_code)

    def assert_401(self, url, args, data=dict(), method='get'):
        """
        Asserts that an anonymous user will be required to login
        @param url - url to test
        @param args - args for the url string
        @param data - dictionary of data to be passed to the request
        @param method - http method to be used
        """
        c = Client()
        method = getattr(c, method)
        response = method(url % args, data, follow=True)
        self.assertEqual(200, response.status_code)
        self.assertTemplateUsed(response, 'registration/login.html')

    def assert_standard_fails(self, url, args,
                              data=dict(), method='get',
                              login_required=True, authorized=True):
        """
        shortcut function for running standard tests:
            * assert_404
            * assert_401 for anonymous user
            * assert_403 for a user with no permissions
        @param url - url to test
        @param args - args for the url string
        @param data - dictionary of data to be passed to the request
        @param method - http method to be used
        @param login_required - run assert_401 test, default=True
        @param authorized - run assert_403 test for unauthorized user,
        default=True
        """
        # unauthenticated
        if login_required:
            self.assert_401(url, args, data, method)

        # unauthorized
        if authorized:
            unauthorized = UserTestMixin.create_user('unauthorized')
            self.assert_403(url, args, [unauthorized], data, method)

        # test 404s - replace each argument one at a time with a
        # nonexistent value
        self.assert_404(url, args, method=method)

    def assert_403(self, url, args, users, data=dict(), method='get'):
        """
        all users given to this function must fail access
        @param url - url to test
        @param args - args for the url string
        @param users - list of users, all of which must result in 403
        @param data - dictionary of data to be passed to the request
        @param method - http method to be used
        """
        c = Client()
        client_method = getattr(c, method)
        for user in users:
            self.assertTrue(c.login(username=user.username, password='secret'))
            response = client_method(url % args, data)
            self.assertEqual(403, response.status_code)

    def assert_200(self, url, args, users, template=None,
                   mime=None, tests=None, setup=False, data=dict(),
                   method='get', follow=False):
        """
        @param url - url to test
        @param args - args for the url string
        @param users - list of users, all of which should result in 200
        @param template - if given, template that responses should use
        @param mime - if given, mime for response
        @param tests - a function that executes additional tests
        on the responses from the client
        @param setup - call setup before each iteration
        @param data - dictionary of data to be passed to the request
        @param method - http method to be used
        @param follow - follow http redirect
        """
        mime = mime if mime else 'text/html; charset=utf-8'
        c = Client()
        client_method = getattr(c, method)
        for user in users:
            if setup:
                self.setUp()
            self.assertTrue(c.login(username=user.username, password='secret'))
            response = client_method(url % args, data, follow=follow)
            self.assertEqual(200, response.status_code,
                             'user unauthorized: %s' % user.username)
            if template is not None:
                self.assertTemplateUsed(response, template)
            if mime is not None:
                self.assertEqual(response['content-type'], mime)
            if tests is not None:
                tests(user, response)

    def assert_view_missing_fields(self, url, args, data, fields=None,
                                   template=None, mime=None, tests=None,
                                   method='post'):
        """
        Tests fields that should raise an error in a view, usually from form
        validation
        @param url - url to test
        @param args - args for the url string
        @param data - dictionary of data to be passed to the request
        @param fields - list of field keys that are required
        @param template - if given, template that responses should use
        @param mime - if given, mime for response
        @param tests - a function that executes additional tests
        on the responses from the client
        @param method - http method to be used
        """
        # Bug fix: the original read 'data.keys' without calling it, which
        # bound the method object itself; iterating that below raised
        # TypeError whenever fields was left as None.
        fields = list(data.keys()) if fields is None else fields
        mime = mime if mime else 'text/html; charset=utf-8'
        c = Client()
        client_method = getattr(c, method)
        superuser = UserTestMixin.create_user('superuser', is_superuser=True)
        self.assertTrue(c.login(username=superuser.username,
                                password='secret'))

        # check required fields
        for name in fields:
            data_ = data.copy()
            del data_[name]

            response = client_method(url % args, data_)
            self.assertEqual(200, response.status_code)
            if template is not None:
                self.assertTemplateUsed(response, template)
            if mime is not None:
                self.assertEqual(response['content-type'], mime)
            if tests is not None:
                tests(superuser, response)

    def assert_view_values(self, url, args, data, fields,
                           template=None, mime=None, tests=None,
                           method='post'):
        """
        Tests fields that should raise an error for a specific type of invalid
        data is sent.  This is used for blackbox testing form validation via
        the view it is used in.
        @param url - url to test
        @param args - args for the url string
        @param data - dictionary of data to be passed to the request
        @param fields - list of dictionaries of invalid data combinations
        @param template - if given, template that responses should use
        @param mime - if given, mime for response
        @param tests - a function that executes additional tests
        on the responses from the client
        @param method - http method to be used
        """
        mime = mime if mime else 'text/html; charset=utf-8'
        c = Client()
        client_method = getattr(c, method)
        superuser = UserTestMixin.create_user('superuser', is_superuser=True)
        self.assertTrue(c.login(username=superuser.username,
                                password='secret'))

        # check fields that are broken one at a time
        for values in fields:
            data_ = data.copy()
            data_.update(values)

            response = client_method(url % args, data_)
            self.assertEqual(200, response.status_code)
            if template is not None:
                self.assertTemplateUsed(response, template)
            if mime is not None:
                self.assertEqual(response['content-type'], mime)
            if tests is not None:
                tests(superuser, response)
| gpl-2.0 |
Floobits/floobits-vim | plugin/floo/common/reactor.py | 2 | 4414 | import socket
import select
try:
from . import api, msg
from .. import editor
from ..common.exc_fmt import str_e, pp_e
from ..common.handlers import tcp_server
assert msg and tcp_server
except (ImportError, ValueError):
from floo.common.exc_fmt import str_e, pp_e
from floo.common.handlers import tcp_server
from floo.common import api, msg
from floo import editor
reactor = None
class _Reactor(object):
    ''' Low level event driver '''
    def __init__(self):
        # Live protocol (connection) objects and the handler/factory
        # objects that own them; the two lists are kept in step by
        # connect()/listen()/stop_handler().
        self._protos = []
        self._handlers = []
    def connect(self, factory, host, port, secure, conn=None):
        """Build a protocol from *factory* and start an outbound connection."""
        proto = factory.build_protocol(host, port, secure)
        self._protos.append(proto)
        proto.connect(conn)
        self._handlers.append(factory)
    def listen(self, factory, host='127.0.0.1', port=0):
        """Start a TCP listener for *factory*; returns the bound sockname.

        port=0 lets the OS pick a free port.
        """
        listener_factory = tcp_server.TCPServerHandler(factory, self)
        proto = listener_factory.build_protocol(host, port)
        factory.listener_factory = listener_factory
        self._protos.append(proto)
        self._handlers.append(listener_factory)
        return proto.sockname()
    def stop_handler(self, handler):
        """Tear down one handler and its protocol; best-effort cleanup.

        Shuts the whole reactor down once the last handler is gone.
        """
        try:
            handler.proto.stop()
        except Exception as e:
            msg.warn('Error stopping connection: ', str_e(e))
        # The handler/proto may already have been removed; ignore that.
        try:
            self._handlers.remove(handler)
        except Exception:
            pass
        try:
            self._protos.remove(handler.proto)
        except Exception:
            pass
        if hasattr(handler, 'listener_factory'):
            return handler.listener_factory.stop()
        if not self._handlers and not self._protos:
            msg.log('All handlers stopped. Stopping reactor.')
            self.stop()
    def stop(self):
        """Stop every protocol and drop all state."""
        for _conn in self._protos:
            _conn.stop()
        self._protos = []
        self._handlers = []
        msg.log('Reactor shut down.')
        editor.status_message('Disconnected.')
    def is_ready(self):
        # Ready only when there is at least one handler and all report ready.
        if not self._handlers:
            return False
        for f in self._handlers:
            if not f.is_ready():
                return False
        return True
    def _reconnect(self, fd, *fd_sets):
        # Remove *fd* from any pending select() result sets so we do not
        # act on a connection we are about to tear down, then reconnect it.
        for fd_set in fd_sets:
            try:
                fd_set.remove(fd)
            except ValueError:
                pass
        fd.reconnect()
    @api.send_errors
    def tick(self, timeout=0):
        """One reactor iteration: tick handlers, poll sockets, run timers."""
        for factory in self._handlers:
            factory.tick()
        self.select(timeout)
        editor.call_timeouts()
    def block(self):
        """Run tick() in a loop until every handler and protocol is gone."""
        while self._protos or self._handlers:
            self.tick(.05)
    def select(self, timeout=0):
        """Poll all protocol sockets once and dispatch read/write events.

        Protocols register interest via their fd_set(); errors trigger a
        reconnect of the offending protocol rather than raising.
        """
        if not self._protos:
            return

        readable = []
        writeable = []
        errorable = []
        fd_map = {}

        for fd in self._protos:
            fileno = fd.fileno()
            if not fileno:
                continue
            fd.fd_set(readable, writeable, errorable)
            fd_map[fileno] = fd

        if not readable and not writeable:
            return

        try:
            _in, _out, _except = select.select(readable, writeable, errorable, timeout)
        except (select.error, socket.error, Exception) as e:
            # TODO: with multiple FDs, must call select with just one until we find the error :(
            # Probe each fd individually; the one that raises gets reconnected.
            for fileno in readable:
                try:
                    select.select([fileno], [], [], 0)
                except (select.error, socket.error, Exception) as e:
                    fd_map[fileno].reconnect()
                    msg.error('Error in select(): ', fileno, str_e(e))
            return

        for fileno in _except:
            fd = fd_map[fileno]
            self._reconnect(fd, _in, _out)

        for fileno in _out:
            fd = fd_map[fileno]
            try:
                fd.write()
            except Exception as e:
                msg.error('Couldn\'t write to socket: ', str_e(e))
                msg.debug('Couldn\'t write to socket: ', pp_e(e))
                # NOTE(review): returning here skips the remaining writable
                # fds for this tick; they are retried next tick -- confirm
                # that is the intended behaviour.
                return self._reconnect(fd, _in)

        for fileno in _in:
            fd = fd_map[fileno]
            try:
                fd.read()
            except Exception as e:
                msg.error('Couldn\'t read from socket: ', str_e(e))
                msg.debug('Couldn\'t read from socket: ', pp_e(e))
                fd.reconnect()
reactor = _Reactor()
| apache-2.0 |
edx/django-oauth2-provider | provider/oauth2/views.py | 1 | 6130 | from datetime import timedelta
import json
from django.core.exceptions import ObjectDoesNotExist
from django.http import HttpResponseBadRequest, HttpResponse
from django.urls import reverse
from django.views.generic import View
from provider import constants
from provider.oauth2.backends import BasicClientBackend, RequestParamsClientBackend, PublicPasswordBackend
from provider.oauth2.forms import (AuthorizationCodeGrantForm, AuthorizationRequestForm, AuthorizationForm,
PasswordGrantForm, RefreshTokenGrantForm, ClientCredentialsGrantForm)
from provider.oauth2.models import Client, RefreshToken, AccessToken
from provider.utils import now
from provider.views import AccessToken as AccessTokenView, OAuthError, AccessTokenMixin, Capture, Authorize, Redirect
class OAuth2AccessTokenMixin(AccessTokenMixin):
    """Helpers for creating, reusing and invalidating OAuth2 tokens."""

    def get_access_token(self, request, user, scope, client):
        """Return an unexpired token for (user, client, scope).

        Falls back to creating a fresh token when none exists yet.
        """
        try:
            return AccessToken.objects.get(user=user, client=client,
                                           scope=scope, expires__gt=now())
        except AccessToken.DoesNotExist:
            return self.create_access_token(request, user, scope, client)

    def create_access_token(self, request, user, scope, client):
        """Persist and return a brand-new access token."""
        return AccessToken.objects.create(user=user, client=client,
                                          scope=scope)

    def create_refresh_token(self, request, user, scope, access_token, client):
        """Persist and return a refresh token tied to *access_token*."""
        return RefreshToken.objects.create(user=user,
                                           access_token=access_token,
                                           client=client)

    def invalidate_refresh_token(self, rt):
        """Expire a refresh token (or delete it, per settings)."""
        if constants.DELETE_EXPIRED:
            rt.delete()
            return
        rt.expired = True
        rt.save()

    def invalidate_access_token(self, at):
        """Expire an access token (or delete it, per settings)."""
        if constants.DELETE_EXPIRED:
            at.delete()
            return
        at.expires = now() - timedelta(milliseconds=1)
        at.save()
class Capture(Capture):
    """
    Implementation of :class:`provider.views.Capture`.
    """

    def get_redirect_url(self, request):
        """Send the client on to this provider's authorization endpoint."""
        target = reverse('oauth2:authorize')
        return target
class Authorize(Authorize, OAuth2AccessTokenMixin):
    """
    Implementation of :class:`provider.views.Authorize`.
    """

    def get_request_form(self, client, data):
        """Form validating the initial authorization request parameters."""
        return AuthorizationRequestForm(data, client=client)

    def get_authorization_form(self, request, client, data, client_data):
        """Form presented to the resource owner to approve the client."""
        return AuthorizationForm(data)

    def get_client(self, client_id):
        """Look up a client by its public id; returns None when unknown."""
        try:
            return Client.objects.get(client_id=client_id)
        except Client.DoesNotExist:
            return None

    def get_redirect_url(self, request):
        """Where to send the user agent once authorization completes."""
        return reverse('oauth2:redirect')

    def save_authorization(self, request, client, form, client_data):
        """Persist the grant produced by the form and return its code."""
        grant = form.save(commit=False)
        if grant is None:
            return None
        grant.redirect_uri = client_data.get('redirect_uri', '')
        grant.user = request.user
        grant.client = client
        grant.save()
        return grant.code
class Redirect(Redirect):
    """
    Implementation of :class:`provider.views.Redirect`
    """
    # The base class behavior is sufficient; this subclass exists only so the
    # oauth2 app exposes its own view class alongside Capture and Authorize.
    pass
class AccessTokenView(AccessTokenView, OAuth2AccessTokenMixin):
    """
    Implementation of :class:`provider.views.AccessToken`.

    .. note:: All default grant types defined in
        :attr:`provider.views.AccessToken.grant_types` are provided here. Any
        of them can be disabled by overriding the :meth:`get_handler` method
        *or* the :attr:`grant_types` list.
    """
    authentication = (
        BasicClientBackend,
        RequestParamsClientBackend,
        PublicPasswordBackend,
    )

    def _validated_form(self, form_class, data, client):
        # Shared validation step for every grant type below: build the form
        # and raise an OAuthError carrying its errors when invalid.
        form = form_class(data, client=client)
        if not form.is_valid():
            raise OAuthError(form.errors)
        return form

    def get_authorization_code_grant(self, request, data, client):
        form = self._validated_form(AuthorizationCodeGrantForm, data, client)
        return form.cleaned_data.get('grant')

    def get_refresh_token_grant(self, request, data, client):
        form = self._validated_form(RefreshTokenGrantForm, data, client)
        return form.cleaned_data.get('refresh_token')

    def get_password_grant(self, request, data, client):
        form = self._validated_form(PasswordGrantForm, data, client)
        return form.cleaned_data

    def get_client_credentials_grant(self, request, data, client):
        form = self._validated_form(ClientCredentialsGrantForm, data, client)
        return form.cleaned_data

    def invalidate_grant(self, grant):
        """Expire a used authorization grant (or delete it, per settings)."""
        if constants.DELETE_EXPIRED:
            grant.delete()
            return
        grant.expires = now() - timedelta(days=1)
        grant.save()
class AccessTokenDetailView(View):
    """
    Return info about a given access token.

    On success (HTTP 200) the JSON body contains the username, scope, and
    expiration date-time (ISO 8601, UTC) for the token. A missing or expired
    token yields HTTP 400.

    Example
        GET /access_token/abc123/

        {
            username: "some-user",
            scope: "read",
            expires: "2015-04-01T08:41:51"
        }
    """

    def get(self, request, *args, **kwargs):
        content_type = 'application/json'
        try:
            access_token = AccessToken.objects.get_token(kwargs['token'])
            payload = json.dumps({
                'username': access_token.user.username,
                'scope': access_token.get_scope_display(),
                'expires': access_token.expires.isoformat(),
            })
            return HttpResponse(payload, content_type=content_type)
        except ObjectDoesNotExist:
            return HttpResponseBadRequest(
                json.dumps({'error': 'invalid_token'}),
                content_type=content_type)
| mit |
nexusz99/boto | boto/ec2/autoscale/activity.py | 152 | 3058 | # Copyright (c) 2009-2011 Reza Lotun http://reza.lotun.name/
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the
# "Software"), to deal in the Software without restriction, including
# without limitation the rights to use, copy, modify, merge, publish, dis-
# tribute, sublicense, and/or sell copies of the Software, and to permit
# persons to whom the Software is furnished to do so, subject to the fol-
# lowing conditions:
#
# The above copyright notice and this permission notice shall be included
# in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
# OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABIL-
# ITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT
# SHALL THE AUTHOR BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
# WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from datetime import datetime
class Activity(object):
    """A single scaling activity reported by the Auto Scaling service.

    Instances are populated by boto's SAX response parser via
    :meth:`startElement` / :meth:`endElement` while walking a
    DescribeScalingActivities response. All attributes default to None
    until the corresponding element is seen.
    """

    def __init__(self, connection=None):
        self.connection = connection
        self.start_time = None       # datetime, parsed from StartTime
        self.end_time = None         # datetime, parsed from EndTime
        self.activity_id = None
        self.progress = None         # percentage string, e.g. '50'
        self.status_code = None
        self.cause = None
        self.description = None
        self.status_message = None
        self.group_name = None

    def __repr__(self):
        # Bug fix: the 'progress' slot previously printed status_message;
        # show the actual progress value instead.
        return 'Activity<%s>: For group:%s, progress:%s, cause:%s' % (
            self.activity_id, self.group_name, self.progress, self.cause)

    @staticmethod
    def _parse_timestamp(value):
        """Parse an ISO-8601 UTC timestamp, with or without microseconds.

        The service emits both '...T..:..:..Z' and '...T..:..:...fffZ'
        forms; try the fractional variant first and fall back.
        """
        try:
            return datetime.strptime(value, '%Y-%m-%dT%H:%M:%S.%fZ')
        except ValueError:
            return datetime.strptime(value, '%Y-%m-%dT%H:%M:%SZ')

    def startElement(self, name, attrs, connection):
        """SAX hook; this element contains no nested objects to build."""
        return None

    def endElement(self, name, value, connection):
        """SAX hook mapping response fields onto instance attributes."""
        if name == 'ActivityId':
            self.activity_id = value
        elif name == 'AutoScalingGroupName':
            self.group_name = value
        elif name == 'StartTime':
            self.start_time = self._parse_timestamp(value)
        elif name == 'EndTime':
            self.end_time = self._parse_timestamp(value)
        elif name == 'Progress':
            self.progress = value
        elif name == 'Cause':
            self.cause = value
        elif name == 'Description':
            self.description = value
        elif name == 'StatusMessage':
            self.status_message = value
        elif name == 'StatusCode':
            self.status_code = value
        else:
            # Keep unrecognized fields verbatim for forward compatibility.
            setattr(self, name, value)
| mit |
baidu/Paddle | python/paddle/fluid/tests/unittests/test_ir_graph.py | 5 | 4755 | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import unittest
import six
from paddle import fluid
class TestIRGraph(unittest.TestCase):
    """
    TODO(fc500110): `resolve_hazard` api will be tested when it can be used.
    """
    def test_nodes(self):
        # The graph built by build_graph() holds two inputs, one output and
        # the sum operator.
        graph = build_graph()
        self.assertTrue(
            {node.name()
             for node in graph.nodes()} == {"x1", "x2", "out", "sum"})
    def test_has_set_get(self):
        # Attributes of each supported type can be set once and read back.
        graph = build_graph()
        for attr_name in ["int", "float", "string"]:
            self.assertFalse(graph.has(attr_name))
        graph.set("int", 1)
        graph.set("float", 0.5)
        graph.set("string", "string")
        for attr_name in ["int", "float", "string"]:
            self.assertTrue(graph.has(attr_name))
        self.assertTrue(graph.get_int("int") == 1)
        self.assertTrue(graph.get_float("float") == 0.5)
        self.assertTrue(graph.get_string("string") == "string")
    def test_erase(self):
        # erase() removes a previously set attribute.
        graph = build_graph()
        graph.set("test", 0)
        self.assertTrue(graph.has("test"))
        graph.erase("test")
        self.assertFalse(graph.has("test"))
    def test_create_var_node(self):
        # A node created from a VarDesc reports the Variable node type.
        prog = fluid.core.ProgramDesc()
        block = prog.block(0)
        shape = [10, 20]
        x1 = block.var(six.b("x1"))
        x1.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR)
        x1.set_shape(shape)
        graph = fluid.core.Graph(prog)
        node = graph.create_var_node(x1)
        self.assertTrue(node.node_type() == fluid.core.Node.Type.Variable)
    def test_create_op_node(self):
        # A node created from an OpDesc reports the Operation node type.
        prog = fluid.core.ProgramDesc()
        block = prog.block(0)
        sum_op_desc = block.append_op()
        graph = fluid.core.Graph(prog)
        node = graph.create_op_node(sum_op_desc)
        self.assertTrue(node.node_type() == fluid.core.Node.Type.Operation)
    def test_create_control_dep_var(self):
        # Control-dependency vars are named after the current node count.
        graph = build_graph()
        name = "__control_var@{}".format(len(graph.nodes()))
        node = graph.create_control_dep_var()
        self.assertTrue(node.name() == name)
    def test_create_empty_node(self):
        # Empty nodes keep the requested name for both node types.
        prog = fluid.core.ProgramDesc()
        graph = fluid.core.Graph(prog)
        n1 = graph.create_empty_node('x', fluid.core.Node.Type.Operation)
        self.assertTrue(n1.name() == 'x')
        n2 = graph.create_empty_node('y', fluid.core.Node.Type.Variable)
        self.assertTrue(n2.name() == 'y')
    def test_release_nodes(self):
        # release_nodes() empties the graph and hands ownership to the caller.
        graph = build_graph()
        nodes = graph.release_nodes()
        self.assertTrue(len(graph.nodes()) == 0)
        self.assertTrue({node.name()
                         for node in nodes} == {"x1", "x2", "out", "sum"})
    def test_remove_node(self):
        # Removing the "sum" node leaves only the variable nodes.
        graph = build_graph()
        nodes = graph.nodes()
        for node in nodes:
            if node.name() == "sum":
                break
        self.assertTrue({node.name()
                         for node in nodes} == {"x1", "x2", "out", "sum"})
        nodes.remove(node)
        self.assertTrue({node.name() for node in nodes} == {"x1", "x2", "out"})
    def test_retrieve_node(self):
        # Every index in range retrieves a node that belongs to the graph.
        graph = build_graph()
        nodes = []
        for i in range(len(graph.nodes())):
            nodes.append(graph.retrieve_node(i))
        for node in nodes:
            self.assertTrue(node in graph.nodes())
    def resolve_hazard(self):
        # Intentionally not named test_*; placeholder until the API is usable
        # (see the class docstring TODO).
        pass
def build_graph():
    """Build a Graph computing ``out = sum(x1, x2)`` for the tests above."""
    prog = fluid.core.ProgramDesc()
    block = prog.block(0)
    shape = [10, 20]
    # prepare input/output
    x1 = block.var(six.b("x1"))
    x1.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR)
    x1.set_shape(shape)
    x2 = block.var(six.b("x2"))
    x2.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR)
    x2.set_shape(shape)
    out = block.var(six.b("out"))
    out.set_type(fluid.core.VarDesc.VarType.LOD_TENSOR)
    # wire x1 + x2 -> out through a "sum" operator
    sum_op_desc = block.append_op()
    sum_op_desc.set_type("sum")
    sum_op_desc.set_input("X", ["x1", "x2"])
    sum_op_desc.set_output("Out", ["out"])
    sum_op_desc.check_attrs()
    sum_op_desc.infer_shape(block)
    graph = fluid.core.Graph(prog)
    return graph
# Allow running this test module directly.
if __name__ == "__main__":
    unittest.main()
| apache-2.0 |
evertonaleixo/tarp | DeepLearning/deep-belief-network-example.py | 1 | 1185 | # coding=utf-8
from sklearn.datasets import load_digits
# NOTE(review): sklearn.cross_validation was removed in scikit-learn 0.20;
# modern versions expose train_test_split from sklearn.model_selection.
from sklearn.cross_validation import train_test_split
from sklearn.metrics.classification import accuracy_score
import numpy as np
from dbn import SupervisedDBNClassification
# Loading dataset
digits = load_digits()
X, Y = digits.data, digits.target
# Data scaling: digit pixel values range 0..16, so divide by 16 to get [0, 1]
X = (X / 16).astype(np.float32)
# Splitting data: hold out 20% for evaluation
X_train, X_test, Y_train, Y_test = train_test_split(X, Y, test_size=0.2, random_state=0)
# Training: two 256-unit RBM layers, greedy pre-training then backprop
classifier = SupervisedDBNClassification(hidden_layers_structure=[256, 256],
                                         learning_rate_rbm=0.1,
                                         learning_rate=0.1,
                                         n_epochs_rbm=10,
                                         n_iter_backprop=100,
                                         l2_regularization=0.0,
                                         batch_size=32,
                                         activation_function='relu',
                                         dropout_p=0.2)
classifier.fit(X_train, Y_train)
# Test: accuracy on the held-out split
Y_pred = classifier.predict(X_test)
print('Done.\nAccuracy: ')
print(accuracy_score(Y_test, Y_pred))
oliverhr/odoo | openerp/addons/test_documentation_examples/tests/test_delegation.py | 366 | 1299 | # -*- coding: utf-8 -*-
from openerp.tests import common
class TestDelegation(common.TransactionCase):
    """Exercise _inherits delegation: fields of the child records are
    readable and writable through the delegating parent record."""
    def setUp(self):
        super(TestDelegation, self).setUp()
        env = self.env
        # A parent delegating to two child records, one per child model.
        record = env['delegation.parent'].create({
            'child0_id': env['delegation.child0'].create({'field_0': 0}).id,
            'child1_id': env['delegation.child1'].create({'field_1': 1}).id,
        })
        self.record = record
    def test_delegating_record(self):
        env = self.env
        record = self.record
        # children fields can be looked up on the parent record directly
        self.assertEqual(
            record.field_0
            ,
            0
        )
        self.assertEqual(
            record.field_1
            ,
            1
        )
    def test_swap_child(self):
        # Replacing the delegated child changes the value seen on the parent.
        env = self.env
        record = self.record
        record.write({
            'child0_id': env['delegation.child0'].create({'field_0': 42}).id
        })
        self.assertEqual(
            record.field_0
            ,
            42
        )
    def test_write(self):
        # Writing a delegated field on the parent updates the child record.
        record = self.record
        record.write({'field_1': 4})
        self.assertEqual(
            record.field_1
            ,
            4
        )
        self.assertEqual(
            record.child1_id.field_1
            ,
            4
        )
| agpl-3.0 |
abhattad4/Digi-Menu | django/contrib/gis/gdal/srs.py | 82 | 12095 | """
The Spatial Reference class, represents OGR Spatial Reference objects.
Example:
>>> from django.contrib.gis.gdal import SpatialReference
>>> srs = SpatialReference('WGS84')
>>> print(srs)
GEOGCS["WGS 84",
DATUM["WGS_1984",
SPHEROID["WGS 84",6378137,298.257223563,
AUTHORITY["EPSG","7030"]],
TOWGS84[0,0,0,0,0,0,0],
AUTHORITY["EPSG","6326"]],
PRIMEM["Greenwich",0,
AUTHORITY["EPSG","8901"]],
UNIT["degree",0.01745329251994328,
AUTHORITY["EPSG","9122"]],
AUTHORITY["EPSG","4326"]]
>>> print(srs.proj)
+proj=longlat +ellps=WGS84 +datum=WGS84 +no_defs
>>> print(srs.ellipsoid)
(6378137.0, 6356752.3142451793, 298.25722356300003)
>>> print(srs.projected, srs.geographic)
False True
>>> srs.import_epsg(32140)
>>> print(srs.name)
NAD83 / Texas South Central
"""
from ctypes import byref, c_char_p, c_int
# Getting the error checking routine and exceptions
from django.contrib.gis.gdal.base import GDALBase
from django.contrib.gis.gdal.error import SRSException
from django.contrib.gis.gdal.prototypes import srs as capi
from django.utils import six
from django.utils.encoding import force_bytes, force_text
class SpatialReference(GDALBase):
    """
    A wrapper for the OGRSpatialReference object. According to the GDAL Web site,
    the SpatialReference object "provide[s] services to represent coordinate
    systems (projections and datums) and to transform between them."
    """
    def __init__(self, srs_input='', srs_type='user'):
        """
        Creates a GDAL OSR Spatial Reference object from the given input.
        The input may be string of OGC Well Known Text (WKT), an integer
        EPSG code, a PROJ.4 string, and/or a projection "well known" shorthand
        string (one of 'WGS84', 'WGS72', 'NAD27', 'NAD83').
        """
        if srs_type == 'wkt':
            self.ptr = capi.new_srs(c_char_p(b''))
            self.import_wkt(srs_input)
            return
        elif isinstance(srs_input, six.string_types):
            # Encoding to ASCII if unicode passed in.
            if isinstance(srs_input, six.text_type):
                srs_input = srs_input.encode('ascii')
            try:
                # If SRID is a string, e.g., '4326', then make acceptable
                # as user input.
                srid = int(srs_input)
                srs_input = 'EPSG:%d' % srid
            except ValueError:
                # Not an integer string; pass it through as user input.
                pass
        elif isinstance(srs_input, six.integer_types):
            # EPSG integer code was input.
            srs_type = 'epsg'
        elif isinstance(srs_input, self.ptr_type):
            # Already an OGR pointer; adopt it directly.
            srs = srs_input
            srs_type = 'ogr'
        else:
            raise TypeError('Invalid SRS type "%s"' % srs_type)
        if srs_type == 'ogr':
            # Input is already an SRS pointer.
            srs = srs_input
        else:
            # Creating a new SRS pointer, using the string buffer.
            buf = c_char_p(b'')
            srs = capi.new_srs(buf)
        # If the pointer is NULL, throw an exception.
        if not srs:
            raise SRSException('Could not create spatial reference from: %s' % srs_input)
        else:
            self.ptr = srs
        # Importing from either the user input string or an integer SRID.
        if srs_type == 'user':
            self.import_user_input(srs_input)
        elif srs_type == 'epsg':
            self.import_epsg(srs_input)
    def __del__(self):
        "Destroys this spatial reference."
        # capi may already be torn down at interpreter exit; guard both.
        if self._ptr and capi:
            capi.release_srs(self._ptr)
    def __getitem__(self, target):
        """
        Returns the value of the given string attribute node, None if the node
        doesn't exist. Can also take a tuple as a parameter, (target, child),
        where child is the index of the attribute in the WKT. For example:
        >>> wkt = 'GEOGCS["WGS 84", DATUM["WGS_1984, ... AUTHORITY["EPSG","4326"]]'
        >>> srs = SpatialReference(wkt) # could also use 'WGS84', or 4326
        >>> print(srs['GEOGCS'])
        WGS 84
        >>> print(srs['DATUM'])
        WGS_1984
        >>> print(srs['AUTHORITY'])
        EPSG
        >>> print(srs['AUTHORITY', 1]) # The authority value
        4326
        >>> print(srs['TOWGS84', 4]) # the fourth value in this wkt
        0
        >>> print(srs['UNIT|AUTHORITY']) # For the units authority, have to use the pipe symbol.
        EPSG
        >>> print(srs['UNIT|AUTHORITY', 1]) # The authority value for the units
        9122
        """
        if isinstance(target, tuple):
            return self.attr_value(*target)
        else:
            return self.attr_value(target)
    def __str__(self):
        "The string representation uses 'pretty' WKT."
        return self.pretty_wkt
    # #### SpatialReference Methods ####
    def attr_value(self, target, index=0):
        """
        The attribute value for the given target node (e.g. 'PROJCS'). The index
        keyword specifies an index of the child node to return.
        """
        if not isinstance(target, six.string_types) or not isinstance(index, int):
            raise TypeError
        return capi.get_attr_value(self.ptr, force_bytes(target), index)
    def auth_name(self, target):
        "Returns the authority name for the given string target node."
        return capi.get_auth_name(self.ptr, force_bytes(target))
    def auth_code(self, target):
        "Returns the authority code for the given string target node."
        return capi.get_auth_code(self.ptr, force_bytes(target))
    def clone(self):
        "Returns a clone of this SpatialReference object."
        return SpatialReference(capi.clone_srs(self.ptr))
    def from_esri(self):
        "Morphs this SpatialReference from ESRI's format to EPSG."
        capi.morph_from_esri(self.ptr)
    def identify_epsg(self):
        """
        This method inspects the WKT of this SpatialReference, and will
        add EPSG authority nodes where an EPSG identifier is applicable.
        """
        capi.identify_epsg(self.ptr)
    def to_esri(self):
        "Morphs this SpatialReference to ESRI's format."
        capi.morph_to_esri(self.ptr)
    def validate(self):
        "Checks to see if the given spatial reference is valid."
        capi.srs_validate(self.ptr)
    # #### Name & SRID properties ####
    @property
    def name(self):
        "Returns the name of this Spatial Reference."
        if self.projected:
            return self.attr_value('PROJCS')
        elif self.geographic:
            return self.attr_value('GEOGCS')
        elif self.local:
            return self.attr_value('LOCAL_CS')
        else:
            return None
    @property
    def srid(self):
        "Returns the SRID of top-level authority, or None if undefined."
        try:
            return int(self.attr_value('AUTHORITY', 1))
        except (TypeError, ValueError):
            return None
    # #### Unit Properties ####
    @property
    def linear_name(self):
        "Returns the name of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def linear_units(self):
        "Returns the value of the linear units."
        units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def angular_name(self):
        "Returns the name of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return name
    @property
    def angular_units(self):
        "Returns the value of the angular units."
        units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        return units
    @property
    def units(self):
        """
        Returns a 2-tuple of the units value and the units name,
        and will automatically determines whether to return the linear
        or angular units.
        """
        units, name = None, None
        if self.projected or self.local:
            units, name = capi.linear_units(self.ptr, byref(c_char_p()))
        elif self.geographic:
            units, name = capi.angular_units(self.ptr, byref(c_char_p()))
        if name is not None:
            name = force_text(name)
        return (units, name)
    # #### Spheroid/Ellipsoid Properties ####
    @property
    def ellipsoid(self):
        """
        Returns a tuple of the ellipsoid parameters:
        (semimajor axis, semiminor axis, and inverse flattening)
        """
        return (self.semi_major, self.semi_minor, self.inverse_flattening)
    @property
    def semi_major(self):
        "Returns the Semi Major Axis for this Spatial Reference."
        return capi.semi_major(self.ptr, byref(c_int()))
    @property
    def semi_minor(self):
        "Returns the Semi Minor Axis for this Spatial Reference."
        return capi.semi_minor(self.ptr, byref(c_int()))
    @property
    def inverse_flattening(self):
        "Returns the Inverse Flattening for this Spatial Reference."
        return capi.invflattening(self.ptr, byref(c_int()))
    # #### Boolean Properties ####
    @property
    def geographic(self):
        """
        Returns True if this SpatialReference is geographic
         (root node is GEOGCS).
        """
        return bool(capi.isgeographic(self.ptr))
    @property
    def local(self):
        "Returns True if this SpatialReference is local (root node is LOCAL_CS)."
        return bool(capi.islocal(self.ptr))
    @property
    def projected(self):
        """
        Returns True if this SpatialReference is a projected coordinate system
         (root node is PROJCS).
        """
        return bool(capi.isprojected(self.ptr))
    # #### Import Routines #####
    def import_epsg(self, epsg):
        "Imports the Spatial Reference from the EPSG code (an integer)."
        capi.from_epsg(self.ptr, epsg)
    def import_proj(self, proj):
        "Imports the Spatial Reference from a PROJ.4 string."
        capi.from_proj(self.ptr, proj)
    def import_user_input(self, user_input):
        "Imports the Spatial Reference from the given user input string."
        capi.from_user_input(self.ptr, force_bytes(user_input))
    def import_wkt(self, wkt):
        "Imports the Spatial Reference from OGC WKT (string)"
        capi.from_wkt(self.ptr, byref(c_char_p(wkt)))
    def import_xml(self, xml):
        "Imports the Spatial Reference from an XML string."
        capi.from_xml(self.ptr, xml)
    # #### Export Properties ####
    @property
    def wkt(self):
        "Returns the WKT representation of this Spatial Reference."
        return capi.to_wkt(self.ptr, byref(c_char_p()))
    @property
    def pretty_wkt(self, simplify=0):
        "Returns the 'pretty' representation of the WKT."
        # NOTE(review): extra arguments on a property getter are unreachable
        # through attribute access, so ``simplify`` is effectively always 0.
        return capi.to_pretty_wkt(self.ptr, byref(c_char_p()), simplify)
    @property
    def proj(self):
        "Returns the PROJ.4 representation for this Spatial Reference."
        return capi.to_proj(self.ptr, byref(c_char_p()))
    @property
    def proj4(self):
        "Alias for proj()."
        return self.proj
    @property
    def xml(self, dialect=''):
        "Returns the XML representation of this Spatial Reference."
        # NOTE(review): as with pretty_wkt, ``dialect`` cannot be supplied by
        # callers because this is a property.
        return capi.to_xml(self.ptr, byref(c_char_p()), dialect)
class CoordTransform(GDALBase):
    """A coordinate system transformation between two spatial references."""

    def __init__(self, source, target):
        """Create a transform from ``source`` to ``target`` (both must be
        SpatialReference instances)."""
        if not (isinstance(source, SpatialReference) and
                isinstance(target, SpatialReference)):
            raise TypeError('source and target must be of type SpatialReference')
        self.ptr = capi.new_ct(source._ptr, target._ptr)
        # Remember the names only for the string representation below.
        self._srs1_name = source.name
        self._srs2_name = target.name

    def __del__(self):
        """Free the underlying OGR coordinate transformation handle."""
        if not (self._ptr and capi):
            return
        capi.destroy_ct(self._ptr)

    def __str__(self):
        return 'Transform from "%s" to "%s"' % (self._srs1_name, self._srs2_name)
| bsd-3-clause |
myarjunar/QGIS | python/plugins/processing/gui/ExtentSelectionPanel.py | 1 | 5956 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ExtentSelectionPanel.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
from builtins import str
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
# This will get replaced with a git SHA1 when you do a git archive
__revision__ = '$Format:%H$'
import os
from qgis.PyQt import uic
from qgis.PyQt.QtWidgets import QMenu, QAction, QInputDialog
from qgis.PyQt.QtGui import QCursor
from qgis.gui import QgsMessageBar
from qgis.utils import iface
from processing.gui.RectangleMapTool import RectangleMapTool
from processing.core.ProcessingConfig import ProcessingConfig
from processing.tools import dataobjects
pluginPath = os.path.split(os.path.dirname(__file__))[0]
WIDGET, BASE = uic.loadUiType(
os.path.join(pluginPath, 'ui', 'widgetBaseSelector.ui'))
class ExtentSelectionPanel(BASE, WIDGET):
    """Widget for choosing a processing extent parameter.

    The extent can be typed directly into the line edit, copied from a
    layer or the canvas, or drawn as a rectangle on the map canvas.
    """

    def __init__(self, dialog, param):
        super(ExtentSelectionPanel, self).__init__(None)
        self.setupUi(self)

        self.dialog = dialog
        self.param = param
        if self.param.optional:
            # Older Qt bindings may lack setPlaceholderText.
            if hasattr(self.leText, 'setPlaceholderText'):
                self.leText.setPlaceholderText(
                    self.tr('[Leave blank to use min covering extent]'))

        self.btnSelect.clicked.connect(self.selectExtent)

        canvas = iface.mapCanvas()
        # Remember the active tool so it can be restored after drawing.
        self.prevMapTool = canvas.mapTool()
        self.tool = RectangleMapTool(canvas)
        self.tool.rectangleCreated.connect(self.updateExtent)

        if param.default:
            # Pre-fill only when the default looks like a valid
            # 'xmin,xmax,ymin,ymax' extent string.
            tokens = param.default.split(',')
            if len(tokens) == 4:
                try:
                    for token in tokens:
                        float(token)
                except ValueError:
                    # Bug fix: was a bare ``except`` that silently swallowed
                    # every error; only non-numeric tokens are expected here.
                    pass
                else:
                    self.leText.setText(param.default)

    def selectExtent(self):
        """Show the popup menu with the available extent sources."""
        popupmenu = QMenu()
        useLayerExtentAction = QAction(
            self.tr('Use layer/canvas extent'), self.btnSelect)
        selectOnCanvasAction = QAction(
            self.tr('Select extent on canvas'), self.btnSelect)

        popupmenu.addAction(useLayerExtentAction)
        popupmenu.addAction(selectOnCanvasAction)

        selectOnCanvasAction.triggered.connect(self.selectOnCanvas)
        useLayerExtentAction.triggered.connect(self.useLayerExtent)

        if self.param.optional:
            useMincoveringExtentAction = QAction(
                self.tr('Use min covering extent from input layers'),
                self.btnSelect)
            useMincoveringExtentAction.triggered.connect(
                self.useMinCoveringExtent)
            popupmenu.addAction(useMincoveringExtentAction)

        popupmenu.exec_(QCursor.pos())

    def useMinCoveringExtent(self):
        """Clear the field; an empty value means 'min covering extent'."""
        self.leText.setText('')

    def useLayerExtent(self):
        """Let the user pick the canvas extent or any loaded layer's extent."""
        CANVAS_KEY = 'Use canvas extent'
        extentsDict = {}
        extentsDict[CANVAS_KEY] = {"extent": iface.mapCanvas().extent(),
                                   "authid": iface.mapCanvas().mapSettings().destinationCrs().authid()}
        extents = [CANVAS_KEY]
        layers = dataobjects.getAllLayers()
        for layer in layers:
            authid = layer.crs().authid()
            if ProcessingConfig.getSetting(ProcessingConfig.SHOW_CRS_DEF) \
                    and authid is not None:
                layerName = u'{} [{}]'.format(layer.name(), authid)
            else:
                layerName = layer.name()
            extents.append(layerName)
            extentsDict[layerName] = {"extent": layer.extent(), "authid": authid}
        (item, ok) = QInputDialog.getItem(self, self.tr('Select extent'),
                                          self.tr('Use extent from'), extents, False)
        if ok:
            self.setValueFromRect(extentsDict[item]["extent"])
            # Warn when the chosen layer's CRS differs from the canvas CRS:
            # the extent numbers would then be in a different projection.
            if extentsDict[item]["authid"] != iface.mapCanvas().mapSettings().destinationCrs().authid():
                iface.messageBar().pushMessage(self.tr("Warning"),
                                               self.tr("The projection of the chosen layer is not the same as canvas projection! The selected extent might not be what was intended."),
                                               QgsMessageBar.WARNING, 8)

    def selectOnCanvas(self):
        """Activate the rectangle tool and get the dialog out of the way."""
        canvas = iface.mapCanvas()
        canvas.setMapTool(self.tool)
        self.dialog.showMinimized()

    def updateExtent(self):
        """Slot for RectangleMapTool: adopt the drawn rectangle."""
        r = self.tool.rectangle()
        self.setValueFromRect(r)

    def setValueFromRect(self, r):
        """Fill the field from a rectangle and restore the previous tool."""
        s = '{},{},{},{}'.format(
            r.xMinimum(), r.xMaximum(), r.yMinimum(), r.yMaximum())
        self.leText.setText(s)
        self.tool.reset()
        canvas = iface.mapCanvas()
        canvas.setMapTool(self.prevMapTool)
        self.dialog.showNormal()
        self.dialog.raise_()
        self.dialog.activateWindow()

    def getValue(self):
        """Return the extent string, or None when the field is blank."""
        text = str(self.leText.text())
        if text.strip() != '':
            return text
        return None

    def setExtentFromString(self, s):
        """Programmatically set the extent string."""
        self.leText.setText(s)
| gpl-2.0 |
mrjaydee82/SinLessKernel-4.4.4 | toolchains/491/share/gdb/python/gdb/FrameIterator.py | 126 | 1577 | # Copyright (C) 2013-2014 Free Software Foundation, Inc.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import gdb
import itertools
class FrameIterator(object):
    """Iterator over gdb.Frame-like objects.

    Starting from a given frame, yields it and then each successively
    older frame (via ``older()``) until None is reached. Any object
    exposing an ``older()`` method works.
    """

    def __init__(self, frame_obj):
        """Initialize with *frame_obj* as the newest frame to yield."""
        super(FrameIterator, self).__init__()
        self.frame = frame_obj

    def __iter__(self):
        return self

    def next(self):
        """Return the current frame and advance to the older one.

        Raises StopIteration once the chain is exhausted."""
        current = self.frame
        if current is None:
            raise StopIteration
        self.frame = current.older()
        return current

    # Python 3.x iterates via __next__ while Python 2.x uses next();
    # keep both entry points so either interpreter works.
    def __next__(self):
        return self.next()
| gpl-2.0 |
waterponey/scikit-learn | sklearn/datasets/tests/test_base.py | 13 | 8907 | import os
import shutil
import tempfile
import warnings
import numpy
from pickle import loads
from pickle import dumps
from sklearn.datasets import get_data_home
from sklearn.datasets import clear_data_home
from sklearn.datasets import load_files
from sklearn.datasets import load_sample_images
from sklearn.datasets import load_sample_image
from sklearn.datasets import load_digits
from sklearn.datasets import load_diabetes
from sklearn.datasets import load_linnerud
from sklearn.datasets import load_iris
from sklearn.datasets import load_breast_cancer
from sklearn.datasets import load_boston
from sklearn.datasets.base import Bunch
from sklearn.externals.six import b, u
from sklearn.utils.testing import assert_false
from sklearn.utils.testing import assert_true
from sklearn.utils.testing import assert_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import with_setup
# Temporary directories shared by all tests in this module; deleted once in
# teardown_module().
DATA_HOME = tempfile.mkdtemp(prefix="scikit_learn_data_home_test_")
LOAD_FILES_ROOT = tempfile.mkdtemp(prefix="scikit_learn_load_files_test_")
# Per-test category directories, (re)created by setup_load_files() and
# removed by teardown_load_files().
TEST_CATEGORY_DIR1 = ""
TEST_CATEGORY_DIR2 = ""
def _remove_dir(path):
if os.path.isdir(path):
shutil.rmtree(path)
def teardown_module():
    """Test fixture (clean up) run once after all tests of this module"""
    _remove_dir(DATA_HOME)
    _remove_dir(LOAD_FILES_ROOT)
def setup_load_files():
    """Create two category directories; the first one holds a single sample file."""
    global TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2
    TEST_CATEGORY_DIR1 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    TEST_CATEGORY_DIR2 = tempfile.mkdtemp(dir=LOAD_FILES_ROOT)
    # delete=False keeps the file on disk for load_files() to find.
    with tempfile.NamedTemporaryFile(dir=TEST_CATEGORY_DIR1,
                                     delete=False) as sample_file:
        sample_file.write(b("Hello World!\n"))
def teardown_load_files():
    """Remove the per-test category directories created by setup_load_files()."""
    for directory in (TEST_CATEGORY_DIR1, TEST_CATEGORY_DIR2):
        _remove_dir(directory)
def test_data_home():
    """get_data_home / clear_data_home round-trip on a dedicated folder."""
    # get_data_home will point to a pre-existing folder
    home = get_data_home(data_home=DATA_HOME)
    assert_equal(home, DATA_HOME)
    assert_true(os.path.exists(home))
    # clear_data_home deletes both the content and the folder itself
    clear_data_home(data_home=home)
    assert_false(os.path.exists(home))
    # if the folder is missing it will be created again
    home = get_data_home(data_home=DATA_HOME)
    assert_true(os.path.exists(home))
def test_default_empty_load_files():
    """load_files on an empty root yields no files, no categories, no DESCR."""
    bunch = load_files(LOAD_FILES_ROOT)
    assert_equal(len(bunch.filenames), 0)
    assert_equal(len(bunch.target_names), 0)
    assert_equal(bunch.DESCR, None)
@with_setup(setup_load_files, teardown_load_files)
def test_default_load_files():
    """load_files picks up the single sample file and both category dirs."""
    bunch = load_files(LOAD_FILES_ROOT)
    assert_equal(len(bunch.filenames), 1)
    assert_equal(len(bunch.target_names), 2)
    assert_equal(bunch.DESCR, None)
    # Content is returned as raw bytes when no encoding is given.
    assert_equal(bunch.data, [b("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_w_categories_desc_and_encoding():
    """load_files honours the categories, description and encoding arguments."""
    # os.path.basename is portable; splitting on '/' breaks on Windows where
    # the separator is '\\'.
    category = os.path.basename(os.path.abspath(TEST_CATEGORY_DIR1))
    res = load_files(LOAD_FILES_ROOT, description="test",
                     categories=category, encoding="utf-8")
    assert_equal(len(res.filenames), 1)
    assert_equal(len(res.target_names), 1)
    assert_equal(res.DESCR, "test")
    # Content is decoded to unicode because an encoding was supplied.
    assert_equal(res.data, [u("Hello World!\n")])
@with_setup(setup_load_files, teardown_load_files)
def test_load_files_wo_load_content():
    """With load_content=False only metadata is returned, no data attribute."""
    bunch = load_files(LOAD_FILES_ROOT, load_content=False)
    assert_equal(len(bunch.filenames), 1)
    assert_equal(len(bunch.target_names), 2)
    assert_equal(bunch.DESCR, None)
    assert_equal(bunch.get('data'), None)
def test_load_sample_images():
    """Both bundled sample images load when PIL is available."""
    try:
        images = load_sample_images()
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        # Assertions live in the else branch so an AssertionError is never
        # mistaken for the ImportError we tolerate.
        assert_equal(len(images.images), 2)
        assert_equal(len(images.filenames), 2)
        assert_true(images.DESCR)
def test_load_digits():
    """Digits dataset shape, label count, and the return_X_y option."""
    digits = load_digits()
    assert_equal(digits.data.shape, (1797, 64))
    assert_equal(numpy.unique(digits.target).size, 10)
    # return_X_y=True must hand back the same arrays as the Bunch attributes.
    pair = load_digits(return_X_y=True)
    reference = load_digits()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], reference.data)
    assert_array_equal(pair[1], reference.target)
def test_load_digits_n_class_lt_10():
    """Restricting to 9 classes filters both the samples and the labels."""
    subset = load_digits(9)
    assert_equal(subset.data.shape, (1617, 64))
    assert_equal(numpy.unique(subset.target).size, 9)
def test_load_sample_image():
    """The china.jpg sample image loads with the expected dtype and shape."""
    try:
        image = load_sample_image('china.jpg')
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_equal(image.dtype, 'uint8')
        assert_equal(image.shape, (427, 640, 3))
def test_load_missing_sample_image_error():
    """Requesting an unknown sample image raises, provided PIL is present."""
    try:
        try:
            from scipy.misc import imread  # noqa: probe for PIL support
        except ImportError:
            from scipy.misc.pilutil import imread  # noqa
    except ImportError:
        warnings.warn("Could not load sample images, PIL is not available.")
    else:
        assert_raises(AttributeError, load_sample_image,
                      'blop.jpg')
def test_load_diabetes():
    """Diabetes dataset shapes and the return_X_y option."""
    res = load_diabetes()
    assert_equal(res.data.shape, (442, 10))
    # assert_true(res.target.size, 442) treated 442 as the failure *message*
    # and never verified the size; assert_equal enforces the expectation and
    # matches the style of the sibling dataset tests.
    assert_equal(res.target.size, 442)
    assert_equal(len(res.feature_names), 10)
    # test return_X_y option
    X_y_tuple = load_diabetes(return_X_y=True)
    bunch = load_diabetes()
    assert_true(isinstance(X_y_tuple, tuple))
    assert_array_equal(X_y_tuple[0], bunch.data)
    assert_array_equal(X_y_tuple[1], bunch.target)
def test_load_linnerud():
    """Linnerud dataset shapes and the return_X_y option."""
    linnerud = load_linnerud()
    assert_equal(linnerud.data.shape, (20, 3))
    assert_equal(linnerud.target.shape, (20, 3))
    assert_equal(len(linnerud.target_names), 3)
    assert_true(linnerud.DESCR)
    # return_X_y=True must hand back the same arrays as the Bunch attributes.
    pair = load_linnerud(return_X_y=True)
    reference = load_linnerud()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], reference.data)
    assert_array_equal(pair[1], reference.target)
def test_load_iris():
    """Iris dataset shapes and the return_X_y option."""
    iris = load_iris()
    assert_equal(iris.data.shape, (150, 4))
    assert_equal(iris.target.size, 150)
    assert_equal(iris.target_names.size, 3)
    assert_true(iris.DESCR)
    # return_X_y=True must hand back the same arrays as the Bunch attributes.
    pair = load_iris(return_X_y=True)
    reference = load_iris()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], reference.data)
    assert_array_equal(pair[1], reference.target)
def test_load_breast_cancer():
    """Breast-cancer dataset shapes and the return_X_y option."""
    cancer = load_breast_cancer()
    assert_equal(cancer.data.shape, (569, 30))
    assert_equal(cancer.target.size, 569)
    assert_equal(cancer.target_names.size, 2)
    assert_true(cancer.DESCR)
    # return_X_y=True must hand back the same arrays as the Bunch attributes.
    pair = load_breast_cancer(return_X_y=True)
    reference = load_breast_cancer()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], reference.data)
    assert_array_equal(pair[1], reference.target)
def test_load_boston():
    """Boston dataset shapes and the return_X_y option."""
    boston = load_boston()
    assert_equal(boston.data.shape, (506, 13))
    assert_equal(boston.target.size, 506)
    assert_equal(boston.feature_names.size, 13)
    assert_true(boston.DESCR)
    # return_X_y=True must hand back the same arrays as the Bunch attributes.
    pair = load_boston(return_X_y=True)
    reference = load_boston()
    assert_true(isinstance(pair, tuple))
    assert_array_equal(pair[0], reference.data)
    assert_array_equal(pair[1], reference.target)
def test_loads_dumps_bunch():
    """A Bunch survives a pickle round-trip and stays attr/item consistent."""
    original = Bunch(x="x")
    restored = loads(dumps(original))
    restored.x = "y"
    # Attribute assignment must be visible through item access as well.
    assert_equal(restored['x'], restored.x)
def test_bunch_pickle_generated_with_0_16_and_read_with_0_17():
    """Unpickling a 0.16-era Bunch must ignore its stale __dict__ entries."""
    bunch = Bunch(key='original')
    # This reproduces a problem when Bunch pickles have been created
    # with scikit-learn 0.16 and are read with 0.17. Basically there
    # is a surprising behaviour because reading bunch.key uses
    # bunch.__dict__ (which is non-empty for 0.16 Bunch objects)
    # whereas assigning into bunch.key uses bunch.__setattr__. See
    # https://github.com/scikit-learn/scikit-learn/issues/6196 for
    # more details
    bunch.__dict__['key'] = 'set from __dict__'
    bunch_from_pkl = loads(dumps(bunch))
    # After loading from pickle the __dict__ should have been ignored
    assert_equal(bunch_from_pkl.key, 'original')
    assert_equal(bunch_from_pkl['key'], 'original')
    # Making sure that changing the attr does change the value
    # associated with __getitem__ as well
    bunch_from_pkl.key = 'changed'
    assert_equal(bunch_from_pkl.key, 'changed')
    assert_equal(bunch_from_pkl['key'], 'changed')
def test_bunch_dir():
    # check that dir (important for autocomplete) shows attributes
    iris = load_iris()
    assert_true("data" in dir(iris))
| bsd-3-clause |
bregman-arie/ansible | lib/ansible/modules/network/cnos/cnos_backup.py | 35 | 11398 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
#
# Copyright (C) 2017 Lenovo, Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Module to Backup Config to Lenovo Switches
# Lenovo Networking
#
# Standard Ansible module metadata consumed by ansible-doc and the sanity
# checks; not used at runtime.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}
# NOTE: the option below was documented as `rcserverip`, but the module's
# argument_spec (and the EXAMPLES) use `serverip`; the YAML list item under
# serverip's description was also missing the space after '-'.
DOCUMENTATION = '''
---
module: cnos_backup
author: "Dave Kasberg (@dkasberg)"
short_description: Backup the current running or startup configuration to a remote server on devices running Lenovo CNOS
description:
    - This module allows you to work with switch configurations. It provides a
      way to back up the running or startup configurations of a switch to a
      remote server. This is achieved by periodically saving a copy of the
      startup or running configuration of the network device to a remote server
      using FTP, SFTP, TFTP, or SCP. The first step is to create a directory from
      where the remote server can be reached. The next step is to provide the
      full file path of the location where the configuration will be backed up.
      Authentication details required by the remote server must be provided as
      well. This module uses SSH to manage network device configuration.
      The results of the operation will be placed in a directory named 'results'
      that must be created by the user in their local directory to where the playbook is run.
      For more information about this module from Lenovo and customizing it usage for your
      use cases, please visit U(http://systemx.lenovofiles.com/help/index.jsp?topic=%2Fcom.lenovo.switchmgt.ansible.doc%2Fcnos_backup.html)
version_added: "2.3"
extends_documentation_fragment: cnos
options:
    configType:
        description:
            - This specifies what type of configuration will be backed up. The
              choices are the running or startup configurations. There is no
              default value, so it will result in an error if the input is
              incorrect.
        required: Yes
        default: Null
        choices: [running-config, startup-config]
    protocol:
        description:
            - This refers to the protocol used by the network device to
              interact with the remote server to where to upload the backup
              configuration. The choices are FTP, SFTP, TFTP, or SCP. Any other
              protocols will result in error. If this parameter is not specified,
              there is no default value to be used.
        required: Yes
        default: Null
        choices: [SFTP, SCP, FTP, TFTP]
    serverip:
        description:
            - This specifies the IP Address of the remote server to where the
              configuration will be backed up.
        required: Yes
        default: Null
    rcpath:
        description:
            - This specifies the full file path where the configuration file
              will be copied on the remote server. In case the relative path is
              used as the variable value, the root folder for the user of the
              server needs to be specified.
        required: Yes
        default: Null
    serverusername:
        description:
            - Specify the username for the server relating to the protocol
              used.
        required: Yes
        default: Null
    serverpassword:
        description:
            - Specify the password for the server relating to the protocol
              used.
        required: Yes
        default: Null
'''
EXAMPLES = '''
Tasks : The following are examples of using the module cnos_backup. These are written in the main.yml file of the tasks directory.
---
- name: Test Running Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "sftp"
serverip: "10.241.106.118"
rcpath: "/root/cnos/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Running Config Backup -TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: running-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-running-config.txt"
serverusername: "root"
serverpassword: "root123"
- name: Test Startup Config Backup - TFTP
cnos_backup:
host: "{{ inventory_hostname }}"
username: "{{ hostvars[inventory_hostname]['username'] }}"
password: "{{ hostvars[inventory_hostname]['password'] }}"
deviceType: "{{ hostvars[inventory_hostname]['deviceType'] }}"
enablePassword: "{{ hostvars[inventory_hostname]['enablePassword'] }}"
outputfile: "./results/test_backup_{{ inventory_hostname }}_output.txt"
configType: startup-config
protocol: "tftp"
serverip: "10.241.106.118"
rcpath: "/anil/G8272-startup-config.txt"
serverusername: "root"
serverpassword: "root123"
'''
RETURN = '''
msg:
description: Success or failure message
returned: always
type: string
sample: "Config file tranferred to server"
'''
import sys
try:
import paramiko
HAS_PARAMIKO = True
except ImportError:
HAS_PARAMIKO = False
import time
import socket
import array
import json
import time
import re
try:
from ansible.module_utils.network.cnos import cnos
HAS_LIB = True
except:
HAS_LIB = False
from ansible.module_utils.basic import AnsibleModule
from collections import defaultdict
def main():
    """Back up a CNOS switch running/startup config to a remote server.

    Reads connection and transfer parameters from the module arguments,
    drives the switch over an interactive paramiko SSH shell, appends the
    CLI transcript to `outputfile`, and fails the module when the
    transcript contains a CNOS error marker.
    """
    module = AnsibleModule(
        argument_spec=dict(
            outputfile=dict(required=True),
            host=dict(required=True),
            username=dict(required=True),
            password=dict(required=True, no_log=True),
            enablePassword=dict(required=False, no_log=True),
            deviceType=dict(required=True),
            configType=dict(required=True),
            protocol=dict(required=True),
            serverip=dict(required=True),
            rcpath=dict(required=True),
            serverusername=dict(required=False),
            serverpassword=dict(required=False, no_log=True),),
        supports_check_mode=False)
    username = module.params['username']
    password = module.params['password']
    enablePassword = module.params['enablePassword']
    outputfile = module.params['outputfile']
    host = module.params['host']
    deviceType = module.params['deviceType']
    configType = module.params['configType']
    protocol = module.params['protocol'].lower()
    rcserverip = module.params['serverip']
    rcpath = module.params['rcpath']
    serveruser = module.params['serverusername']
    serverpwd = module.params['serverpassword']
    output = ""
    timeout = 90        # seconds allowed for sftp/scp transfers
    tftptimeout = 450   # tftp/ftp transfers are slower; allow more time
    if not HAS_PARAMIKO:
        module.fail_json(msg='paramiko is required for this module')
    # Create instance of SSHClient object
    remote_conn_pre = paramiko.SSHClient()
    # Automatically add untrusted hosts (make sure okay for security policy in
    # your environment)
    remote_conn_pre.set_missing_host_key_policy(paramiko.AutoAddPolicy())
    # initiate SSH connection with the switch
    remote_conn_pre.connect(host, username=username, password=password)
    time.sleep(2)
    # Use invoke_shell to establish an 'interactive session'
    remote_conn = remote_conn_pre.invoke_shell()
    time.sleep(2)
    # Enable and enter configure terminal, then disable paging so the
    # transcript is not interrupted by --More-- prompts.
    output = output + cnos.waitForDeviceResponse("\n", ">", 2, remote_conn)
    output = output + \
        cnos.enterEnableModeForDevice(enablePassword, 3, remote_conn)
    output = output + \
        cnos.waitForDeviceResponse("terminal length 0\n", "#", 2, remote_conn)
    # Dispatch on configType/protocol to the matching cnos transfer helper.
    if configType == 'running-config':
        if protocol in ('tftp', 'ftp'):
            transfer_status = cnos.doRunningConfigBackUp(
                protocol, tftptimeout, rcserverip, rcpath, serveruser,
                serverpwd, remote_conn)
        elif protocol in ('sftp', 'scp'):
            transfer_status = cnos.doSecureRunningConfigBackUp(
                protocol, timeout, rcserverip, rcpath, serveruser,
                serverpwd, remote_conn)
        else:
            transfer_status = "Invalid Protocol option"
    elif configType == 'startup-config':
        if protocol in ('tftp', 'ftp'):
            transfer_status = cnos.doStartupConfigBackUp(
                protocol, tftptimeout, rcserverip, rcpath, serveruser,
                serverpwd, remote_conn)
        elif protocol in ('sftp', 'scp'):
            transfer_status = cnos.doSecureStartupConfigBackUp(
                protocol, timeout, rcserverip, rcpath, serveruser, serverpwd,
                remote_conn)
        else:
            transfer_status = "Invalid Protocol option"
    else:
        transfer_status = "Invalid configType Option"
    output = output + "\n Config Back Up status \n" + transfer_status
    # Append the transcript; 'with' guarantees the handle is closed even on
    # write failure and avoids shadowing the Python 2 builtin 'file'.
    with open(outputfile, "a") as transcript:
        transcript.write(output)
    # Logic to check when changes occur or not
    errorMsg = cnos.checkOutputForError(output)
    if errorMsg is None:
        module.exit_json(changed=True, msg="Config file tranferred to server")
    else:
        module.fail_json(msg=errorMsg)


if __name__ == '__main__':
    main()
| gpl-3.0 |
Bismarrck/tensorflow | tensorflow/contrib/kernel_methods/python/kernel_estimators.py | 10 | 13797 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Estimators that combine explicit kernel mappings with linear models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.contrib import layers
from tensorflow.contrib.kernel_methods.python.mappers import dense_kernel_mapper as dkm
from tensorflow.contrib.learn.python.learn.estimators import estimator
from tensorflow.contrib.learn.python.learn.estimators import head as head_lib
from tensorflow.contrib.learn.python.learn.estimators import linear
from tensorflow.contrib.learn.python.learn.estimators import prediction_key
from tensorflow.python.ops import array_ops
from tensorflow.python.platform import tf_logging as logging
# Keys of the `params` dict handed to the estimator's model_fn.
_FEATURE_COLUMNS = "feature_columns"
_KERNEL_MAPPERS = "kernel_mappers"
_OPTIMIZER = "optimizer"
def _check_valid_kernel_mappers(kernel_mappers):
"""Checks that the input kernel_mappers are valid."""
if kernel_mappers is None:
return True
for kernel_mappers_list in six.itervalues(kernel_mappers):
for kernel_mapper in kernel_mappers_list:
if not isinstance(kernel_mapper, dkm.DenseKernelMapper):
return False
return True
def _check_valid_head(head):
"""Returns true if the provided head is supported."""
if head is None:
return False
# pylint: disable=protected-access
return isinstance(head, head_lib._BinaryLogisticHead) or isinstance(
head, head_lib._MultiClassHead)
# pylint: enable=protected-access
def _update_features_and_columns(features, feature_columns,
                                 kernel_mappers_dict):
  """Updates features and feature_columns based on provided kernel mappers.
  Currently supports the update of `RealValuedColumn`s only.
  Args:
    features: Initial features dict. The key is a `string` (feature column name)
      and the value is a tensor.
    feature_columns: Initial iterable containing all the feature columns to be
      consumed (possibly after being updated) by the model. All items should be
      instances of classes derived from `FeatureColumn`.
    kernel_mappers_dict: A dict from feature column (type: _FeatureColumn) to
      objects inheriting from KernelMapper class.
  Returns:
    updated features and feature_columns based on provided kernel_mappers_dict.
  """
  if kernel_mappers_dict is None:
    return features, feature_columns
  # First construct new columns and features affected by kernel_mappers_dict.
  mapped_features = dict()
  mapped_columns = set()
  for feature_column in kernel_mappers_dict:
    column_name = feature_column.name
    # Currently only mappings over RealValuedColumns are supported.
    if not isinstance(feature_column, layers.feature_column._RealValuedColumn):  # pylint: disable=protected-access
      logging.warning(
          "Updates are currently supported on RealValuedColumns only. Metadata "
          "for FeatureColumn {} will not be updated.".format(column_name))
      continue
    mapped_column_name = column_name + "_MAPPED"
    # Construct new feature columns based on provided kernel_mappers.
    column_kernel_mappers = kernel_mappers_dict[feature_column]
    # The mapped column's dimension is the sum of every mapper's output_dim.
    new_dim = sum(mapper.output_dim for mapper in column_kernel_mappers)
    mapped_columns.add(
        layers.feature_column.real_valued_column(mapped_column_name, new_dim))
    # Get mapped features by concatenating mapped tensors (one mapped tensor
    # per kernel mappers from the list of kernel mappers corresponding to each
    # feature column).
    output_tensors = []
    for kernel_mapper in column_kernel_mappers:
      output_tensors.append(kernel_mapper.map(features[column_name]))
    # NOTE(review): assumes each mapped tensor is rank-2 (batch, dim) so axis-1
    # concat is valid -- confirm against the DenseKernelMapper contract.
    tensor = array_ops.concat(output_tensors, 1)
    mapped_features[mapped_column_name] = tensor
  # Finally update features dict and feature_columns. Copies are made first so
  # the caller's `features` dict and `feature_columns` iterable stay untouched.
  features = features.copy()
  features.update(mapped_features)
  feature_columns = set(feature_columns)
  feature_columns.update(mapped_columns)
  return features, feature_columns
def _kernel_model_fn(features, labels, mode, params, config=None):
  """model_fn for the Estimator using kernel methods.
  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction. See
      `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
      * head: A `Head` instance.
      * feature_columns: An iterable containing all the feature columns used by
        the model.
      * optimizer: string, `Optimizer` object, or callable that defines the
        optimizer to use for training. If `None`, will use a FTRL optimizer.
      * kernel_mappers: Dictionary of kernel mappers to be applied to the input
        features before training.
    config: `RunConfig` object to configure the runtime settings.
  Returns:
    A `ModelFnOps` instance.
  Raises:
    ValueError: If mode is not any of the `ModeKeys`.
  """
  feature_columns = params[_FEATURE_COLUMNS]
  kernel_mappers = params[_KERNEL_MAPPERS]
  updated_features, updated_columns = _update_features_and_columns(
      features, feature_columns, kernel_mappers)
  # NOTE(review): this writes the mapped columns back into the caller-owned
  # params dict (an observable side effect) so that _linear_model_fn picks
  # them up -- presumably intentional; confirm before refactoring.
  params[_FEATURE_COLUMNS] = updated_columns
  return linear._linear_model_fn(  # pylint: disable=protected-access
      updated_features, labels, mode, params, config)
class _KernelEstimator(estimator.Estimator):
  """Generic kernel-based linear estimator."""

  def __init__(self,
               feature_columns=None,
               model_dir=None,
               weight_column_name=None,
               head=None,
               optimizer=None,
               kernel_mappers=None,
               config=None):
    """Constructs a `_KernelEstimator` object."""
    # Validate the inputs up front, before delegating to the base Estimator.
    if not (feature_columns or kernel_mappers):
      raise ValueError(
          "You should set at least one of feature_columns, kernel_mappers.")
    if not _check_valid_kernel_mappers(kernel_mappers):
      raise ValueError("Invalid kernel mappers.")
    if not _check_valid_head(head):
      raise ValueError(
          "head type: {} is not supported. Supported head types: "
          "_BinaryLogisticHead, _MultiClassHead.".format(type(head)))
    # All hyperparameters are forwarded to _kernel_model_fn via params.
    super(_KernelEstimator, self).__init__(
        model_fn=_kernel_model_fn,
        model_dir=model_dir,
        config=config,
        params={
            "head": head,
            _FEATURE_COLUMNS: feature_columns or [],
            _OPTIMIZER: optimizer,
            _KERNEL_MAPPERS: kernel_mappers,
        })
class KernelLinearClassifier(_KernelEstimator):
  """Linear classifier using kernel methods as feature preprocessing.
  It trains a linear model after possibly mapping initial input features into
  a mapped space using explicit kernel mappings. Due to the kernel mappings,
  training a linear classifier in the mapped (output) space can detect
  non-linearities in the input space.
  The user can provide a list of kernel mappers to be applied to all or a subset
  of existing feature_columns. This way, the user can effectively provide 2
  types of feature columns:
  * those passed as elements of feature_columns in the classifier's constructor
  * those appearing as a key of the kernel_mappers dict.
  If a column appears in feature_columns only, no mapping is applied to it. If
  it appears as a key in kernel_mappers, the corresponding kernel mappers are
  applied to it. Note that it is possible that a column appears in both places.
  Currently kernel_mappers are supported for _RealValuedColumns only.
  Example usage:
  ```
  real_column_a = real_valued_column(name='real_column_a',...)
  sparse_column_b = sparse_column_with_hash_bucket(...)
  kernel_mappers = {real_column_a : [RandomFourierFeatureMapper(...)]}
  optimizer = ...
  # real_column_a is used as a feature in both its initial and its transformed
  # (mapped) form. sparse_column_b is not affected by kernel mappers.
  kernel_classifier = KernelLinearClassifier(
      feature_columns=[real_column_a, sparse_column_b],
      model_dir=...,
      optimizer=optimizer,
      kernel_mappers=kernel_mappers)
  # real_column_a is used as a feature in its transformed (mapped) form only.
  # sparse_column_b is not affected by kernel mappers.
  kernel_classifier = KernelLinearClassifier(
      feature_columns=[sparse_column_b],
      model_dir=...,
      optimizer=optimizer,
      kernel_mappers=kernel_mappers)
  # Input builders
  def train_input_fn: # returns x, y
    ...
  def eval_input_fn: # returns x, y
    ...
  kernel_classifier.fit(input_fn=train_input_fn)
  kernel_classifier.evaluate(input_fn=eval_input_fn)
  kernel_classifier.predict(...)
  ```
  Input of `fit` and `evaluate` should have following features, otherwise there
  will be a `KeyError`:
  * if `weight_column_name` is not `None`, a feature with
    `key=weight_column_name` whose value is a `Tensor`.
  * for each `column` in `feature_columns`:
    - if `column` is a `SparseColumn`, a feature with `key=column.name`
      whose `value` is a `SparseTensor`.
    - if `column` is a `WeightedSparseColumn`, two features: the first with
      `key` the id column name, the second with `key` the weight column name.
      Both features' `value` must be a `SparseTensor`.
    - if `column` is a `RealValuedColumn`, a feature with `key=column.name`
      whose `value` is a `Tensor`.
  """
  def __init__(self,
               feature_columns=None,
               model_dir=None,
               n_classes=2,
               weight_column_name=None,
               optimizer=None,
               kernel_mappers=None,
               config=None):
    """Construct a `KernelLinearClassifier` estimator object.
    Args:
      feature_columns: An iterable containing all the feature columns used by
        the model. All items in the set should be instances of classes derived
        from `FeatureColumn`.
      model_dir: Directory to save model parameters, graph etc. This can also be
        used to load checkpoints from the directory into an estimator to
        continue training a previously saved model.
      n_classes: number of label classes. Default is binary classification.
        Note that class labels are integers representing the class index (i.e.
        values from 0 to n_classes-1). For arbitrary label values (e.g. string
        labels), convert to class indices first.
      weight_column_name: A string defining feature column name representing
        weights. It is used to down weight or boost examples during training. It
        will be multiplied by the loss of the example.
      optimizer: The optimizer used to train the model. If specified, it should
        be an instance of `tf.Optimizer`. If `None`, the Ftrl optimizer is used
        by default.
      kernel_mappers: Dictionary of kernel mappers to be applied to the input
        features before training a (linear) model. Keys are feature columns and
        values are lists of mappers to be applied to the corresponding feature
        column. Currently only _RealValuedColumns are supported and therefore
        all mappers should conform to the `DenseKernelMapper` interface (see
        ./mappers/dense_kernel_mapper.py).
      config: `RunConfig` object to configure the runtime settings.
    Returns:
      A `KernelLinearClassifier` estimator.
    Raises:
      ValueError: if n_classes < 2.
      ValueError: if neither feature_columns nor kernel_mappers are provided.
      ValueError: if mappers provided as kernel_mappers values are invalid.
    """
    # Binary vs. multi-class behavior is decided solely by n_classes through
    # the multi_class_head; everything else is forwarded to _KernelEstimator.
    super(KernelLinearClassifier, self).__init__(
        feature_columns=feature_columns,
        model_dir=model_dir,
        weight_column_name=weight_column_name,
        head=head_lib.multi_class_head(
            n_classes=n_classes, weight_column_name=weight_column_name),
        optimizer=optimizer,
        kernel_mappers=kernel_mappers,
        config=config)
  def predict_classes(self, input_fn=None):
    """Runs inference to determine the predicted class per instance.
    Args:
      input_fn: The input function providing features.
    Returns:
      A generator of predicted classes for the features provided by input_fn.
      Each predicted class is represented by its class index (i.e. integer from
      0 to n_classes-1)
    """
    key = prediction_key.PredictionKey.CLASSES
    # Restrict the underlying estimator's output to the classes tensor only.
    predictions = super(KernelLinearClassifier, self).predict(
        input_fn=input_fn, outputs=[key])
    # Lazily unwrap the per-example dict so callers iterate plain indices.
    return (pred[key] for pred in predictions)
  def predict_proba(self, input_fn=None):
    """Runs inference to determine the class probability predictions.
    Args:
      input_fn: The input function providing features.
    Returns:
      A generator of predicted class probabilities for the features provided by
      input_fn.
    """
    key = prediction_key.PredictionKey.PROBABILITIES
    predictions = super(KernelLinearClassifier, self).predict(
        input_fn=input_fn, outputs=[key])
    # Lazily unwrap the per-example dict so callers iterate raw probabilities.
    return (pred[key] for pred in predictions)
| apache-2.0 |
sileht/deb-openstack-keystone | keystone/service.py | 1 | 21260 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 OpenStack LLC
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import uuid
import routes
from keystone import catalog
from keystone import exception
from keystone import identity
from keystone import policy
from keystone import token
from keystone.common import logging
from keystone.common import utils
from keystone.common import wsgi
class AdminRouter(wsgi.ComposingRouter):
    def __init__(self):
        """Wire up the admin API: version, token and extension routes."""
        mapper = routes.Mapper()

        version_controller = VersionController('admin')
        mapper.connect('/',
                       controller=version_controller,
                       action='get_version')

        # Token Operations (registration order preserved).
        auth_controller = TokenController()
        token_routes = [
            ('/tokens', 'authenticate', 'POST'),
            ('/tokens/{token_id}', 'validate_token', 'GET'),
            ('/tokens/{token_id}', 'validate_token_head', 'HEAD'),
            ('/tokens/{token_id}', 'delete_token', 'DELETE'),
            ('/tokens/{token_id}/endpoints', 'endpoints', 'GET'),
        ]
        for path, action, method in token_routes:
            mapper.connect(path,
                           controller=auth_controller,
                           action=action,
                           conditions=dict(method=[method]))

        # Miscellaneous Operations
        extensions_controller = AdminExtensionsController()
        for path, action in [('/extensions', 'get_extensions_info'),
                             ('/extensions/{extension_alias}',
                              'get_extension_info')]:
            mapper.connect(path,
                           controller=extensions_controller,
                           action=action,
                           conditions=dict(method=['GET']))

        super(AdminRouter, self).__init__(mapper, [identity.AdminRouter()])
class PublicRouter(wsgi.ComposingRouter):
    """Routes the keystone v2 public API onto its controllers."""
    def __init__(self):
        mapper = routes.Mapper()
        # Version discovery at the root of the public API.
        mapper.connect('/',
                       controller=VersionController('public'),
                       action='get_version')
        # The public API only exposes authentication, not token admin.
        mapper.connect('/tokens',
                       controller=TokenController(),
                       action='authenticate',
                       conditions=dict(method=['POST']))
        # Extension discovery.
        extensions_controller = PublicExtensionsController()
        mapper.connect('/extensions',
                       controller=extensions_controller,
                       action='get_extensions_info',
                       conditions=dict(method=['GET']))
        mapper.connect('/extensions/{extension_alias}',
                       controller=extensions_controller,
                       action='get_extension_info',
                       conditions=dict(method=['GET']))
        # Delegate identity routes to the identity subsystem's router.
        super(PublicRouter, self).__init__(mapper, [identity.PublicRouter()])
class PublicVersionRouter(wsgi.ComposingRouter):
    """Router for the versionless public endpoint; only lists API versions."""
    def __init__(self):
        mapper = routes.Mapper()
        controller = VersionController('public')
        mapper.connect('/', controller=controller, action='get_versions')
        # No sub-routers: this endpoint exists solely for version discovery.
        super(PublicVersionRouter, self).__init__(mapper, [])
class AdminVersionRouter(wsgi.ComposingRouter):
    """Router for the versionless admin endpoint; only lists API versions."""
    def __init__(self):
        mapper = routes.Mapper()
        controller = VersionController('admin')
        mapper.connect('/', controller=controller, action='get_versions')
        # No sub-routers: this endpoint exists solely for version discovery.
        super(AdminVersionRouter, self).__init__(mapper, [])
class VersionController(wsgi.Application):
    """Serves version-discovery documents for the v2 identity API.

    ``version_type`` ('admin' or 'public') selects which catalog URL
    ('adminURL' / 'publicURL') is advertised in the version links.
    """
    def __init__(self, version_type):
        self.catalog_api = catalog.Manager()
        # Key into a catalog service entry, e.g. 'adminURL' or 'publicURL'.
        self.url_key = "%sURL" % version_type
        super(VersionController, self).__init__()
    def _get_identity_url(self, context):
        # Scan the unscoped service catalog for the 'identity' service and
        # return its URL for this controller's url_key.
        catalog_ref = self.catalog_api.get_catalog(
            context=context,
            user_id=None,
            tenant_id=None)
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                if service == 'identity':
                    return service_ref[self.url_key]
        # No identity service registered in any region.
        raise exception.NotImplemented()
    def _get_versions_list(self, context):
        """The list of versions is dependent on the context."""
        identity_url = self._get_identity_url(context)
        if not identity_url.endswith('/'):
            identity_url = identity_url + '/'
        # Only v2.0 is advertised; the structure follows the OpenStack
        # identity API version-discovery response format.
        versions = {}
        versions['v2.0'] = {
            "id": "v2.0",
            "status": "beta",
            "updated": "2011-11-19T00:00:00Z",
            "links": [
                {
                    "rel": "self",
                    "href": identity_url,
                }, {
                    "rel": "describedby",
                    "type": "text/html",
                    "href": "http://docs.openstack.org/api/openstack-"
                            "identity-service/2.0/content/"
                }, {
                    "rel": "describedby",
                    "type": "application/pdf",
                    "href": "http://docs.openstack.org/api/openstack-"
                            "identity-service/2.0/identity-dev-guide-"
                            "2.0.pdf"
                }
            ],
            "media-types": [
                {
                    "base": "application/json",
                    "type": "application/vnd.openstack.identity-v2.0"
                            "+json"
                }, {
                    "base": "application/xml",
                    "type": "application/vnd.openstack.identity-v2.0"
                            "+xml"
                }
            ]
        }
        return versions
    def get_versions(self, context):
        # List all known versions; HTTP 300 signals the client must choose.
        versions = self._get_versions_list(context)
        return wsgi.render_response(status=(300, 'Multiple Choices'), body={
            "versions": {
                "values": versions.values()
            }
        })
    def get_version(self, context):
        # Describe the single supported version (v2.0).
        versions = self._get_versions_list(context)
        return wsgi.render_response(body={
            "version": versions['v2.0']
        })
class NoopController(wsgi.Application):
    """Controller whose single action does nothing and returns an empty body."""
    def __init__(self):
        super(NoopController, self).__init__()
    def noop(self, context):
        """Return an empty response body."""
        return {}
class TokenController(wsgi.Application):
    """v2 token operations: issue, validate, revoke and format tokens.

    NOTE: this is Python-2-era code (``iteritems``, ``e.message``).
    """
    def __init__(self):
        self.catalog_api = catalog.Manager()
        self.identity_api = identity.Manager()
        self.token_api = token.Manager()
        self.policy_api = policy.Manager()
        super(TokenController, self).__init__()
    def authenticate(self, context, auth=None):
        """Authenticate credentials and return a token.

        Accept ``auth`` as a dict that looks like::

            {
                "auth": {
                    "passwordCredentials": {
                        "username": "test_user",
                        "password": "mypass"
                    },
                    "tenantName": "customer-x"
                }
            }

        The tenant is optional; if not provided the token is "unscoped"
        and can later be exchanged for a scoped token.  Alternatively,
        ``auth`` may carry an existing token plus a tenant, which returns
        a new token scoped to that tenant.

        NOTE(review): if ``auth`` is None or contains neither
        'passwordCredentials' nor 'token', ``metadata_ref``/``token_ref``
        below are referenced before assignment - presumably callers always
        send one of the two forms; confirm upstream validation.
        """
        token_id = uuid.uuid4().hex
        if 'passwordCredentials' in auth:
            username = auth['passwordCredentials'].get('username', '')
            password = auth['passwordCredentials'].get('password', '')
            tenant_name = auth.get('tenantName', None)
            user_id = auth['passwordCredentials'].get('userId', None)
            # A username, when given, takes precedence over an explicit
            # userId: resolve it to the canonical user id.
            if username:
                user_ref = self.identity_api.get_user_by_name(
                    context=context, user_name=username)
                if user_ref:
                    user_id = user_ref['id']
            # more compat: tenantName takes precedence over tenantId
            tenant_id = auth.get('tenantId', None)
            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context, tenant_name=tenant_name)
                if tenant_ref:
                    tenant_id = tenant_ref['id']
            try:
                auth_info = self.identity_api.authenticate(context=context,
                                                           user_id=user_id,
                                                           password=password,
                                                           tenant_id=tenant_id)
                (user_ref, tenant_ref, metadata_ref) = auth_info
                # If the user is disabled don't allow them to authenticate
                if not user_ref.get('enabled', True):
                    raise exception.Forbidden(message='User has been disabled')
            except AssertionError as e:
                # The identity backend signals bad credentials via assert.
                raise exception.Unauthorized(e.message)
            token_ref = self.token_api.create_token(
                context, token_id, dict(id=token_id,
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))
            # Only scoped tokens carry a service catalog.
            if tenant_ref:
                catalog_ref = self.catalog_api.get_catalog(
                    context=context,
                    user_id=user_ref['id'],
                    tenant_id=tenant_ref['id'],
                    metadata=metadata_ref)
            else:
                catalog_ref = {}
        elif 'token' in auth:
            # Token exchange: trade an existing token for a (re)scoped one.
            # NOTE(review): this local ``token`` shadows the imported
            # ``keystone.token`` module for the rest of this branch.
            token = auth['token'].get('id', None)
            tenant_name = auth.get('tenantName')
            # more compat: tenantName takes precedence over tenantId
            if tenant_name:
                tenant_ref = self.identity_api.get_tenant_by_name(
                    context=context, tenant_name=tenant_name)
                tenant_id = tenant_ref['id']
            else:
                tenant_id = auth.get('tenantId', None)
            try:
                old_token_ref = self.token_api.get_token(context=context,
                                                         token_id=token)
            except exception.NotFound:
                raise exception.Unauthorized()
            user_ref = old_token_ref['user']
            tenants = self.identity_api.get_tenants_for_user(context,
                                                             user_ref['id'])
            # NOTE(review): when no tenant is requested, ``tenant_ref``,
            # ``metadata_ref`` and ``catalog_ref`` are never assigned and
            # the create_token call below raises NameError - looks like a
            # latent bug; confirm against later keystone revisions.
            if tenant_id:
                # assert is used as an authz check here; it is stripped
                # when Python runs with -O.
                assert tenant_id in tenants
                tenant_ref = self.identity_api.get_tenant(context=context,
                                                          tenant_id=tenant_id)
                if tenant_ref:
                    metadata_ref = self.identity_api.get_metadata(
                        context=context,
                        user_id=user_ref['id'],
                        tenant_id=tenant_ref['id'])
                    catalog_ref = self.catalog_api.get_catalog(
                        context=context,
                        user_id=user_ref['id'],
                        tenant_id=tenant_ref['id'],
                        metadata=metadata_ref)
                else:
                    metadata_ref = {}
                    catalog_ref = {}
            token_ref = self.token_api.create_token(
                context, token_id, dict(id=token_id,
                                        user=user_ref,
                                        tenant=tenant_ref,
                                        metadata=metadata_ref))
        # TODO(termie): optimize this call at some point and put it into
        #               the return for metadata
        # fill out the roles in the metadata
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))
        logging.debug('TOKEN_REF %s', token_ref)
        return self._format_authenticate(token_ref, roles_ref, catalog_ref)
    def _get_token_ref(self, context, token_id, belongs_to=None):
        """Returns a token if a valid one exists.

        Optionally, limited to a token owned by a specific tenant.
        """
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        token_ref = self.token_api.get_token(context=context,
                                             token_id=token_id)
        if belongs_to:
            # assert-based tenant-ownership check (stripped under -O).
            assert token_ref['tenant']['id'] == belongs_to
        return token_ref
    # admin only
    def validate_token_head(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.
        Identical to ``validate_token``, except does not return a response.
        """
        belongs_to = context['query_string'].get("belongsTo")
        assert self._get_token_ref(context, token_id, belongs_to)
    # admin only
    def validate_token(self, context, token_id):
        """Check that a token is valid.

        Optionally, also ensure that it is owned by a specific tenant.
        Returns metadata about the token along any associated roles.
        """
        belongs_to = context['query_string'].get("belongsTo")
        token_ref = self._get_token_ref(context, token_id, belongs_to)
        # TODO(termie): optimize this call at some point and put it into
        #               the return for metadata
        # fill out the roles in the metadata
        metadata_ref = token_ref['metadata']
        roles_ref = []
        for role_id in metadata_ref.get('roles', []):
            roles_ref.append(self.identity_api.get_role(context, role_id))
        # Get a service catalog if belongs_to is not none
        # This is needed for on-behalf-of requests
        catalog_ref = None
        if belongs_to is not None:
            catalog_ref = self.catalog_api.get_catalog(
                context=context,
                user_id=token_ref['user']['id'],
                tenant_id=token_ref['tenant']['id'],
                metadata=metadata_ref)
        return self._format_token(token_ref, roles_ref, catalog_ref)
    def delete_token(self, context, token_id):
        """Delete a token, effectively invalidating it for authz."""
        # TODO(termie): this stuff should probably be moved to middleware
        self.assert_admin(context)
        self.token_api.delete_token(context=context, token_id=token_id)
    def endpoints(self, context, token_id):
        """Return a list of endpoints available to the token."""
        raise exception.NotImplemented()
    def _format_authenticate(self, token_ref, roles_ref, catalog_ref):
        # Like _format_token, but always attaches the service catalog.
        o = self._format_token(token_ref, roles_ref)
        o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o
    def _format_token(self, token_ref, roles_ref, catalog_ref=None):
        """Build the v2 'access' response document for a token."""
        user_ref = token_ref['user']
        metadata_ref = token_ref['metadata']
        expires = token_ref['expires']
        if expires is not None:
            expires = utils.isotime(expires)
        o = {'access': {'token': {'id': token_ref['id'],
                                  'expires': expires,
                                  },
                        'user': {'id': user_ref['id'],
                                 'name': user_ref['name'],
                                 'username': user_ref['name'],
                                 'roles': roles_ref,
                                 'roles_links': metadata_ref.get('roles_links',
                                                                 [])
                                 }
                        }
             }
        if 'tenant' in token_ref and token_ref['tenant']:
            # v2 responses always report the tenant as enabled.
            token_ref['tenant']['enabled'] = True
            o['access']['token']['tenant'] = token_ref['tenant']
        if catalog_ref is not None:
            o['access']['serviceCatalog'] = self._format_catalog(catalog_ref)
        return o
    def _format_catalog(self, catalog_ref):
        """Munge catalogs from internal to output format.

        Internal catalogs look like::

            {$REGION: {
                $SERVICE: {
                    $key1: $value1,
                    ...
                }
            }}

        The legacy api wants them to look like::

            [{'name': $SERVICE[name],
              'type': $SERVICE,
              'endpoints': [{
                  'tenantId': $tenant_id,
                  ...
                  'region': $REGION,
              }],
              'endpoints_links': [],
            }]
        """
        if not catalog_ref:
            return {}
        services = {}
        for region, region_ref in catalog_ref.iteritems():
            for service, service_ref in region_ref.iteritems():
                # Merge the same service across regions into one entry,
                # accumulating one endpoint per region.
                new_service_ref = services.get(service, {})
                new_service_ref['name'] = service_ref.pop('name')
                new_service_ref['type'] = service
                new_service_ref['endpoints_links'] = []
                service_ref['region'] = region
                endpoints_ref = new_service_ref.get('endpoints', [])
                endpoints_ref.append(service_ref)
                new_service_ref['endpoints'] = endpoints_ref
                services[service] = new_service_ref
        return services.values()
class ExtensionsController(wsgi.Application):
    """Shared base for the public and admin extension-discovery APIs."""
    def __init__(self, extensions=None):
        super(ExtensionsController, self).__init__()
        # Mapping of extension alias -> extension description document.
        self.extensions = extensions or {}
    def get_extensions_info(self, context):
        """Return every registered extension description."""
        return {'extensions': {'values': self.extensions.values()}}
    def get_extension_info(self, context, extension_alias):
        """Return one extension description; NotFound for unknown aliases."""
        if extension_alias in self.extensions:
            return {'extension': self.extensions[extension_alias]}
        raise exception.NotFound(target=extension_alias)
class PublicExtensionsController(ExtensionsController):
    # No public-only extensions are registered; inherits base behavior as-is.
    pass
class AdminExtensionsController(ExtensionsController):
    """Extensions controller that pre-registers the OS-KSADM extension."""
    def __init__(self, *args, **kwargs):
        super(AdminExtensionsController, self).__init__(*args, **kwargs)
        # TODO(dolph): Extensions should obviously provide this information
        #              themselves, but hardcoding it here allows us to match
        #              the API spec in the short term with minimal complexity.
        self.extensions['OS-KSADM'] = {
            'name': 'Openstack Keystone Admin',
            'namespace': 'http://docs.openstack.org/identity/api/ext/'
                         'OS-KSADM/v1.0',
            'alias': 'OS-KSADM',
            'updated': '2011-08-19T13:25:27-06:00',
            'description': 'Openstack extensions to Keystone v2.0 API '
                           'enabling Admin Operations.',
            'links': [
                {
                    'rel': 'describedby',
                    # TODO(dolph): link needs to be revised after
                    #              bug 928059 merges
                    'type': 'text/html',
                    'href': ('https://github.com/openstack/'
                             'identity-api'),
                }
            ]
        }
@logging.fail_gracefully
def public_app_factory(global_conf, **local_conf):
    """Paste app factory for the public API router.

    NOTE(review): ``conf`` is assembled but never used - presumably kept
    for factory-interface symmetry; confirm before removing.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    return PublicRouter()
@logging.fail_gracefully
def admin_app_factory(global_conf, **local_conf):
    """Paste app factory for the admin API router.

    NOTE(review): ``conf`` is assembled but never used - presumably kept
    for factory-interface symmetry; confirm before removing.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    return AdminRouter()
@logging.fail_gracefully
def public_version_app_factory(global_conf, **local_conf):
    """Paste app factory for the public version-discovery router.

    NOTE(review): ``conf`` is assembled but never used - presumably kept
    for factory-interface symmetry; confirm before removing.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    return PublicVersionRouter()
@logging.fail_gracefully
def admin_version_app_factory(global_conf, **local_conf):
    """Paste app factory for the admin version-discovery router.

    NOTE(review): ``conf`` is assembled but never used - presumably kept
    for factory-interface symmetry; confirm before removing.
    """
    conf = global_conf.copy()
    conf.update(local_conf)
    return AdminVersionRouter()
| apache-2.0 |
kevinmel2000/brython | www/src/Lib/sys.py | 2 | 5195 | # hack to return special attributes
from _sys import *
_getframe = Getframe
from javascript import JSObject
# Browser/runtime capability flags and basic sys constants, all sourced
# from the global __BRYTHON__ object injected by the Brython runtime.
has_local_storage=__BRYTHON__.has_local_storage
has_session_storage = __BRYTHON__.has_session_storage
has_json=__BRYTHON__.has_json
brython_debug_mode = __BRYTHON__.debug
# Scripts run in the browser have no real command line.
argv = ['__main__']
base_exec_prefix = __BRYTHON__.brython_path
base_prefix = __BRYTHON__.brython_path
builtin_module_names=__BRYTHON__.builtin_module_names
# JavaScript typed arrays are little-endian on all supported platforms.
byteorder='little'
def exc_info():
    """Return (type, value, traceback) for the exception being handled."""
    current = __BRYTHON__.current_exception
    return (type(current), current, current.traceback)
# Mirror CPython's sys.exec_prefix / sys.executable using the location of
# the brython.js distribution.
exec_prefix = __BRYTHON__.brython_path
executable = __BRYTHON__.brython_path+'/brython.js'
def exit(i=None):
    """Exit the program by raising SystemExit.

    Mirrors CPython's ``sys.exit``: the optional argument becomes the exit
    status carried on the exception (``None`` means success).  The previous
    implementation discarded the argument and always raised
    ``SystemExit('')``, losing the caller's exit status.
    """
    raise SystemExit(i)
class flag_class:
    """Mimics ``sys.flags``: interpreter option flags as int attributes."""
    # (attribute name, default value) pairs; only hash_randomization is on.
    _DEFAULTS = (
        ('debug', 0), ('inspect', 0), ('interactive', 0), ('optimize', 0),
        ('dont_write_bytecode', 0), ('no_user_site', 0), ('no_site', 0),
        ('ignore_environment', 0), ('verbose', 0), ('bytes_warning', 0),
        ('quiet', 0), ('hash_randomization', 1),
    )
    def __init__(self):
        for name, value in self._DEFAULTS:
            setattr(self, name, value)
flags=flag_class()
def getfilesystemencoding(*args, **kw):
    """getfilesystemencoding() -> string

    Return the encoding used to convert Unicode filenames into operating
    system filenames; always UTF-8 under Brython.

    NOTE(review): a zero-argument ``getfilesystemencoding`` is defined
    again further down this module and shadows this definition.
    """
    return 'utf-8'
# 32-bit-style limits (JavaScript numbers back Brython ints).
maxsize=2147483647
maxunicode=1114111
# Imported from _sys
# path = __BRYTHON__.path
# #path_hooks = list(JSObject(__BRYTHON__.path_hooks))
# meta_path=__BRYTHON__.meta_path
platform="brython"
prefix = __BRYTHON__.brython_path
# Human-readable version string assembled from the runtime's version info.
version = '.'.join(str(x) for x in __BRYTHON__.version_info[:3])
version += " (default, %s) \n[Javascript 1.5] on Brython" % __BRYTHON__.compiled_date
hexversion = 0x03000000 # python 3.0
class __version_info(object):
    """``sys.version_info`` work-alike built from Brython's version list.

    Supports indexing, comparison against plain tuples, and a ``str()``
    form similar to CPython's ``sys.version_info`` repr.
    """
    def __init__(self, version_info):
        self.version_info = version_info
        self.major = version_info[0]
        self.minor = version_info[1]
        self.micro = version_info[2]
        self.releaselevel = version_info[3]
        self.serial = version_info[4]
    def __getitem__(self, index):
        # Slicing the underlying list yields a tuple, like CPython.
        if isinstance(self.version_info[index], list):
            return tuple(self.version_info[index])
        return self.version_info[index]
    def hexversion(self):
        # Fall back to the major-only form when minor/micro contain
        # non-numeric parts (rc tags, etc.).  The original put the fallback
        # ``return`` in a ``finally`` block, which unconditionally overrode
        # the ``try`` return, so the full version was never produced.
        try:
            return '0%d0%d0%d' % (self.major, self.minor, self.micro)
        except TypeError:
            return '0%d0000' % (self.major)
    def __str__(self):
        _s = "sys.version(major=%d, minor=%d, micro=%d, releaselevel='%s', serial=%d)"
        return _s % (self.major, self.minor, self.micro,
                     self.releaselevel, self.serial)
    def _key(self):
        # Comparisons only consider the numeric (major, minor, micro) part.
        return (self.major, self.minor, self.micro)
    def _unsupported(self, other):
        # The original raised the undefined name ``Error`` (a NameError at
        # runtime); raise the conventional TypeError instead.
        return TypeError("cannot compare version info with %r" % (other,))
    def __eq__(self, other):
        if isinstance(other, tuple):
            return self._key() == other
        raise self._unsupported(other)
    def __ge__(self, other):
        if isinstance(other, tuple):
            return self._key() >= other
        raise self._unsupported(other)
    def __gt__(self, other):
        if isinstance(other, tuple):
            return self._key() > other
        raise self._unsupported(other)
    def __le__(self, other):
        if isinstance(other, tuple):
            return self._key() <= other
        raise self._unsupported(other)
    def __lt__(self, other):
        if isinstance(other, tuple):
            return self._key() < other
        raise self._unsupported(other)
    def __ne__(self, other):
        if isinstance(other, tuple):
            return self._key() != other
        raise self._unsupported(other)
# eventually this needs to be the real python version such as 3.0, 3.1, etc
# Module-level sys.version_info equivalent built from the runtime data.
version_info=__version_info(__BRYTHON__.version_info)
class _implementation:
    """Mimics ``sys.implementation`` for the Brython runtime."""
    def __init__(self):
        self.name='brython'
        self.version = __version_info(__BRYTHON__.implementation)
        self.hexversion = self.version.hexversion()
        # No bytecode cache in the browser.
        self.cache_tag=None
    def __repr__(self):
        return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
    def __str__(self):
        # Same rendering as __repr__, mirroring a plain namespace object.
        return "namespace(name='%s' version=%s hexversion='%s')" % (self.name, self.version, self.hexversion)
implementation=_implementation()
class _hash_info:
    """Mimics ``sys.hash_info``: parameters of the hashing scheme."""
    def __init__(self):
        # The original wrote ``self.width=32,`` whose trailing comma made
        # ``width`` the tuple ``(32,)``; it is a plain int here.
        self.width = 32
        self.modulus = 2147483647
        self.inf = 314159
        self.nan = 0
        self.imag = 1000003
        self.algorithm = 'siphash24'
        self.hash_bits = 64
        self.seed_bits = 128
        # ``cutoff`` was previously a class attribute while every other
        # field was per-instance; exposed per-instance for consistency
        # (instances still read the same value).
        self.cutoff = 0
    def __repr__(self):
        # The original method was misnamed ``__repr`` (single trailing
        # underscore pair missing), so ``repr()`` never used it.
        return ("sys.hash_info(width=%d, modulus=%d, inf=%d, nan=%d, "
                "imag=%d, algorithm='%s', hash_bits=%d, seed_bits=%d, "
                "cutoff=%d)" % (self.width, self.modulus, self.inf,
                                self.nan, self.imag, self.algorithm,
                                self.hash_bits, self.seed_bits,
                                self.cutoff))
hash_info=_hash_info()
# No -W options in the browser environment.
warnoptions=[]
# NOTE(review): this redefinition shadows the earlier *args/**kw version
# of getfilesystemencoding defined above.
def getfilesystemencoding():
    return 'utf-8'
## __stdxxx__ contains the original values of sys.stdxxx
__stdout__ = __BRYTHON__.stdout
__stderr__ = __BRYTHON__.stderr
__stdin__ = __BRYTHON__.stdin
#delete objects not in python sys module namespace
# NOTE(review): _hash_info and __version_info are not deleted here, unlike
# JSObject and _implementation - presumably an oversight; confirm.
del JSObject
del _implementation
del _implementation
| bsd-3-clause |
iagcl/data_pipeline | tests/stream/test_json_oracle.py | 1 | 3105 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from data_pipeline.stream.oracle_message import OracleMessage
def test_reset():
    """reset() must clear every field back to the empty string."""
    fields = ('operation_code', 'table_name', 'statement_id', 'commit_lsn',
              'commit_timestamp', 'message_sequence', 'multiline_flag',
              'commit_statement', 'primary_key_fields')
    m = OracleMessage()
    # A freshly constructed message starts out with all fields empty.
    for name in fields:
        assert getattr(m, name) == ''
    # Populate a couple of fields ...
    m.operation_code = 'opcode'
    m.table_name = 'tablename'
    assert m.operation_code == 'opcode'
    assert m.table_name == 'tablename'
    # ... the remaining fields stay empty ...
    for name in fields[2:]:
        assert getattr(m, name) == ''
    # ... and reset() wipes everything again.
    m.reset()
    for name in fields:
        assert getattr(m, name) == ''
def test_deserialise_reset():
    """deserialise() must overwrite every field from the given mapping."""
    m = OracleMessage()
    # Pre-populate a few fields so we can see them be replaced.
    m.operation_code = 'opcode'
    m.table_name = 'tablename'
    m.multiline_flag = '1'
    assert m.operation_code == 'opcode'
    assert m.table_name == 'tablename'
    assert m.multiline_flag == '1'
    for name in ('statement_id', 'commit_lsn', 'commit_timestamp',
                 'message_sequence', 'commit_statement',
                 'primary_key_fields'):
        assert getattr(m, name) == ''
    message = {
        'operation_code': 'newopcode',
        'table_name': 'newtablename',
        'statement_id': 'an_id',
        'commit_lsn': '',
        'commit_timestamp': '',
        'message_sequence': '',
        'multiline_flag': '0',
        'commit_statement': '',
        'primary_key_fields': '',
    }
    m.deserialise(message)
    # Every field now reflects the deserialised message.
    assert m.operation_code == 'newopcode'
    assert m.table_name == 'newtablename'
    assert m.statement_id == 'an_id'
    assert m.multiline_flag == '0'
    for name in ('commit_lsn', 'commit_timestamp', 'message_sequence',
                 'commit_statement', 'primary_key_fields'):
        assert getattr(m, name) == ''
| apache-2.0 |
lesserwhirls/scipy-cwt | scipy/weave/examples/object.py | 12 | 1619 | """ Attribute and method access on Python objects from C++.
Note: std::cout type operations currently crash python...
Not sure what is up with this...
"""
import scipy.weave as weave
#----------------------------------------------------------------------------
# get/set attribute and call methods example
#----------------------------------------------------------------------------
class Foo(object):
    """Small example object exercised from C++: one attribute, one method."""
    def __init__(self):
        # Starting value read and updated from the inline C++ code below.
        self.val = 1
    def inc(self, amount):
        """Increase ``val`` by ``amount`` and return the new value."""
        self.val = self.val + amount
        return self.val
# Demonstrate attribute get/set and method calls on a Python object from
# inline C++ (Python 2 script; ``print`` is a statement here).
obj = Foo()
code = """
       py::tuple result(3);
       int i = obj.attr("val");
       result[0] = i;
       py::tuple args(1);
       args[0] = 2;
       i = obj.mcall("inc",args);
       result[1] = i;
       obj.set_attr("val",5);
       i = obj.attr("val");
       result[2] = i;
       return_val = result;
       """
print 'initial, inc(2), set(5)/get:', weave.inline(code,['obj'])
#----------------------------------------------------------------------------
# indexing of values.
#----------------------------------------------------------------------------
# Demonstrate indexed access on a Python sequence from inline C++: every
# element of the UserList is overwritten in the loop below.
from UserList import UserList
obj = UserList([1,[1,2],"hello"])
code = """
       int i;
       // find obj length and access each of its items
       //std::cout << "UserList items: ";
       //for(i = 0; i < obj.length(); i++)
       //    std::cout << obj[i].str() << " ";
       //std::cout << std::endl;
       // assign new values to each of its items
       for(i = 0; i < obj.length(); i++)
           obj[i] = "goodbye";
       """
weave.inline(code,['obj'])
print "obj with new values:", obj
| bsd-3-clause |
aequitas/home-assistant | homeassistant/components/thingspeak/__init__.py | 7 | 1978 | """Support for submitting data to Thingspeak."""
import logging
from requests.exceptions import RequestException
import voluptuous as vol
from homeassistant.const import (
CONF_API_KEY, CONF_ID, CONF_WHITELIST, STATE_UNAVAILABLE, STATE_UNKNOWN)
from homeassistant.helpers import event, state as state_helper
import homeassistant.helpers.config_validation as cv
_LOGGER = logging.getLogger(__name__)
DOMAIN = 'thingspeak'
# Seconds allowed for each request to the ThingSpeak API.
TIMEOUT = 5
# Configuration: a write API key, the numeric channel id, and the single
# entity whose state changes are forwarded to the channel.
CONFIG_SCHEMA = vol.Schema({
    DOMAIN: vol.Schema({
        vol.Required(CONF_API_KEY): cv.string,
        vol.Required(CONF_ID): int,
        vol.Required(CONF_WHITELIST): cv.string,
    }),
}, extra=vol.ALLOW_EXTRA)
def setup(hass, config):
    """Set up the Thingspeak environment.

    Opens the configured ThingSpeak channel and registers a state-change
    listener that forwards numeric states of the whitelisted entity to
    field1 of the channel.  Returns False if the channel is unreachable.
    """
    import thingspeak
    conf = config[DOMAIN]
    api_key = conf.get(CONF_API_KEY)
    channel_id = conf.get(CONF_ID)
    # Single entity id whose state updates are forwarded.
    entity = conf.get(CONF_WHITELIST)
    try:
        channel = thingspeak.Channel(
            channel_id, write_key=api_key, timeout=TIMEOUT)
        # Probe the channel once so misconfiguration fails setup early.
        channel.get()
    except RequestException:
        _LOGGER.error("Error while accessing the ThingSpeak channel. "
                      "Please check that the channel exists and your "
                      "API key is correct")
        return False
    def thingspeak_listener(entity_id, old_state, new_state):
        """Listen for new events and send them to Thingspeak."""
        # Ignore removals and states that carry no usable value.
        if new_state is None or new_state.state in (
                STATE_UNKNOWN, '', STATE_UNAVAILABLE):
            return
        try:
            if new_state.entity_id != entity:
                return
            # Non-numeric states cannot be charted; skip them silently.
            _state = state_helper.state_as_number(new_state)
        except ValueError:
            return
        try:
            channel.update({'field1': _state})
        except RequestException:
            _LOGGER.error(
                "Error while sending value '%s' to Thingspeak", _state)
    event.track_state_change(hass, entity, thingspeak_listener)
    return True
| apache-2.0 |
telwertowski/QGIS | python/plugins/processing/algs/gdal/Datasources2Vrt.py | 4 | 4979 | # -*- coding: utf-8 -*-
"""
***************************************************************************
Datasources2Vrt.py
---------------------
Date : May 2015
Copyright : (C) 2015 by Luigi Pirelli
Email : luipir at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Luigi Pirelli'
__date__ = 'May 2015'
__copyright__ = '(C) 2015, Luigi Pirelli'
import html
from qgis.core import (QgsProcessing,
QgsProcessingException,
QgsProcessingParameterMultipleLayers,
QgsProcessingParameterBoolean,
QgsProcessingParameterVectorDestination,
QgsProcessingOutputString
)
from processing.algs.gdal.GdalAlgorithm import GdalAlgorithm
from processing.algs.gdal.GdalUtils import GdalUtils
class Datasources2Vrt(GdalAlgorithm):
    """Processing algorithm that wraps vector layers into an OGR VRT file."""
    # Parameter / output identifiers.
    INPUT = 'INPUT'
    UNIONED = 'UNIONED'
    OUTPUT = 'OUTPUT'
    VRT_STRING = 'VRT_STRING'
    def createCustomParametersWidget(self, parent):
        # No custom GUI; use the auto-generated parameters dialog.
        return None
    def group(self):
        return self.tr('Vector miscellaneous')
    def groupId(self):
        return 'vectormiscellaneous'
    def name(self):
        return 'buildvirtualvector'
    def displayName(self):
        return self.tr('Build virtual vector')
    def tags(self):
        return ['ogr', 'gdal', 'vrt', 'create']
    def shortHelpString(self):
        return self.tr("This algorithm creates a virtual layer that contains a set of vector layers.\n\n"
                       "The output virtual layer will not be opened in the current project.")
    def __init__(self):
        super().__init__()
    def initAlgorithm(self, config=None):
        self.addParameter(QgsProcessingParameterMultipleLayers(self.INPUT,
                                                               self.tr('Input datasources'),
                                                               QgsProcessing.TypeVector))
        self.addParameter(QgsProcessingParameterBoolean(self.UNIONED,
                                                        self.tr('Create "unioned" VRT'),
                                                        defaultValue=False))
        # Local destination-parameter subclass so the output file defaults
        # to the .vrt extension.
        class ParameterVectorVrtDestination(QgsProcessingParameterVectorDestination):
            def __init__(self, name, description):
                super().__init__(name, description)
            def clone(self):
                copy = ParameterVectorVrtDestination(self.name(), self.description())
                return copy
            def type(self):
                return 'vrt_vector_destination'
            def defaultFileExtension(self):
                return 'vrt'
        self.addParameter(ParameterVectorVrtDestination(self.OUTPUT,
                                                        self.tr('Virtual vector')))
        self.addOutput(QgsProcessingOutputString(self.VRT_STRING,
                                                 self.tr('Virtual string')))
    def processAlgorithm(self, parameters, context, feedback):
        """Assemble the OGRVRTDataSource XML and write it to the output file."""
        input_layers = self.parameterAsLayerList(parameters, self.INPUT, context)
        unioned = self.parameterAsBoolean(parameters, self.UNIONED, context)
        vrtPath = self.parameterAsOutputLayer(parameters, self.OUTPUT, context)
        vrt = '<OGRVRTDataSource>'
        # When requested, wrap all layers into a single union layer.
        if unioned:
            vrt += '<OGRVRTUnionLayer name="UnionedLayer">'
        total = 100.0 / len(input_layers) if input_layers else 0
        for current, layer in enumerate(input_layers):
            if feedback.isCanceled():
                break
            basePath = GdalUtils.ogrConnectionStringFromLayer(layer)
            layerName = GdalUtils.ogrLayerName(layer.source())
            # html.escape(..., True) also escapes quotes for attribute values.
            vrt += '<OGRVRTLayer name="{}">'.format(html.escape(layerName, True))
            vrt += '<SrcDataSource>{}</SrcDataSource>'.format(html.escape(basePath, True))
            vrt += '<SrcLayer>{}</SrcLayer>'.format(html.escape(layerName, True))
            vrt += '</OGRVRTLayer>'
            feedback.setProgress(int(current * total))
        if unioned:
            vrt += '</OGRVRTUnionLayer>'
        vrt += '</OGRVRTDataSource>'
        with open(vrtPath, 'w', encoding='utf-8') as f:
            f.write(vrt)
        return {self.OUTPUT: vrtPath, self.VRT_STRING: vrt}
    def commandName(self):
        # Not backed by a command-line gdal tool.
        return ''
| gpl-2.0 |
otherness-space/myProject | my_project_001/lib/python2.7/site-packages/django/contrib/gis/geos/prototypes/__init__.py | 314 | 1305 | """
This module contains all of the GEOS ctypes function prototypes. Each
prototype handles the interaction between the GEOS library and Python
via ctypes.
"""
# Coordinate sequence routines.
from django.contrib.gis.geos.prototypes.coordseq import (create_cs, get_cs,
cs_clone, cs_getordinate, cs_setordinate, cs_getx, cs_gety, cs_getz,
cs_setx, cs_sety, cs_setz, cs_getsize, cs_getdims)
# Geometry routines.
from django.contrib.gis.geos.prototypes.geom import (from_hex, from_wkb, from_wkt,
create_point, create_linestring, create_linearring, create_polygon, create_collection,
destroy_geom, get_extring, get_intring, get_nrings, get_geomn, geom_clone,
geos_normalize, geos_type, geos_typeid, geos_get_srid, geos_set_srid,
get_dims, get_num_coords, get_num_geoms,
to_hex, to_wkb, to_wkt)
# Miscellaneous routines.
from django.contrib.gis.geos.prototypes.misc import *
# Predicates
from django.contrib.gis.geos.prototypes.predicates import (geos_hasz, geos_isempty,
geos_isring, geos_issimple, geos_isvalid, geos_contains, geos_crosses,
geos_disjoint, geos_equals, geos_equalsexact, geos_intersects,
geos_intersects, geos_overlaps, geos_relatepattern, geos_touches, geos_within)
# Topology routines
from django.contrib.gis.geos.prototypes.topology import *
| mit |
niltonlk/nest-simulator | pynest/nest/logic/hl_api_logic.py | 20 | 1815 | # -*- coding: utf-8 -*-
#
# hl_api_logic.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
from ..ll_api import sli_func
from ..lib.hl_api_types import CreateParameter
__all__ = [
'conditional',
]
def conditional(condition, param_if_true, param_if_false):
    """
    Yields one value or another, based on the condition.

    Parameters
    ----------
    condition : Parameter
        A comparing Parameter, created with the usual comparators.
    param_if_true : [Parameter | float]
        Value or Parameter used to get a value used if the condition evaluates to true.
    param_if_false : [Parameter | float]
        Value or Parameter used to get a value used if the condition evaluates to false.

    Returns
    -------
    Parameter:
        Object representing the conditional.
    """
    def _to_parameter(value):
        # Plain numbers are wrapped in a constant Parameter; Parameter
        # objects pass through unchanged.
        if isinstance(value, (int, float)):
            return CreateParameter('constant', {'value': float(value)})
        return value
    return sli_func("conditional", condition,
                    _to_parameter(param_if_true),
                    _to_parameter(param_if_false))
| gpl-2.0 |
Alwnikrotikz/visvis | backends/backend_gtk.py | 3 | 13118 | # -*- coding: utf-8 -*-
# Copyright (C) 2012, Robert Schroll
#
# Visvis is distributed under the terms of the (new) BSD License.
# The full license can be found in 'license.txt'.
""" The GTK backend.
"""
import os, sys
import visvis
from visvis import BaseFigure, events, constants
from visvis.core.misc import getResourceDir
import gtk
import gtk.gtkgl
import gobject
import OpenGL.GL as gl
# Map GTK button event types onto visvis mouse event names.
MOUSEMAP = {gtk.gdk.BUTTON_PRESS : 'down',
            gtk.gdk.BUTTON_RELEASE : 'up',
            gtk.gdk._2BUTTON_PRESS : 'double'}

# Map GTK key symbols onto visvis key constants.
KEYMAP = { gtk.keysyms.Shift_L: constants.KEY_SHIFT,
           gtk.keysyms.Shift_R: constants.KEY_SHIFT,
           gtk.keysyms.Alt_L: constants.KEY_ALT,
           gtk.keysyms.Alt_R: constants.KEY_ALT,
           gtk.keysyms.Control_L: constants.KEY_CONTROL,
           gtk.keysyms.Control_R: constants.KEY_CONTROL,
           gtk.keysyms.Left: constants.KEY_LEFT,
           gtk.keysyms.Up: constants.KEY_UP,
           gtk.keysyms.Right: constants.KEY_RIGHT,
           gtk.keysyms.Down: constants.KEY_DOWN,
           gtk.keysyms.Page_Up: constants.KEY_PAGEUP,
           gtk.keysyms.Page_Down: constants.KEY_PAGEDOWN,
           gtk.keysyms.KP_Enter: constants.KEY_ENTER,
           gtk.keysyms.Return: constants.KEY_ENTER,
           gtk.keysyms.Escape: constants.KEY_ESCAPE,
           gtk.keysyms.Delete: constants.KEY_DELETE,
         }

# Make uppercase letters be lowercase.
# The original loop used range(ord('A'), ord('Z')), whose exclusive upper
# bound left 'Z' unmapped; ord('Z') + 1 includes it.
for i in range(ord('A'), ord('Z') + 1):
    KEYMAP[i] = i + 32
def modifiers(event):
    """Convert the GTK state into a tuple of active modifier keys."""
    if not hasattr(event, 'state'):
        return ()
    # Check masks in a fixed order so the result tuple is deterministic:
    # shift, control, alt - matching the original implementation.
    checks = ((gtk.gdk.SHIFT_MASK, constants.KEY_SHIFT),
              (gtk.gdk.CONTROL_MASK, constants.KEY_CONTROL),
              (gtk.gdk.MOD1_MASK, constants.KEY_ALT))
    active = []
    for mask, key in checks:
        if event.state & mask:
            active.append(key)
    return tuple(active)
class GlCanvas(gtk.gtkgl.DrawingArea):
    """OpenGL drawing area hosting a visvis Figure.

    Translates GTK signals (mouse, keyboard, focus, expose, resize)
    into the corresponding visvis events on the wrapped figure.
    """

    def __init__(self, figure, *args, **kw):
        gtk.gtkgl.DrawingArea.__init__(self)
        # Opt in to the input events visvis needs; GTK only delivers
        # event types a widget explicitly subscribes to.
        self.add_events(gtk.gdk.BUTTON_PRESS_MASK |
                        gtk.gdk.BUTTON_RELEASE_MASK |
                        gtk.gdk.POINTER_MOTION_MASK |
                        gtk.gdk.POINTER_MOTION_HINT_MASK |
                        gtk.gdk.KEY_PRESS_MASK |
                        gtk.gdk.KEY_RELEASE_MASK |
                        gtk.gdk.ENTER_NOTIFY_MASK |
                        gtk.gdk.LEAVE_NOTIFY_MASK |
                        gtk.gdk.FOCUS_CHANGE_MASK)
        # Required for the canvas to receive keyboard events.
        self.set_property('can-focus', True)
        self.figure = figure
        # Configure OpenGL framebuffer.
        # Try to get a double-buffered framebuffer configuration,
        # if not successful then try to get a single-buffered one.
        display_mode = (gtk.gdkgl.MODE_RGB |
                        gtk.gdkgl.MODE_DEPTH |
                        gtk.gdkgl.MODE_DOUBLE)
        try:
            glconfig = gtk.gdkgl.Config(mode=display_mode)
        except gtk.gdkgl.NoMatches:
            display_mode &= ~gtk.gdkgl.MODE_DOUBLE
            glconfig = gtk.gdkgl.Config(mode=display_mode)
        self.set_gl_capability(glconfig)
        # Connect the relevant signals.
        self.connect('configure_event', self._on_configure_event)
        self.connect('expose_event', self._on_expose_event)
        self.connect('delete_event', self._on_delete_event)
        self.connect('motion_notify_event', self._on_motion_notify_event)
        self.connect('button_press_event', self._on_button_event)
        self.connect('button_release_event', self._on_button_event)
        self.connect('scroll_event', self._on_scroll_event)
        self.connect('key_press_event', self._on_key_press_event)
        self.connect('key_release_event', self._on_key_release_event)
        self.connect('enter_notify_event', self._on_enter_notify_event)
        self.connect('leave_notify_event', self._on_leave_notify_event)
        self.connect('focus_in_event', self._on_focus_in_event)

    def _on_configure_event(self, *args):
        # Widget size changed: let the figure re-layout itself.
        if self.figure:
            self.figure._OnResize()
        return False

    def _on_delete_event(self, *args):
        if self.figure:
            self.figure.Destroy()
        return True  # Let figure.Destroy() destroy this widget

    def _on_motion_notify_event(self, widget, event):
        # With POINTER_MOTION_HINT_MASK set, GTK sends motion hints;
        # querying the pointer requests the next motion event.
        if event.is_hint:
            x, y, state = event.window.get_pointer()
        else:
            x, y, state = event.x, event.y, event.state
        if self.figure:
            self.figure._GenerateMouseEvent('motion', x, y, 0, modifiers(event))

    def _on_button_event(self, widget, event):
        if event.type in MOUSEMAP:
            # GTK buttons: 1=left, 3=right -> visvis 1/2; others map to 0.
            button = {1:1, 3:2}.get(event.button, 0)
            self.figure._GenerateMouseEvent(MOUSEMAP[event.type], event.x, event.y, button, modifiers(event))

    def _on_scroll_event(self, widget, event):
        # Exactly one of horizontal/vertical is nonzero per event.
        horizontal = {gtk.gdk.SCROLL_LEFT: 1.0, gtk.gdk.SCROLL_RIGHT: -1.0}.get(event.direction, 0)
        vertical = {gtk.gdk.SCROLL_UP: 1.0, gtk.gdk.SCROLL_DOWN: -1.0}.get(event.direction, 0)
        if horizontal or vertical:
            self.figure._GenerateMouseEvent('scroll', event.x, event.y, horizontal, vertical, modifiers(event))

    def _on_key_press_event(self, widget, event):
        # Unmapped keysyms are passed through unchanged.
        self.figure._GenerateKeyEvent('keydown', KEYMAP.get(event.keyval, event.keyval),
                                      event.string, modifiers(event))

    def _on_key_release_event(self, widget, event):
        self.figure._GenerateKeyEvent('keyup', KEYMAP.get(event.keyval, event.keyval),
                                      event.string, modifiers(event))

    def _on_enter_notify_event(self, widget, event):
        if self.figure:
            ev = self.figure.eventEnter
            ev.Set(0,0,0)
            ev.Fire()

    def _on_leave_notify_event(self, widget, event):
        if self.figure:
            ev = self.figure.eventLeave
            ev.Set(0,0,0)
            ev.Fire()

    def _on_focus_in_event(self, widget, event):
        # Make this figure the "current" one for the visvis module API.
        if self.figure:
            BaseFigure._currentNr = self.figure.nr

    def _on_expose_event(self, *args):
        # Obtain a reference to the OpenGL drawable
        # and rendering context.
        gldrawable = self.get_gl_drawable()
        glcontext = self.get_gl_context()
        # OpenGL begin
        if not gldrawable.gl_begin(glcontext):
            return False
        self.figure.OnDraw()
        # OpenGL end
        gldrawable.gl_end()

    def set_current(self):
        """Make this canvas's GL context the current one."""
        gldrawable = self.get_gl_drawable()
        glcontext = self.get_gl_context()
        gldrawable.make_current(glcontext)

    def swap_buffers(self):
        """Present the back buffer, or flush when single-buffered."""
        gldrawable = self.get_gl_drawable()
        glcontext = self.get_gl_context()
        if gldrawable.is_double_buffered():
            gldrawable.swap_buffers()
        else:
            gl.glFlush()
class Figure(BaseFigure):
    """GTK implementation of the visvis BaseFigure, backed by a GlCanvas.

    The widget may be created lazily (pass ``create_widget=False``) so
    the figure can be embedded in an existing GTK application.
    """

    def __init__(self, *args, **kw):
        self._widget = None
        self._widget_args = (args, kw)
        if kw.get('create_widget', True):
            self.CreateWidget()
        BaseFigure.__init__(self)

    def CreateWidget(self):
        """Create the Figure's widget if necessary, and return the
        widget."""
        if self._widget is None:
            # Make sure there is a native app and the timer is started
            # (also when embedded)
            app.Create()
            # create gl widget
            updatePosition = False
            args, kwargs = self._widget_args
            if 'create_widget' in kwargs:
                updatePosition = True
                del(kwargs['create_widget'])
            self._widget = GlCanvas(self, *args, **kwargs)
            if updatePosition:
                self.position._Changed()
        return self._widget

    def _SetCurrent(self):
        """Make figure the current OpenGL context."""
        if self._widget and not self._destroyed:
            self._widget.set_current()

    def _SwapBuffers(self):
        """Swap the memory and screen buffers."""
        if self._widget and not self._destroyed:
            self._widget.swap_buffers()

    def _RedrawGui(self):
        """Make the widget redraw itself."""
        if self._widget:
            self._widget.queue_draw()

    def _ProcessGuiEvents(self):
        """Process all events in queue."""
        app.ProcessEvents()

    def _SetTitle(self, title):
        """Set the title, when not used in application."""
        if self._widget and not self._destroyed:
            window = self._widget.parent
            # Only a top-level window has a title; embedded widgets don't.
            if isinstance(window, gtk.Window):
                window.set_title(title)

    def _SetPosition(self, x, y, w, h):
        """Set the position and size of the widget. If it is embedded,
        ignore the x and y coordinates."""
        if self._widget and not self._destroyed:
            self._widget.set_size_request(w, h)
            self._widget.queue_resize()
            window = self._widget.parent
            if isinstance(window, gtk.Window):
                window.move(x, y)
                window.resize(w, h)

    def _GetPosition(self):
        """Get the widget's position."""
        if self._widget and not self._destroyed:
            alloc = self._widget.allocation
            x, y = alloc.x, alloc.y
            window = self._widget.parent
            # For a top-level window report the window position instead.
            if isinstance(window, gtk.Window):
                x, y = window.get_position()
            return x, y, alloc.width, alloc.height
        return 0, 0, 0, 0

    def _Close(self, widget):
        """Close the widget."""
        if widget is None:
            widget = self._widget
        if widget:
            window = widget.parent
            # The destroy() method causes IPython to emit on error on my system
            # the first time it happens (almar)
            if isinstance(window, gtk.Window):
                window.destroy()
            else:
                widget.destroy()
        # If no more figures, quit
        # If in script-mode, we nicely quit. If in interactive mode, we won't.
        if len(BaseFigure._figures) == 0:
            app.Quit()
def newFigure():
    """Create a figure and put it in a window."""
    figure = Figure()
    window = gtk.Window()
    # Set icon
    try:
        iconfile = os.path.join(getResourceDir(), 'visvis_icon_gtk.png')
        window.set_icon_from_file(iconfile)
    except Exception:
        # Icon is cosmetic only; ignore a missing or unreadable file.
        pass
    # From GTKGL example
    if sys.platform != 'win32':
        window.set_resize_mode(gtk.RESIZE_IMMEDIATE)
    window.set_reallocate_redraws(True)
    window.add(figure._widget)
    size = visvis.settings.figureSize
    figure._widget.set_size_request(size[0], size[1])
    window.set_geometry_hints(min_width=100, min_height=100)
    window.show_all()
    # Route window close to the canvas handler so the figure is destroyed.
    window.connect('delete-event', figure._widget._on_delete_event)
    # Initialize OpenGl
    figure.DrawNow()
    return figure
class VisvisEventsTimer:
    """Repeating 10 ms timer, driven by the gobject main loop, that
    processes visvis events; it can be started and stopped at will.
    """

    def __init__(self):
        # Not ticking until Start() is called.
        self._running = False

    def Start(self):
        """Begin firing; a no-op when already running."""
        if self._running:
            return
        self._running = True
        self._PostTimeout()

    def Stop(self):
        """Stop firing at the next tick."""
        self._running = False

    def _PostTimeout(self):
        # Schedule _Fire 10 ms from now on the gobject main loop.
        gobject.timeout_add(10, self._Fire)

    def _Fire(self):
        """Timer callback: process events, report whether to reschedule."""
        if not self._running:
            return False  # Returning False cancels the gobject timeout.
        events.processVisvisEvents()
        return True  # Keep the timeout alive.
class App(events.App):
    """App()

    Application class to wrap the GUI applications in a class
    with a simple interface that is the same for all backends.

    This is the GTK implementation.
    """

    def __init__(self):
        # Create timer
        self._timer = VisvisEventsTimer()

    def _GetNativeApp(self):
        """Ensure the GTK app exists."""
        # Start timer
        self._timer.Start()
        # Prevent quiting when used interactively
        if not hasattr(gtk, 'vv_do_quit'):
            gtk.vv_do_quit = False
        # Return singleton gtk object, which represents the gtk application
        return gtk

    def _ProcessEvents(self):
        """Process GTK events."""
        gtk.gdk.threads_enter()  # enter/leave prevents IPython -gthread to hang
        while gtk.events_pending():
            gtk.main_iteration(False)
        gtk.gdk.threads_leave()

    def _Run(self):
        """Enter GTK mainloop."""
        self._GetNativeApp()
        if gtk.main_level() == 0:
            # We need to start the mainloop. This means we will also
            # have to kill the mainloop when the last figure is closed.
            gtk.vv_do_quit = True
            gtk.main()

    def Quit(self):
        """Leave the GTK mainloop, but only if this App started it."""
        if gtk.vv_do_quit:  # We started the mainloop, so we better kill it.
            gtk.main_quit()
app = App()
| bsd-3-clause |
person142/scipy | scipy/linalg/tests/test_sketches.py | 9 | 3968 | """Tests for _sketches.py."""
import numpy as np
from numpy.testing import assert_, assert_equal
from scipy.linalg import clarkson_woodruff_transform
from scipy.linalg._sketches import cwt_matrix
from scipy.sparse import issparse, rand
from scipy.sparse.linalg import norm
class TestClarksonWoodruffTransform(object):
    """
    Tests for the Clarkson-Woodruff (CountSketch) transform.

    Test matrices and seeds are fixed so the suite is deterministic.
    """
    # set seed for generating test matrices
    rng = np.random.RandomState(seed=1179103485)

    # Test matrix parameters
    n_rows = 2000
    n_cols = 100
    density = 0.1

    # Sketch matrix dimensions
    n_sketch_rows = 200

    # Seeds to test with
    seeds = [1755490010, 934377150, 1391612830, 1752708722, 2008891431,
             1302443994, 1521083269, 1501189312, 1126232505, 1533465685]

    A_dense = rng.randn(n_rows, n_cols)
    A_csc = rand(
        n_rows, n_cols, density=density, format='csc', random_state=rng,
    )
    A_csr = rand(
        n_rows, n_cols, density=density, format='csr', random_state=rng,
    )
    A_coo = rand(
        n_rows, n_cols, density=density, format='coo', random_state=rng,
    )

    # Collect the test matrices
    test_matrices = [
        A_dense, A_csc, A_csr, A_coo,
    ]

    # Test vector with norm ~1
    x = rng.randn(n_rows, 1) / np.sqrt(n_rows)

    def test_sketch_dimensions(self):
        """A sketch always has shape (n_sketch_rows, n_cols)."""
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                assert_(sketch.shape == (self.n_sketch_rows, self.n_cols))

    def test_seed_returns_identical_transform_matrix(self):
        """cwt_matrix is deterministic given a seed.

        The sketching matrix depends only on its dimensions and the seed,
        never on the data, so there is no need to repeat this check once
        per input matrix (the previous version looped over
        ``test_matrices`` without using the loop variable, running the
        identical assertions four times).
        """
        for seed in self.seeds:
            S1 = cwt_matrix(
                self.n_sketch_rows, self.n_rows, seed=seed
            ).todense()
            S2 = cwt_matrix(
                self.n_sketch_rows, self.n_rows, seed=seed
            ).todense()
            assert_equal(S1, S2)

    def test_seed_returns_identically(self):
        """Sketching the same input twice with one seed gives equal results."""
        for A in self.test_matrices:
            for seed in self.seeds:
                sketch1 = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                sketch2 = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed
                )
                # Sparse inputs yield sparse sketches; densify to compare.
                if issparse(sketch1):
                    sketch1 = sketch1.todense()
                if issparse(sketch2):
                    sketch2 = sketch2.todense()
                assert_equal(sketch1, sketch2)

    def test_sketch_preserves_frobenius_norm(self):
        # Given the probabilistic nature of the sketches
        # we run the test multiple times and check that
        # we pass all/almost all the tries.
        n_errors = 0
        for A in self.test_matrices:
            if issparse(A):
                true_norm = norm(A)
            else:
                true_norm = np.linalg.norm(A)
            for seed in self.seeds:
                sketch = clarkson_woodruff_transform(
                    A, self.n_sketch_rows, seed=seed,
                )
                if issparse(sketch):
                    sketch_norm = norm(sketch)
                else:
                    sketch_norm = np.linalg.norm(sketch)
                # Count norms off by more than 10% as failures.
                if np.abs(true_norm - sketch_norm) > 0.1 * true_norm:
                    n_errors += 1
        assert_(n_errors == 0)

    def test_sketch_preserves_vector_norm(self):
        """With enough sketch rows, a vector's norm is preserved to 50%."""
        n_errors = 0
        # Rows needed for eps=0.5, delta=0.01 per the CW guarantee.
        n_sketch_rows = int(np.ceil(2. / (0.01 * 0.5**2)))
        true_norm = np.linalg.norm(self.x)
        for seed in self.seeds:
            sketch = clarkson_woodruff_transform(
                self.x, n_sketch_rows, seed=seed,
            )
            sketch_norm = np.linalg.norm(sketch)
            if np.abs(true_norm - sketch_norm) > 0.5 * true_norm:
                n_errors += 1
        assert_(n_errors == 0)
| bsd-3-clause |
tpbarron/rlflow | rlflow/memories/experience_replay.py | 1 | 1772 | import collections
import random
import numpy as np
class ExperienceReplay(object):
    """Fixed-capacity ring buffer of (state, action, reward, next_state,
    terminal) transitions with uniform random sampling, as used by
    off-policy RL algorithms such as DQN.
    """

    # Convenience record type for a single transition.
    SARS = collections.namedtuple('SARS', ['S1', 'A', 'R', 'S2', 'T'])

    def __init__(self, state_shape=(1,), max_size=1000000):
        """Pre-allocate storage for up to ``max_size`` transitions.

        state_shape: shape of a single state observation.
        max_size: capacity; once full, the oldest entry is overwritten.
        """
        self.max_size = max_size
        self.cur_size = 0    # number of valid entries stored so far
        self.next_ind = 0    # write cursor; wraps around at max_size
        self.S1 = np.empty([max_size] + list(state_shape))
        self.A = np.empty((max_size,))
        self.R = np.empty((max_size,))
        self.S2 = np.empty([max_size] + list(state_shape))
        self.T = np.empty((max_size,))

    def add_element(self, s1, a, r, s2, t):
        """Store one transition, overwriting the oldest entry when full.

        s1/s2: state before/after the action; a: action; r: reward;
        t: terminal flag, stored as 0/1.
        """
        i = self.next_ind
        self.S1[i] = s1
        self.A[i] = a
        self.R[i] = r
        self.S2[i] = s2
        self.T[i] = int(t)
        # Advance the cursor circularly; grow cur_size until full.
        self.next_ind = (i + 1) % self.max_size
        if self.cur_size < self.max_size:
            self.cur_size += 1

    def sample(self, n):
        """Sample ``n`` distinct transitions uniformly at random.

        Returns a tuple (s1, a, r, s2, t) of arrays whose first
        dimension is ``n``. Raises ValueError when fewer than ``n``
        transitions are stored (sampling is without replacement).
        """
        if n > self.cur_size:
            raise ValueError(
                "Cannot sample %d transitions; only %d stored"
                % (n, self.cur_size))
        indices = np.random.choice(self.cur_size, n, replace=False)
        s1 = np.take(self.S1, indices, axis=0)
        a = np.take(self.A, indices)
        r = np.take(self.R, indices)
        s2 = np.take(self.S2, indices, axis=0)
        t = np.take(self.T, indices)
        return s1, a, r, s2, t

    def size(self):
        """Number of transitions currently stored."""
        return self.cur_size
| mit |
it-projects-llc/misc-addons | odoo_backup_sh/models/res_config_settings.py | 1 | 2915 | # Copyright 2019 Dinar Gabbasov <https://it-projects.info/team/GabbasovDinar>
# Copyright 2019 Ivan Yelizariev <https://it-projects.info/team/yelizariev>
# License MIT (https://opensource.org/licenses/MIT).
from odoo import api, fields, models
class ResConfigSettings(models.TransientModel):
    """Settings-screen additions for odoo_backup_sh: choice of backup
    storage backend (Amazon S3 / Google Drive / Dropbox) and related
    credentials, persisted as ir.config_parameter records.
    """

    _inherit = "res.config.settings"

    # Installable storage-backend modules; checking the box installs them.
    module_odoo_backup_sh_google_disk = fields.Boolean(
        string="Google Drive", help="Use Google Drive to store Database"
    )
    module_odoo_backup_sh_dropbox = fields.Boolean(
        string="Dropbox", help="Use Dropbox to store Database"
    )
    # True when the corresponding module exists in the addons path;
    # filled in by get_values(), never stored.
    available_module_odoo_backup_sh_dropbox = fields.Boolean()
    available_module_odoo_backup_sh_google_disk = fields.Boolean()
    # Amazon S3 credentials/location, mirrored into config parameters.
    odoo_backup_sh_amazon_bucket_name = fields.Char(
        "S3 Bucket", config_parameter="odoo_backup_sh.s3_bucket_name", default=""
    )
    odoo_backup_sh_amazon_access_key_id = fields.Char(
        "Access Key ID", config_parameter="odoo_backup_sh.aws_access_key_id", default=""
    )
    odoo_backup_sh_amazon_secret_access_key = fields.Char(
        "Secret Access Key",
        config_parameter="odoo_backup_sh.aws_secret_access_key",
        default="",
    )
    odoo_backup_sh_private_s3_dir = fields.Char(
        "Path",
        config_parameter="odoo_backup_sh.private_s3_dir",
        default="",
        help="Folder in S3 Bucket, e.g. odoo-backups",
    )
    odoo_backup_sh_odoo_oauth_uid = fields.Char(
        "Odoo OAuth", config_parameter="odoo_backup_sh.odoo_oauth_uid", default=""
    )
    # Transient flag: set when the S3 folder was edited while a key is set.
    private_s3_dir_changed = fields.Boolean(default=False)

    def get_values(self):
        """Extend defaults with availability flags for the optional
        backend modules (present in the addons path or not)."""
        res = super(ResConfigSettings, self).get_values()
        IrModule = self.env["ir.module.module"]
        for m in ["odoo_backup_sh_google_disk", "odoo_backup_sh_dropbox"]:
            res["available_module_" + m] = bool(
                IrModule.sudo().search([("name", "=", m)], limit=1)
            )
        return res

    @api.onchange("odoo_backup_sh_amazon_access_key_id")
    def switch_to_private_s3(self):
        """Drop the Odoo OAuth uid when the AWS access key is changed to
        a new non-empty value, switching the target to private S3."""
        if self.odoo_backup_sh_amazon_access_key_id and self.odoo_backup_sh_amazon_access_key_id != self.env[
            "ir.config_parameter"
        ].get_param(
            "odoo_backup_sh.aws_access_key_id"
        ):
            # when Access Key is changed to new non-empty value
            self.odoo_backup_sh_odoo_oauth_uid = ""

    @api.onchange("odoo_backup_sh_private_s3_dir")
    def track_dir_changes(self):
        """Flag that the private S3 folder differs from the stored value;
        only meaningful when an AWS access key is configured."""
        current_value = self.env["ir.config_parameter"].get_param(
            "odoo_backup_sh.private_s3_dir"
        )
        has_key = self.env["ir.config_parameter"].get_param(
            "odoo_backup_sh.aws_access_key_id"
        )
        if (
            self.odoo_backup_sh_private_s3_dir
            and has_key
            and self.odoo_backup_sh_private_s3_dir != current_value
        ):
            self.private_s3_dir_changed = True
| mit |
berendkleinhaneveld/VTK | ThirdParty/Twisted/twisted/test/test_jelly.py | 30 | 18360 | # Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Test cases for L{jelly} object serialization.
"""
import datetime
import decimal
from twisted.spread import jelly, pb
from twisted.trial import unittest
from twisted.test.proto_helpers import StringTransport
class TestNode(object, jelly.Jellyable):
    """
    A small tree node used to exercise jellying of new-style class
    instances: carries a class attribute, an id, and parent/child links.
    """
    classAttr = 4

    def __init__(self, parent=None):
        self.parent = parent
        self.children = []
        if parent is None:
            self.id = 1
        else:
            # Children get consecutive ids and register with the parent.
            self.id = parent.id + 1
            parent.children.append(self)
class A:
    """
    Dummy (old-style) class used as a jelly serialization fixture.
    """
    def amethod(self):
        """
        Method to be used in serialization tests.
        """
def afunc(self):
    """
    A dummy function to test function serialization.
    """
class B:
    """
    Dummy (old-style) class used as a jelly serialization fixture.
    """
    def bmethod(self):
        """
        Method to be used in serialization tests.
        """
class C:
    """
    Dummy (old-style) class used as a jelly serialization fixture.
    """
    def cmethod(self):
        """
        Method to be used in serialization tests.
        """
class D(object):
    """
    Dummy new-style class (no attributes of its own), used to test
    jellying of new-style instances and of class objects.
    """
class E(object):
    """
    Dummy new-style class using ``__slots__``, with explicit
    pickle-style state accessors so instances can be (un)jellied
    despite having no ``__dict__``.
    """
    __slots__ = ("x", "y")

    def __init__(self, x=None, y=None):
        self.x, self.y = x, y

    def __getstate__(self):
        # Expose the slot values as a plain mapping.
        return {"x": self.x, "y": self.y}

    def __setstate__(self, snapshot):
        self.x = snapshot["x"]
        self.y = snapshot["y"]
class SimpleJellyTest:
    """Minimal two-attribute fixture; sameness is judged attribute-wise."""

    def __init__(self, x, y):
        self.x, self.y = x, y

    def isTheSameAs(self, other):
        """Return True when *other* carries an equal attribute dict."""
        mine, theirs = self.__dict__, other.__dict__
        return mine == theirs
class JellyTestCase(unittest.TestCase):
"""
Testcases for L{jelly} module serialization.
@cvar decimalData: serialized version of decimal data, to be used in tests.
@type decimalData: C{list}
"""
def _testSecurity(self, inputList, atom):
"""
Helper test method to test security options for a type.
@param inputList: a sample input for the type.
@param inputList: C{list}
@param atom: atom identifier for the type.
@type atom: C{str}
"""
c = jelly.jelly(inputList)
taster = jelly.SecurityOptions()
taster.allowBasicTypes()
# By default, it should succeed
jelly.unjelly(c, taster)
taster.allowedTypes.pop(atom)
# But it should raise an exception when disallowed
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, c, taster)
def test_methodSelfIdentity(self):
a = A()
b = B()
a.bmethod = b.bmethod
b.a = a
im_ = jelly.unjelly(jelly.jelly(b)).a.bmethod
self.assertEqual(im_.im_class, im_.im_self.__class__)
def test_methodsNotSelfIdentity(self):
"""
If a class change after an instance has been created, L{jelly.unjelly}
shoud raise a C{TypeError} when trying to unjelly the instance.
"""
a = A()
b = B()
c = C()
a.bmethod = c.cmethod
b.a = a
savecmethod = C.cmethod
del C.cmethod
try:
self.assertRaises(TypeError, jelly.unjelly, jelly.jelly(b))
finally:
C.cmethod = savecmethod
def test_newStyle(self):
n = D()
n.x = 1
n2 = D()
n.n2 = n2
n.n3 = n2
c = jelly.jelly(n)
m = jelly.unjelly(c)
self.assertIsInstance(m, D)
self.assertIdentical(m.n2, m.n3)
def test_newStyleWithSlots(self):
"""
A class defined with I{slots} can be jellied and unjellied with the
values for its attributes preserved.
"""
n = E()
n.x = 1
c = jelly.jelly(n)
m = jelly.unjelly(c)
self.assertIsInstance(m, E)
self.assertEqual(n.x, 1)
def test_typeOldStyle(self):
"""
Test that an old style class type can be jellied and unjellied
to the original type.
"""
t = [C]
r = jelly.unjelly(jelly.jelly(t))
self.assertEqual(t, r)
def test_typeNewStyle(self):
"""
Test that a new style class type can be jellied and unjellied
to the original type.
"""
t = [D]
r = jelly.unjelly(jelly.jelly(t))
self.assertEqual(t, r)
def test_typeBuiltin(self):
"""
Test that a builtin type can be jellied and unjellied to the original
type.
"""
t = [str]
r = jelly.unjelly(jelly.jelly(t))
self.assertEqual(t, r)
def test_dateTime(self):
dtn = datetime.datetime.now()
dtd = datetime.datetime.now() - dtn
input = [dtn, dtd]
c = jelly.jelly(input)
output = jelly.unjelly(c)
self.assertEqual(input, output)
self.assertNotIdentical(input, output)
def test_decimal(self):
"""
Jellying L{decimal.Decimal} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [decimal.Decimal('9.95'),
decimal.Decimal(0),
decimal.Decimal(123456),
decimal.Decimal('-78.901')]
c = jelly.jelly(inputList)
output = jelly.unjelly(c)
self.assertEqual(inputList, output)
self.assertNotIdentical(inputList, output)
decimalData = ['list', ['decimal', 995, -2], ['decimal', 0, 0],
['decimal', 123456, 0], ['decimal', -78901, -3]]
def test_decimalUnjelly(self):
"""
Unjellying the s-expressions produced by jelly for L{decimal.Decimal}
instances should result in L{decimal.Decimal} instances with the values
represented by the s-expressions.
This test also verifies that C{self.decimalData} contains valid jellied
data. This is important since L{test_decimalMissing} re-uses
C{self.decimalData} and is expected to be unable to produce
L{decimal.Decimal} instances even though the s-expression correctly
represents a list of them.
"""
expected = [decimal.Decimal('9.95'),
decimal.Decimal(0),
decimal.Decimal(123456),
decimal.Decimal('-78.901')]
output = jelly.unjelly(self.decimalData)
self.assertEqual(output, expected)
def test_decimalSecurity(self):
"""
By default, C{decimal} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [decimal.Decimal('9.95')]
self._testSecurity(inputList, "decimal")
def test_set(self):
"""
Jellying C{set} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [set([1, 2, 3])]
output = jelly.unjelly(jelly.jelly(inputList))
self.assertEqual(inputList, output)
self.assertNotIdentical(inputList, output)
def test_frozenset(self):
"""
Jellying C{frozenset} instances and then unjellying the result
should produce objects which represent the values of the original
inputs.
"""
inputList = [frozenset([1, 2, 3])]
output = jelly.unjelly(jelly.jelly(inputList))
self.assertEqual(inputList, output)
self.assertNotIdentical(inputList, output)
def test_setSecurity(self):
"""
By default, C{set} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [set([1, 2, 3])]
self._testSecurity(inputList, "set")
def test_frozensetSecurity(self):
"""
By default, C{frozenset} objects should be allowed by
L{jelly.SecurityOptions}. If not allowed, L{jelly.unjelly} should raise
L{jelly.InsecureJelly} when trying to unjelly it.
"""
inputList = [frozenset([1, 2, 3])]
self._testSecurity(inputList, "frozenset")
def test_oldSets(self):
"""
Test jellying C{sets.Set}: it should serialize to the same thing as
C{set} jelly, and be unjellied as C{set} if available.
"""
inputList = [jelly._sets.Set([1, 2, 3])]
inputJelly = jelly.jelly(inputList)
self.assertEqual(inputJelly, jelly.jelly([set([1, 2, 3])]))
output = jelly.unjelly(inputJelly)
# Even if the class is different, it should coerce to the same list
self.assertEqual(list(inputList[0]), list(output[0]))
if set is jelly._sets.Set:
self.assertIsInstance(output[0], jelly._sets.Set)
else:
self.assertIsInstance(output[0], set)
def test_oldImmutableSets(self):
"""
Test jellying C{sets.ImmutableSet}: it should serialize to the same
thing as C{frozenset} jelly, and be unjellied as C{frozenset} if
available.
"""
inputList = [jelly._sets.ImmutableSet([1, 2, 3])]
inputJelly = jelly.jelly(inputList)
self.assertEqual(inputJelly, jelly.jelly([frozenset([1, 2, 3])]))
output = jelly.unjelly(inputJelly)
# Even if the class is different, it should coerce to the same list
self.assertEqual(list(inputList[0]), list(output[0]))
if frozenset is jelly._sets.ImmutableSet:
self.assertIsInstance(output[0], jelly._sets.ImmutableSet)
else:
self.assertIsInstance(output[0], frozenset)
def test_simple(self):
"""
Simplest test case.
"""
self.failUnless(SimpleJellyTest('a', 'b').isTheSameAs(
SimpleJellyTest('a', 'b')))
a = SimpleJellyTest(1, 2)
cereal = jelly.jelly(a)
b = jelly.unjelly(cereal)
self.failUnless(a.isTheSameAs(b))
def test_identity(self):
"""
Test to make sure that objects retain identity properly.
"""
x = []
y = (x)
x.append(y)
x.append(y)
self.assertIdentical(x[0], x[1])
self.assertIdentical(x[0][0], x)
s = jelly.jelly(x)
z = jelly.unjelly(s)
self.assertIdentical(z[0], z[1])
self.assertIdentical(z[0][0], z)
def test_unicode(self):
x = unicode('blah')
y = jelly.unjelly(jelly.jelly(x))
self.assertEqual(x, y)
self.assertEqual(type(x), type(y))
def test_stressReferences(self):
reref = []
toplevelTuple = ({'list': reref}, reref)
reref.append(toplevelTuple)
s = jelly.jelly(toplevelTuple)
z = jelly.unjelly(s)
self.assertIdentical(z[0]['list'], z[1])
self.assertIdentical(z[0]['list'][0], z)
def test_moreReferences(self):
a = []
t = (a,)
a.append((t,))
s = jelly.jelly(t)
z = jelly.unjelly(s)
self.assertIdentical(z[0][0][0], z)
def test_typeSecurity(self):
"""
Test for type-level security of serialization.
"""
taster = jelly.SecurityOptions()
dct = jelly.jelly({})
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, dct, taster)
def test_newStyleClasses(self):
j = jelly.jelly(D)
uj = jelly.unjelly(D)
self.assertIdentical(D, uj)
def test_lotsaTypes(self):
"""
Test for all types currently supported in jelly
"""
a = A()
jelly.unjelly(jelly.jelly(a))
jelly.unjelly(jelly.jelly(a.amethod))
items = [afunc, [1, 2, 3], not bool(1), bool(1), 'test', 20.3,
(1, 2, 3), None, A, unittest, {'a': 1}, A.amethod]
for i in items:
self.assertEqual(i, jelly.unjelly(jelly.jelly(i)))
def test_setState(self):
global TupleState
class TupleState:
def __init__(self, other):
self.other = other
def __getstate__(self):
return (self.other,)
def __setstate__(self, state):
self.other = state[0]
def __hash__(self):
return hash(self.other)
a = A()
t1 = TupleState(a)
t2 = TupleState(a)
t3 = TupleState((t1, t2))
d = {t1: t1, t2: t2, t3: t3, "t3": t3}
t3prime = jelly.unjelly(jelly.jelly(d))["t3"]
self.assertIdentical(t3prime.other[0].other, t3prime.other[1].other)
def test_classSecurity(self):
"""
Test for class-level security of serialization.
"""
taster = jelly.SecurityOptions()
taster.allowInstancesOf(A, B)
a = A()
b = B()
c = C()
# add a little complexity to the data
a.b = b
a.c = c
# and a backreference
a.x = b
b.c = c
# first, a friendly insecure serialization
friendly = jelly.jelly(a, taster)
x = jelly.unjelly(friendly, taster)
self.assertIsInstance(x.c, jelly.Unpersistable)
# now, a malicious one
mean = jelly.jelly(a)
self.assertRaises(jelly.InsecureJelly, jelly.unjelly, mean, taster)
self.assertIdentical(x.x, x.b, "Identity mismatch")
# test class serialization
friendly = jelly.jelly(A, taster)
x = jelly.unjelly(friendly, taster)
self.assertIdentical(x, A, "A came back: %s" % x)
def test_unjellyable(self):
"""
Test that if Unjellyable is used to deserialize a jellied object,
state comes out right.
"""
class JellyableTestClass(jelly.Jellyable):
pass
jelly.setUnjellyableForClass(JellyableTestClass, jelly.Unjellyable)
input = JellyableTestClass()
input.attribute = 'value'
output = jelly.unjelly(jelly.jelly(input))
self.assertEqual(output.attribute, 'value')
self.assertIsInstance(output, jelly.Unjellyable)
def test_persistentStorage(self):
perst = [{}, 1]
def persistentStore(obj, jel, perst = perst):
perst[1] = perst[1] + 1
perst[0][perst[1]] = obj
return str(perst[1])
def persistentLoad(pidstr, unj, perst = perst):
pid = int(pidstr)
return perst[0][pid]
a = SimpleJellyTest(1, 2)
b = SimpleJellyTest(3, 4)
c = SimpleJellyTest(5, 6)
a.b = b
a.c = c
c.b = b
jel = jelly.jelly(a, persistentStore = persistentStore)
x = jelly.unjelly(jel, persistentLoad = persistentLoad)
self.assertIdentical(x.b, x.c.b)
self.failUnless(perst[0], "persistentStore was not called.")
self.assertIdentical(x.b, a.b, "Persistent storage identity failure.")
def test_newStyleClassesAttributes(self):
n = TestNode()
n1 = TestNode(n)
n11 = TestNode(n1)
n2 = TestNode(n)
# Jelly it
jel = jelly.jelly(n)
m = jelly.unjelly(jel)
# Check that it has been restored ok
self._check_newstyle(n, m)
def _check_newstyle(self, a, b):
self.assertEqual(a.id, b.id)
self.assertEqual(a.classAttr, 4)
self.assertEqual(b.classAttr, 4)
self.assertEqual(len(a.children), len(b.children))
for x, y in zip(a.children, b.children):
self._check_newstyle(x, y)
def test_referenceable(self):
"""
A L{pb.Referenceable} instance jellies to a structure which unjellies to
a L{pb.RemoteReference}. The C{RemoteReference} has a I{luid} that
matches up with the local object key in the L{pb.Broker} which sent the
L{Referenceable}.
"""
ref = pb.Referenceable()
jellyBroker = pb.Broker()
jellyBroker.makeConnection(StringTransport())
j = jelly.jelly(ref, invoker=jellyBroker)
unjellyBroker = pb.Broker()
unjellyBroker.makeConnection(StringTransport())
uj = jelly.unjelly(j, invoker=unjellyBroker)
self.assertIn(uj.luid, jellyBroker.localObjects)
class ClassA(pb.Copyable, pb.RemoteCopy):
    """Copyable that owns a ClassB pointing back at it — a two-object
    reference cycle used by the circular-reference tests below."""
    def __init__(self):
        self.ref = ClassB(self)
class ClassB(pb.Copyable, pb.RemoteCopy):
    """Counterpart of ClassA; holds the back-reference closing the cycle."""
    def __init__(self, ref):
        self.ref = ref
class CircularReferenceTestCase(unittest.TestCase):
    """
    Tests for circular references handling in the jelly/unjelly process.
    """

    def test_simpleCircle(self):
        """
        A two-object cycle (a.ref.ref is a) survives a jelly/unjelly
        round trip with identity preserved.
        """
        jelly.setUnjellyableForClass(ClassA, ClassA)
        jelly.setUnjellyableForClass(ClassB, ClassB)
        a = jelly.unjelly(jelly.jelly(ClassA()))
        self.assertIdentical(a.ref.ref, a,
                             "Identity not preserved in circular reference")

    def test_circleWithInvoker(self):
        """
        The round trip also preserves identity when jellying with an
        invoker, as the PB machinery does.
        """
        class DummyInvokerClass:
            pass
        dummyInvoker = DummyInvokerClass()
        dummyInvoker.serializingPerspective = None
        a0 = ClassA()
        jelly.setUnjellyableForClass(ClassA, ClassA)
        jelly.setUnjellyableForClass(ClassB, ClassB)
        j = jelly.jelly(a0, invoker=dummyInvoker)
        a1 = jelly.unjelly(j)
        self.failUnlessIdentical(a1.ref.ref, a1,
                                 "Identity not preserved in circular reference")

    def test_set(self):
        """
        Check that a C{set} can contain a circular reference and be serialized
        and unserialized without losing the reference.
        """
        s = set()
        a = SimpleJellyTest(s, None)
        s.add(a)
        res = jelly.unjelly(jelly.jelly(a))
        self.assertIsInstance(res.x, set)
        self.assertEqual(list(res.x), [res])

    def test_frozenset(self):
        """
        Check that a C{frozenset} can contain a circular reference and be
        serialized and unserialized without losing the reference.
        """
        a = SimpleJellyTest(None, None)
        s = frozenset([a])
        a.x = s
        res = jelly.unjelly(jelly.jelly(a))
        self.assertIsInstance(res.x, frozenset)
        self.assertEqual(list(res.x), [res])
| bsd-3-clause |
leva2020/cineco-admin | web/js/jquery.vectormaps/converter/simplifier.py | 234 | 5985 | import argparse
import sys
import os
from osgeo import ogr
from osgeo import osr
import anyjson
import shapely.geometry
import shapely.ops
import codecs
import time
format = '%.8f %.8f'
tolerance = 0.01
infile = '/Users/kirilllebedev/Maps/50m-admin-0-countries/ne_50m_admin_0_countries.shp'
outfile = 'map.shp'
# Open the datasource to operate on.
in_ds = ogr.Open( infile, update = 0 )
in_layer = in_ds.GetLayer( 0 )
in_defn = in_layer.GetLayerDefn()
# Create output file with similar information.
shp_driver = ogr.GetDriverByName( 'ESRI Shapefile' )
if os.path.exists('map.shp'):
shp_driver.DeleteDataSource( outfile )
shp_ds = shp_driver.CreateDataSource( outfile )
shp_layer = shp_ds.CreateLayer( in_defn.GetName(),
geom_type = in_defn.GetGeomType(),
srs = in_layer.GetSpatialRef() )
in_field_count = in_defn.GetFieldCount()
for fld_index in range(in_field_count):
src_fd = in_defn.GetFieldDefn( fld_index )
fd = ogr.FieldDefn( src_fd.GetName(), src_fd.GetType() )
fd.SetWidth( src_fd.GetWidth() )
fd.SetPrecision( src_fd.GetPrecision() )
shp_layer.CreateField( fd )
# Load geometries: collect every (multi)polygon from the input layer as a
# Shapely geometry, converting via a WKB round-trip.
geometries = []
for feature in in_layer:
    geometry = feature.GetGeometryRef()
    geometryType = geometry.GetGeometryType()
    if geometryType == ogr.wkbPolygon or geometryType == ogr.wkbMultiPolygon:
        # OGR -> WKB bytes -> Shapely geometry.
        shapelyGeometry = shapely.wkb.loads( geometry.ExportToWkb() )
        #if not shapelyGeometry.is_valid:
        #buffer to fix selfcrosses
        #shapelyGeometry = shapelyGeometry.buffer(0)
        if shapelyGeometry:
            geometries.append(shapelyGeometry)
# Rewind the layer so the feature-writing pass below can re-iterate it.
in_layer.ResetReading()
start = int(round(time.time() * 1000))
# Simplification
points = []
connections = {}
counter = 0
for geom in geometries:
counter += 1
polygons = []
if isinstance(geom, shapely.geometry.Polygon):
polygons.append(geom)
else:
for polygon in geom:
polygons.append(polygon)
for polygon in polygons:
if polygon.area > 0:
lines = []
lines.append(polygon.exterior)
for line in polygon.interiors:
lines.append(line)
for line in lines:
for i in range(len(line.coords)-1):
indexFrom = i
indexTo = i+1
pointFrom = format % line.coords[indexFrom]
pointTo = format % line.coords[indexTo]
if pointFrom == pointTo:
continue
if not (pointFrom in connections):
connections[pointFrom] = {}
connections[pointFrom][pointTo] = 1
if not (pointTo in connections):
connections[pointTo] = {}
connections[pointTo][pointFrom] = 1
print int(round(time.time() * 1000)) - start
simplifiedLines = {}
pivotPoints = {}
def simplifyRing(ring):
    """Simplify one ring (exterior or interior) of a polygon while keeping
    every 'pivot' point — a junction shared with another border — intact,
    so neighbouring polygons keep exactly matching simplified borders.

    Returns a shapely LineString of the simplified coordinates, or None
    when the ring degenerates (fewer than 3 points) and should be dropped.

    Reads the module-level globals `connections`, `format` and `tolerance`,
    and mutates the caches `pivotPoints` and `simplifiedLines`.
    """
    # Drop the closing coordinate (it duplicates the first point of a ring).
    coords = list(ring.coords)[0:-1]
    simpleCoords = []

    # Scan for the first pivot point on the ring, if any.  A pivot is a
    # point with more than two neighbours, or one already marked earlier.
    isPivot = False
    pointIndex = 0
    while not isPivot and pointIndex < len(coords):
        pointStr = format % coords[pointIndex]
        pointIndex += 1
        isPivot = ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints))
    # Back up to the index of the pivot we found (or the last point tested).
    pointIndex = pointIndex - 1

    if not isPivot:
        # The ring touches no other border: simplify it as a single line.
        simpleRing = shapely.geometry.LineString(coords).simplify(tolerance)
        if len(simpleRing.coords) <= 2:
            return None  # degenerate after simplification — drop the ring
        else:
            # Mark the ring's endpoints as pivots and cache the result under
            # a (first, second, last)-point key so a shared ring is reused
            # rather than re-simplified.
            pivotPoints[format % coords[0]] = True
            pivotPoints[format % coords[-1]] = True
            simpleLineKey = format % coords[0]+':'+format % coords[1]+':'+format % coords[-1]
            simplifiedLines[simpleLineKey] = simpleRing.coords
            return simpleRing
    else:
        # Rotate the ring so it starts at the pivot, then walk it and
        # simplify each pivot-to-pivot stretch independently.
        points = coords[pointIndex:len(coords)]
        points.extend(coords[0:pointIndex+1])
        iFrom = 0
        for i in range(1, len(points)):
            pointStr = format % points[i]
            if ((len(connections[pointStr]) > 2) or (pointStr in pivotPoints)):
                line = points[iFrom:i+1]
                # Key of this stretch walked in the opposite direction: if a
                # neighbouring polygon already simplified it, reuse the cached
                # coordinates (reversed) so both borders match exactly.
                lineKey = format % line[-1]+':'+format % line[-2]+':'+format % line[0]
                if lineKey in simplifiedLines:
                    simpleLine = simplifiedLines[lineKey]
                    simpleLine = list(reversed(simpleLine))
                else:
                    # First time this stretch is seen: simplify and cache it
                    # under its forward-direction key.
                    simpleLine = shapely.geometry.LineString(line).simplify(tolerance).coords
                    lineKey = format % line[0]+':'+format % line[1]+':'+format % line[-1]
                    simplifiedLines[lineKey] = simpleLine
                # Omit the stretch's last point — it is the next stretch's first.
                simpleCoords.extend( simpleLine[0:-1] )
                iFrom = i
        if len(simpleCoords) <= 2:
            return None  # ring collapsed — drop it
        else:
            return shapely.geometry.LineString(simpleCoords)
def simplifyPolygon(polygon):
    """Simplify a polygon ring by ring.

    Returns None when the exterior ring degenerates; otherwise a new
    Polygon built from the simplified exterior and whichever interior
    rings survive simplification.
    """
    outer = simplifyRing(polygon.exterior)
    if outer is None:
        return None
    surviving_holes = [ring for ring in map(simplifyRing, polygon.interiors)
                       if ring is not None]
    return shapely.geometry.Polygon(outer, surviving_holes)
# Simplify every loaded geometry.  results[i] holds a MultiPolygon of the
# surviving parts of geometries[i], or None when nothing survived.
results = []
for geom in geometries:
    # Normalize Polygon / MultiPolygon into a flat list of parts.
    if isinstance(geom, shapely.geometry.Polygon):
        parts = [geom]
    else:
        parts = list(geom)
    kept = []
    for part in parts:
        candidate = simplifyPolygon(part)
        # Order of the checks matters: `_geom` is only touched once we know
        # the candidate is not None (short-circuit).
        if candidate is not None and candidate._geom is not None:
            kept.append(candidate)
    results.append(shapely.geometry.MultiPolygon(kept) if kept else None)
# Process all features in input layer: write each feature whose simplified
# geometry survived, copying its attributes and replacing the geometry.
# NOTE(review): assumes GetNextFeature() yields features in the same order
# as the earlier `for feature in in_layer` pass, so `results[counter]`
# lines up with the current feature — the ResetReading() above supports
# this, but confirm against the OGR driver's behaviour.
in_feat = in_layer.GetNextFeature()
counter = 0
while in_feat is not None:
    if results[counter] is not None:
        out_feat = ogr.Feature( feature_def = shp_layer.GetLayerDefn() )
        # Copy all attribute fields from the source feature.
        out_feat.SetFrom( in_feat )
        # Convert the simplified Shapely geometry back to OGR via WKB.
        out_feat.SetGeometryDirectly(
            ogr.CreateGeometryFromWkb(
                shapely.wkb.dumps(
                    results[counter]
                )
            )
        )
        shp_layer.CreateFeature( out_feat )
        out_feat.Destroy()
    else:
        # Field 16 is presumably a name/label field of the source layer —
        # TODO confirm against the shapefile's schema.
        print 'geometry is too small: '+in_feat.GetField(16)
    in_feat.Destroy()
    in_feat = in_layer.GetNextFeature()
    counter += 1
# Cleanup: destroying the datasources flushes and closes the files.
shp_ds.Destroy()
in_ds.Destroy()
print int(round(time.time() * 1000)) - start | mit |
carlmw/oscar-wager | django/middleware/transaction.py | 447 | 1090 | from django.db import transaction
class TransactionMiddleware(object):
    """
    Wraps each request in a database transaction.

    While installed, every view runs under managed transaction state: a
    save() is not committed immediately; a single commit is issued once a
    successful response has been built, and the transaction is rolled back
    when the view raises an exception.
    """
    def process_request(self, request):
        """Enter transaction management before the view runs."""
        transaction.enter_transaction_management()
        transaction.managed(True)

    def process_exception(self, request, exception):
        """Roll back pending changes and exit transaction management."""
        if transaction.is_dirty():
            transaction.rollback()
        transaction.leave_transaction_management()

    def process_response(self, request, response):
        """Commit pending changes (if any) and exit transaction management."""
        if transaction.is_managed():
            if transaction.is_dirty():
                transaction.commit()
            transaction.leave_transaction_management()
        return response
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.