repo_name stringlengths 5 100 | path stringlengths 4 294 | copies stringclasses 990
values | size stringlengths 4 7 | content stringlengths 666 1M | license stringclasses 15
values |
|---|---|---|---|---|---|
vany-egorov/node-gyp | gyp/gyptest.py | 1752 | 8019 | #!/usr/bin/env python
# Copyright (c) 2012 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
__doc__ = """
gyptest.py -- test runner for GYP tests.
"""
import os
import optparse
import shlex
import subprocess
import sys
class CommandRunner(object):
  """
  Executor class for commands, including "commands" implemented by
  Python functions.

  A "command" may be:
    * a string, %-substituted against a dictionary and run via the shell,
    * a list of arguments passed to subprocess directly,
    * a (function, arg, ...) tuple invoked in-process.
  """
  # Class-level switches so main() can configure every runner at once:
  # verbose echoes each command before running; active=False makes
  # execute() a no-op (dry-run mode for --no-exec).
  verbose = True
  active = True

  def __init__(self, dictionary=None):
    # BUG FIX: the original used a mutable default argument ({}), which is
    # shared across all instances; use None as the sentinel instead.
    self.subst_dictionary(dictionary or {})

  def subst_dictionary(self, dictionary):
    # Dictionary used for %-substitution in subst().
    self._subst_dictionary = dictionary

  def subst(self, string, dictionary=None):
    """
    Substitutes (via the format operator) the values in the specified
    dictionary into the specified command.

    The command can be an (action, string) tuple.  In all cases, we
    perform substitution on strings and don't worry if something isn't
    a string.  (It's probably a Python function to be executed.)
    """
    if dictionary is None:
      dictionary = self._subst_dictionary
    if dictionary:
      try:
        string = string % dictionary
      except TypeError:
        # Non-string commands (tuples, lists) pass through unchanged.
        pass
    return string

  def display(self, command, stdout=None, stderr=None):
    """Echo the command about to run (no-op unless self.verbose)."""
    if not self.verbose:
      return
    if isinstance(command, tuple):
      # In-process function call: render as func(repr(arg), ...).
      func = command[0]
      args = command[1:]
      s = '%s(%s)' % (func.__name__, ', '.join(map(repr, args)))
    elif isinstance(command, list):
      # BUG FIX: this branch was a second `if`, so a tuple command fell
      # through to the `else` below, overwrote s with the raw tuple, and
      # crashed on s.endswith().  It must be an elif.
      # TODO:  quote arguments containing spaces
      # TODO:  handle meta characters?
      s = ' '.join(command)
    else:
      s = self.subst(command)
    if not s.endswith('\n'):
      s += '\n'
    sys.stdout.write(s)
    sys.stdout.flush()

  def execute(self, command, stdout=None, stderr=None):
    """
    Executes a single command.  Returns the command's exit status
    (0 when inactive or for a successful in-process function call).
    """
    if not self.active:
      # Dry-run mode: pretend success without doing anything.
      return 0
    if isinstance(command, str):
      command = self.subst(command)
      cmdargs = shlex.split(command)
      if cmdargs[0] == 'cd':
        # 'cd' must be emulated in-process: a subprocess changing its own
        # cwd would not affect this process.
        command = (os.chdir,) + tuple(cmdargs[1:])
    if isinstance(command, tuple):
      func = command[0]
      args = command[1:]
      return func(*args)
    else:
      if stdout is sys.stdout:
        # Same as passing sys.stdout, except python2.4 doesn't fail on it.
        subout = None
      else:
        # Open pipe for anything else so Popen works on python2.4.
        subout = subprocess.PIPE
      if stderr is sys.stderr:
        # Same as passing sys.stderr, except python2.4 doesn't fail on it.
        suberr = None
      elif stderr is None:
        # Merge with stdout if stderr isn't specified.
        suberr = subprocess.STDOUT
      else:
        # Open pipe for anything else so Popen works on python2.4.
        suberr = subprocess.PIPE
      p = subprocess.Popen(command,
                           shell=(sys.platform == 'win32'),
                           stdout=subout,
                           stderr=suberr)
      p.wait()
      if stdout is None:
        # Capture output for later inspection by the caller.
        self.stdout = p.stdout.read()
      elif stdout is not sys.stdout:
        stdout.write(p.stdout.read())
      if stderr not in (None, sys.stderr):
        stderr.write(p.stderr.read())
      return p.returncode

  def run(self, command, display=None, stdout=None, stderr=None):
    """
    Runs a single command, displaying it first.
    """
    if display is None:
      display = command
    self.display(display)
    return self.execute(command, stdout, stderr)
class Unbuffered(object):
  """File-object proxy that flushes after every write.

  Wraps a stream so test progress appears immediately even when output
  is piped; every other attribute is delegated to the wrapped stream.
  """

  def __init__(self, fp):
    self.fp = fp

  def write(self, data):
    stream = self.fp
    stream.write(data)
    stream.flush()

  def __getattr__(self, attr):
    # Fall through to the underlying stream for everything else.
    return getattr(self.fp, attr)
# Replace the standard streams with auto-flushing wrappers so test
# progress is visible immediately even when output is piped or redirected.
sys.stdout = Unbuffered(sys.stdout)
sys.stderr = Unbuffered(sys.stderr)
def is_test_name(f):
  """Return True if *f* is named like a gyp test script (gyptest*.py)."""
  prefix, suffix = 'gyptest', '.py'
  return f.startswith(prefix) and f.endswith(suffix)
def find_all_gyptest_files(directory):
  """Recursively collect the sorted paths of all gyp test scripts under
  *directory*, skipping .svn administrative directories."""
  found = []
  for root, dirs, files in os.walk(directory):
    if '.svn' in dirs:
      # Prune in place so os.walk never descends into .svn.
      dirs.remove('.svn')
    found.extend(os.path.join(root, name)
                 for name in files if is_test_name(name))
  found.sort()
  return found
def main(argv=None):
  """Command-line entry point.

  Parses options, collects the tests to run, and runs each test once per
  target gyp format.  Returns 0 when every test passes, 1 otherwise.
  """
  if argv is None:
    argv = sys.argv

  usage = "gyptest.py [-ahlnq] [-f formats] [test ...]"
  parser = optparse.OptionParser(usage=usage)
  parser.add_option("-a", "--all", action="store_true",
            help="run all tests")
  parser.add_option("-C", "--chdir", action="store", default=None,
            help="chdir to the specified directory")
  parser.add_option("-f", "--format", action="store", default='',
            help="run tests with the specified formats")
  parser.add_option("-G", '--gyp_option', action="append", default=[],
            help="Add -G options to the gyp command line")
  parser.add_option("-l", "--list", action="store_true",
            help="list available tests and exit")
  parser.add_option("-n", "--no-exec", action="store_true",
            help="no execute, just print the command line")
  parser.add_option("--passed", action="store_true",
            help="report passed tests")
  parser.add_option("--path", action="append", default=[],
            help="additional $PATH directory")
  parser.add_option("-q", "--quiet", action="store_true",
            help="quiet, don't print test command lines")
  opts, args = parser.parse_args(argv[1:])

  if opts.chdir:
    os.chdir(opts.chdir)

  if opts.path:
    extra_path = [os.path.abspath(p) for p in opts.path]
    extra_path = os.pathsep.join(extra_path)
    os.environ['PATH'] = extra_path + os.pathsep + os.environ['PATH']

  if not args:
    if not opts.all:
      sys.stderr.write('Specify -a to get all tests.\n')
      return 1
    args = ['test']

  tests = []
  for arg in args:
    if os.path.isdir(arg):
      tests.extend(find_all_gyptest_files(os.path.normpath(arg)))
    else:
      if not is_test_name(os.path.basename(arg)):
        # BUG FIX: this was a Python 2 print statement
        # (`print >>sys.stderr, ...`), a syntax error under Python 3;
        # use write() like the rest of this script.
        sys.stderr.write('%s is not a valid gyp test name.\n' % arg)
        sys.exit(1)
      tests.append(arg)

  if opts.list:
    for test in tests:
      # BUG FIX: was a Python 2 `print test` statement.
      sys.stdout.write(test + '\n')
    sys.exit(0)

  CommandRunner.verbose = not opts.quiet
  CommandRunner.active = not opts.no_exec
  cr = CommandRunner()

  os.environ['PYTHONPATH'] = os.path.abspath('test/lib')
  if not opts.quiet:
    sys.stdout.write('PYTHONPATH=%s\n' % os.environ['PYTHONPATH'])

  passed = []
  failed = []
  no_result = []

  if opts.format:
    format_list = opts.format.split(',')
  else:
    # TODO:  not duplicate this mapping from pylib/gyp/__init__.py
    format_list = {
      'aix5':     ['make'],
      'freebsd7': ['make'],
      'freebsd8': ['make'],
      'openbsd5': ['make'],
      'cygwin':   ['msvs'],
      'win32':    ['msvs', 'ninja'],
      'linux2':   ['make', 'ninja'],
      'linux3':   ['make', 'ninja'],
      'darwin':   ['make', 'ninja', 'xcode', 'xcode-ninja'],
    }[sys.platform]

  for format in format_list:
    os.environ['TESTGYP_FORMAT'] = format
    if not opts.quiet:
      sys.stdout.write('TESTGYP_FORMAT=%s\n' % format)

    gyp_options = []
    for option in opts.gyp_option:
      gyp_options += ['-G', option]
    if gyp_options and not opts.quiet:
      sys.stdout.write('Extra Gyp options: %s\n' % gyp_options)

    for test in tests:
      status = cr.run([sys.executable, test] + gyp_options,
                      stdout=sys.stdout,
                      stderr=sys.stderr)
      # Exit status 2 means "no result" by gyp test convention.
      if status == 2:
        no_result.append(test)
      elif status:
        failed.append(test)
      else:
        passed.append(test)

  if not opts.quiet:
    def report(description, tests):
      # Print a summary line plus the indented test list, when non-empty.
      if tests:
        if len(tests) == 1:
          sys.stdout.write("\n%s the following test:\n" % description)
        else:
          fmt = "\n%s the following %d tests:\n"
          sys.stdout.write(fmt % (description, len(tests)))
        sys.stdout.write("\t" + "\n\t".join(tests) + "\n")

    if opts.passed:
      report("Passed", passed)
    report("Failed", failed)
    report("No result from", no_result)

  if failed:
    return 1
  else:
    return 0
# Standard script entry point: propagate main()'s status as the exit code.
if __name__ == "__main__":
  sys.exit(main())
| mit |
brianjgeiger/osf.io | api_tests/registrations/views/test_registration_files_list.py | 20 | 1461 | import pytest
from tests.json_api_test_app import JSONAPITestApp
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
ProjectFactory,
RegistrationFactory
)
@pytest.mark.django_db
class TestRegistrationFilesList(object):
    """Checks that registration file relationship links use the public
    guid (``_id``), not the internal database primary key (``id``)."""

    @pytest.fixture(autouse=True)
    def setUp(self):
        # Build a user, a project, and a registration of that project.
        self.app = JSONAPITestApp()
        self.user = AuthUserFactory()
        self.node = ProjectFactory(creator=self.user)
        self.registration = RegistrationFactory(
            project=self.node, creator=self.user)
        # Note: folders/files added to node do not seem to get picked up by the
        # Registration factory so they are added after
        self.folder = self.registration.get_addon(
            'osfstorage').get_root().append_folder('Archive of OSF Storage')
        self.folder.save()
        self.file = self.folder.append_file(
            'So, on average, it has been super comfortable this week')
        self.file.save()

    def test_registration_relationships_contains_guid_not_id(self):
        url = '/{}registrations/{}/files/{}/'.format(
            API_BASE, self.registration._id, self.file.provider)
        res = self.app.get(url, auth=self.user.auth)
        split_href = res.json['data'][0]['relationships']['files']['links']['related']['href'].split(
            '/')
        assert self.registration._id in split_href
        # BUG FIX: ``registration.id`` is an int while split_href holds
        # strings, so ``id not in split_href`` was vacuously true and the
        # assertion could never fail.  Compare the string form instead.
        assert str(self.registration.id) not in split_href
| apache-2.0 |
rbuffat/pyepw | tests/test_design_conditions.py | 1 | 18375 | import os
import tempfile
import unittest
from pyepw.epw import DesignConditions, DesignCondition, EPW
class TestDesignConditions(unittest.TestCase):
    """Round-trips a fully populated DesignCondition through an EPW file
    and verifies every field survives the save/read cycle unchanged."""

    def setUp(self):
        # Temporary file used as the EPW save/load target.
        self.fd, self.path = tempfile.mkstemp()

    def tearDown(self):
        os.remove(self.path)

    def test_create_design_conditions(self):
        # Every writable field of a DesignCondition, in declaration order,
        # paired with a unique sentinel value so any cross-wiring between
        # fields is caught on re-read.  Float values are later compared
        # with assertAlmostEqual, everything else with assertEqual.
        fields = [
            ('title_of_design_condition', 'title_of_design_condition'),
            ('unkown_field', 'unkown_field'),
            # Heating design statistics.
            ('design_stat_heating', 'Heating'),
            ('coldestmonth', int((12 + 1) * 0.5)),
            ('db996', 5.5),
            ('db990', 6.6),
            ('dp996', 7.7),
            ('hr_dp996', 8.8),
            ('db_dp996', 9.9),
            ('dp990', 10.10),
            ('hr_dp990', 11.11),
            ('db_dp990', 12.12),
            ('ws004c', 13.13),
            ('db_ws004c', 14.14),
            ('ws010c', 15.15),
            ('db_ws010c', 16.16),
            ('ws_db996', 17.17),
            ('wd_db996', 18.18),
            # Cooling design statistics.
            ('design_stat_cooling', 'Cooling'),
            ('hottestmonth', int((12 + 1) * 0.5)),
            ('dbr', 21.21),
            ('db004', 22.22),
            ('wb_db004', 23.23),
            ('db010', 24.24),
            ('wb_db010', 25.25),
            ('db020', 26.26),
            ('wb_db020', 27.27),
            ('wb004', 28.28),
            ('db_wb004', 29.29),
            ('wb010', 30.30),
            ('db_wb010', 31.31),
            ('wb020', 32.32),
            ('db_wb020', 33.33),
            ('ws_db004', 34.34),
            ('wd_db004', 35.35),
            ('dp004', 36.36),
            ('hr_dp004', 37.37),
            ('db_dp004', 38.38),
            ('dp010', 39.39),
            ('hr_dp010', 40.40),
            ('db_dp010', 41.41),
            ('dp020', 42.42),
            ('hr_dp020', 43.43),
            ('db_dp020', 44.44),
            ('en004', 45.45),
            ('db_en004', 46.46),
            ('en010', 47.47),
            ('db_en010', 48.48),
            ('en020', 49.49),
            ('db_en020', 50.50),
            ('hrs_84_and_db12_8_or_20_6', 51.51),
            # Extreme design statistics.
            ('design_stat_extremes', 'Extremes'),
            ('ws010', 53.53),
            ('ws025', 54.54),
            ('ws050', 55.55),
            ('wbmax', 56.56),
            ('dbmin_mean', 57.57),
            ('dbmax_mean', 58.58),
            ('dbmin_stddev', 59.59),
            ('dbmax_stddev', 60.60),
            ('dbmin05years', 61.61),
            ('dbmax05years', 62.62),
            ('dbmin10years', 63.63),
            ('dbmax10years', 64.64),
            ('dbmin20years', 65.65),
            ('dbmax20years', 66.66),
            ('dbmin50years', 67.67),
            ('dbmax50years', 68.68),
        ]

        # Populate a single DesignCondition with all sentinel values.
        condition = DesignCondition()
        for name, value in fields:
            setattr(condition, name, value)

        conditions = DesignConditions()
        conditions.add_design_condition(condition)

        # Write the EPW file and read it back through a fresh EPW object.
        epw = EPW(design_conditions=conditions)
        epw.save(self.path, check=False)
        epw2 = EPW()
        epw2.read(self.path)

        # Verify every field in the same order it was set.
        reread = epw2.design_conditions.design_conditions[0]
        for name, value in fields:
            actual = getattr(reread, name)
            if isinstance(value, float):
                self.assertAlmostEqual(actual, value)
            else:
                self.assertEqual(actual, value)
| apache-2.0 |
lebabouin/CouchPotatoServer-develop | libs/requests/packages/urllib3/request.py | 60 | 5873 | # urllib3/request.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
try:
from urllib.parse import urlencode
except ImportError:
from urllib import urlencode
from .filepost import encode_multipart_formdata
__all__ = ['RequestMethods']
class RequestMethods(object):
    """
    Convenience mixin for classes who implement a :meth:`urlopen` method, such
    as :class:`~urllib3.connectionpool.HTTPConnectionPool` and
    :class:`~urllib3.poolmanager.PoolManager`.

    Provides behavior for making common types of HTTP request methods and
    decides which type of request field encoding to use.

    Specifically,

    :meth:`.request_encode_url` is for sending requests whose fields are encoded
    in the URL (such as GET, HEAD, DELETE).

    :meth:`.request_encode_body` is for sending requests whose fields are
    encoded in the *body* of the request using multipart or www-form-urlencoded
    (such as for POST, PUT, PATCH).

    :meth:`.request` is for making any kind of request, it will look up the
    appropriate encoding format and use one of the above two methods to make
    the request.

    Initializer parameters:

    :param headers:
        Headers to include with all requests, unless other headers are given
        explicitly.
    """

    # HTTP verbs whose fields belong in the URL query string...
    _encode_url_methods = set(['DELETE', 'GET', 'HEAD', 'OPTIONS'])
    # ...and those whose fields belong in the request body.
    _encode_body_methods = set(['PATCH', 'POST', 'PUT', 'TRACE'])

    def __init__(self, headers=None):
        self.headers = headers or {}

    def urlopen(self, method, url, body=None, headers=None,
                encode_multipart=True, multipart_boundary=None,
                **kw):  # Abstract
        # BUG FIX: the original did ``raise NotImplemented(...)``, but
        # NotImplemented is a non-callable sentinel constant, so calling it
        # raised a confusing TypeError instead of the intended exception.
        raise NotImplementedError(
            "Classes extending RequestMethods must implement "
            "their own ``urlopen`` method.")

    def request(self, method, url, fields=None, headers=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the appropriate encoding of
        ``fields`` based on the ``method`` used.

        This is a convenience method that requires the least amount of manual
        effort. It can be used in most situations, while still having the option
        to drop down to more specific methods when necessary, such as
        :meth:`request_encode_url`, :meth:`request_encode_body`,
        or even the lowest level :meth:`urlopen`.
        """
        method = method.upper()

        if method in self._encode_url_methods:
            return self.request_encode_url(method, url, fields=fields,
                                           headers=headers,
                                           **urlopen_kw)
        else:
            return self.request_encode_body(method, url, fields=fields,
                                            headers=headers,
                                            **urlopen_kw)

    def request_encode_url(self, method, url, fields=None, **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the url. This is useful for request methods like GET, HEAD, DELETE, etc.
        """
        if fields:
            url += '?' + urlencode(fields)
        return self.urlopen(method, url, **urlopen_kw)

    def request_encode_body(self, method, url, fields=None, headers=None,
                            encode_multipart=True, multipart_boundary=None,
                            **urlopen_kw):
        """
        Make a request using :meth:`urlopen` with the ``fields`` encoded in
        the body. This is useful for request methods like POST, PUT, PATCH, etc.

        When ``encode_multipart=True`` (default), then
        :meth:`urllib3.filepost.encode_multipart_formdata` is used to encode the
        payload with the appropriate content type. Otherwise
        :meth:`urllib.urlencode` is used with the
        'application/x-www-form-urlencoded' content type.

        Multipart encoding must be used when posting files, and it's reasonably
        safe to use it in other times too. However, it may break request signing,
        such as with OAuth.

        Supports an optional ``fields`` parameter of key/value strings AND
        key/filetuple. A filetuple is a (filename, data, MIME type) tuple where
        the MIME type is optional. For example: ::

            fields = {
                'foo': 'bar',
                'fakefile': ('foofile.txt', 'contents of foofile'),
                'realfile': ('barfile.txt', open('realfile').read()),
                'typedfile': ('bazfile.bin', open('bazfile').read(),
                              'image/jpeg'),
                'nonamefile': 'contents of nonamefile field',
            }

        When uploading a file, providing a filename (the first parameter of the
        tuple) is optional but recommended to best mimick behavior of browsers.

        Note that if ``headers`` are supplied, the 'Content-Type' header will be
        overwritten because it depends on the dynamic random boundary string
        which is used to compose the body of the request. The random boundary
        string can be explicitly set with the ``multipart_boundary`` parameter.
        """
        if encode_multipart:
            body, content_type = encode_multipart_formdata(fields or {},
                                    boundary=multipart_boundary)
        else:
            body, content_type = (urlencode(fields or {}),
                                    'application/x-www-form-urlencoded')

        if headers is None:
            headers = self.headers

        # Copy so the caller's (or the instance's default) headers dict is
        # never mutated; the computed Content-Type always wins.
        headers_ = {'Content-Type': content_type}
        headers_.update(headers)

        return self.urlopen(method, url, body=body, headers=headers_,
                            **urlopen_kw)
| gpl-3.0 |
40223149/2015springfinal | static/Brython3.1.0-20150301-090019/Lib/unittest/test/testmock/testmagicmethods.py | 737 | 12145 | import unittest
import inspect
import sys
from unittest.mock import Mock, MagicMock, _magics
class TestMockingMagicMethods(unittest.TestCase):
def test_deleting_magic_methods(self):
mock = Mock()
self.assertFalse(hasattr(mock, '__getitem__'))
mock.__getitem__ = Mock()
self.assertTrue(hasattr(mock, '__getitem__'))
del mock.__getitem__
self.assertFalse(hasattr(mock, '__getitem__'))
    def test_magicmock_del(self):
        """Deleting a MagicMock magic method disables it, whether or not it
        was used first."""
        mock = MagicMock()
        # before using getitem
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])

        mock = MagicMock()
        # this time use it first
        mock['foo']
        del mock.__getitem__
        self.assertRaises(TypeError, lambda: mock['foo'])
    def test_magic_method_wrapping(self):
        """Plain functions assigned to magic attributes are wrapped (not
        stored verbatim) and receive the mock as their first argument."""
        mock = Mock()
        def f(self, name):
            return self, 'fish'

        mock.__getitem__ = f
        # The function is wrapped in a method-like object...
        self.assertFalse(mock.__getitem__ is f)
        # ...but is still invoked with the mock bound as `self`.
        self.assertEqual(mock['foo'], (mock, 'fish'))
        self.assertEqual(mock.__getitem__('foo'), (mock, 'fish'))

        # Assigning a Mock (rather than a function) is stored as-is.
        mock.__getitem__ = mock
        self.assertTrue(mock.__getitem__ is mock)
def test_magic_methods_isolated_between_mocks(self):
mock1 = Mock()
mock2 = Mock()
mock1.__iter__ = Mock(return_value=iter([]))
self.assertEqual(list(mock1), [])
self.assertRaises(TypeError, lambda: list(mock2))
def test_repr(self):
mock = Mock()
self.assertEqual(repr(mock), "<Mock id='%s'>" % id(mock))
mock.__repr__ = lambda s: 'foo'
self.assertEqual(repr(mock), 'foo')
def test_str(self):
mock = Mock()
self.assertEqual(str(mock), object.__str__(mock))
mock.__str__ = lambda s: 'foo'
self.assertEqual(str(mock), 'foo')
    def test_dict_methods(self):
        """A plain Mock rejects mapping syntax until __getitem__,
        __setitem__ and __delitem__ are explicitly provided; once provided
        they drive a real backing dict."""
        mock = Mock()

        # Unconfigured: all three mapping operations raise TypeError.
        self.assertRaises(TypeError, lambda: mock['foo'])
        def _del():
            del mock['foo']
        def _set():
            mock['foo'] = 3
        self.assertRaises(TypeError, _del)
        self.assertRaises(TypeError, _set)

        # Back the mock's item access with a real dictionary.
        _dict = {}
        def getitem(s, name):
            return _dict[name]
        def setitem(s, name, value):
            _dict[name] = value
        def delitem(s, name):
            del _dict[name]

        mock.__setitem__ = setitem
        mock.__getitem__ = getitem
        mock.__delitem__ = delitem

        # Missing key now raises KeyError (from the dict), not TypeError.
        self.assertRaises(KeyError, lambda: mock['foo'])
        mock['foo'] = 'bar'
        self.assertEqual(_dict, {'foo': 'bar'})
        self.assertEqual(mock['foo'], 'bar')
        del mock['foo']
        self.assertEqual(_dict, {})
    def test_numeric(self):
        """__add__, __iadd__ and __radd__ are independent: each arithmetic
        form only works after its specific magic method is configured."""
        original = mock = Mock()
        mock.value = 0

        self.assertRaises(TypeError, lambda: mock + 3)

        def add(self, other):
            mock.value += other
            return self
        mock.__add__ = add
        self.assertEqual(mock + 3, mock)
        self.assertEqual(mock.value, 3)

        del mock.__add__
        def iadd(mock):
            mock += 3
        self.assertRaises(TypeError, iadd, mock)
        # __iadd__ makes += work and must keep the same object identity.
        mock.__iadd__ = add
        mock += 6
        self.assertEqual(mock, original)
        self.assertEqual(mock.value, 9)

        # Reflected form (int + mock) needs __radd__, not __add__.
        self.assertRaises(TypeError, lambda: 3 + mock)
        mock.__radd__ = add
        self.assertEqual(7 + mock, mock)
        self.assertEqual(mock.value, 16)
def test_hash(self):
mock = Mock()
# test delegation
self.assertEqual(hash(mock), Mock.__hash__(mock))
def _hash(s):
return 3
mock.__hash__ = _hash
self.assertEqual(hash(mock), 3)
def test_nonzero(self):
m = Mock()
self.assertTrue(bool(m))
m.__bool__ = lambda s: False
self.assertFalse(bool(m))
    def test_comparison(self):
        """Rich comparisons work once configured on a Mock, while
        MagicMock's default ordering comparisons raise TypeError (there is
        no sensible default ordering)."""
        mock = Mock()
        def comp(s, o):
            return True
        # One function serves all four ordering methods.
        mock.__lt__ = mock.__gt__ = mock.__le__ = mock.__ge__ = comp
        self. assertTrue(mock < 3)
        self. assertTrue(mock > 3)
        self. assertTrue(mock <= 3)
        self. assertTrue(mock >= 3)

        # Unconfigured MagicMock ordering raises TypeError in every
        # operand combination (mock vs object, object vs mock, mock vs mock).
        self.assertRaises(TypeError, lambda: MagicMock() < object())
        self.assertRaises(TypeError, lambda: object() < MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() < MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() > object())
        self.assertRaises(TypeError, lambda: object() > MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() > MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() <= object())
        self.assertRaises(TypeError, lambda: object() <= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() <= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() >= object())
        self.assertRaises(TypeError, lambda: object() >= MagicMock())
        self.assertRaises(TypeError, lambda: MagicMock() >= MagicMock())
    def test_equality(self):
        """== / != default to identity, return real bools, and can be
        overridden per instance or via return_value on MagicMock."""
        for mock in Mock(), MagicMock():
            # Default equality is identity based.
            self.assertEqual(mock == mock, True)
            self.assertIsInstance(mock == mock, bool)
            self.assertEqual(mock != mock, False)
            self.assertIsInstance(mock != mock, bool)
            self.assertEqual(mock == object(), False)
            self.assertEqual(mock != object(), True)
            def eq(self, other):
                return other == 3
            mock.__eq__ = eq
            self.assertTrue(mock == 3)
            self.assertFalse(mock == 4)
            def ne(self, other):
                return other == 3
            mock.__ne__ = ne
            self.assertTrue(mock != 3)
            self.assertFalse(mock != 4)
        # MagicMock also honours return_value on the comparison mocks,
        # while still coercing the result to a real bool.
        mock = MagicMock()
        mock.__eq__.return_value = True
        self.assertIsInstance(mock == 3, bool)
        self.assertEqual(mock == 3, True)
        mock.__ne__.return_value = False
        self.assertIsInstance(mock != 3, bool)
        self.assertEqual(mock != 3, False)
def test_len_contains_iter(self):
mock = Mock()
self.assertRaises(TypeError, len, mock)
self.assertRaises(TypeError, iter, mock)
self.assertRaises(TypeError, lambda: 'foo' in mock)
mock.__len__ = lambda s: 6
self.assertEqual(len(mock), 6)
mock.__contains__ = lambda s, o: o == 3
self.assertTrue(3 in mock)
self.assertFalse(6 in mock)
mock.__iter__ = lambda s: iter('foobarbaz')
self.assertEqual(list(mock), list('foobarbaz'))
    def test_magicmock(self):
        """MagicMock preconfigures every supported magic method."""
        mock = MagicMock()
        mock.__iter__.return_value = iter([1, 2, 3])
        self.assertEqual(list(mock), [1, 2, 3])
        # Python 3 uses __bool__; the Python 2 name must not appear.
        getattr(mock, '__bool__').return_value = False
        self.assertFalse(hasattr(mock, '__nonzero__'))
        self.assertFalse(bool(mock))
        # Every supported magic is present; unknown dunders are not.
        for entry in _magics:
            self.assertTrue(hasattr(mock, entry))
        self.assertFalse(hasattr(mock, '__imaginery__'))
def test_magic_mock_equality(self):
mock = MagicMock()
self.assertIsInstance(mock == object(), bool)
self.assertIsInstance(mock != object(), bool)
self.assertEqual(mock == object(), False)
self.assertEqual(mock != object(), True)
self.assertEqual(mock == mock, True)
self.assertEqual(mock != mock, False)
    def test_magicmock_defaults(self):
        """Check the default return values MagicMock configures."""
        mock = MagicMock()
        self.assertEqual(int(mock), 1)
        self.assertEqual(complex(mock), 1j)
        self.assertEqual(float(mock), 1.0)
        self.assertNotIn(object(), mock)
        self.assertEqual(len(mock), 0)
        self.assertEqual(list(mock), [])
        # hash and str delegate to the object defaults.
        self.assertEqual(hash(mock), object.__hash__(mock))
        self.assertEqual(str(mock), object.__str__(mock))
        self.assertTrue(bool(mock))
        # in Python 3 oct and hex use __index__
        # so these tests are for __index__ in py3k
        self.assertEqual(oct(mock), '0o1')
        self.assertEqual(hex(mock), '0x1')
        # how to test __sizeof__ ?
    def test_magic_methods_and_spec(self):
        """spec restricts which magic methods exist / can be attached."""
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec=Iterable)
        # Plain Mock does not preconfigure magics even when in the spec.
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        # But a magic named in the spec may be attached manually.
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        def set_int():
            # NOTE: closes over the *current* binding of `mock`.
            mock.__int__ = Mock(return_value=iter([]))
        # __int__ is not part of the spec, so attaching it is rejected.
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)
    def test_magic_methods_and_spec_set(self):
        """spec_set behaves like spec for magic-method restrictions."""
        class Iterable(object):
            def __iter__(self):
                pass
        mock = Mock(spec_set=Iterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        # A magic named in the spec may still be attached manually.
        mock.__iter__ = Mock(return_value=iter([]))
        self.assertEqual(list(mock), [])
        class NonIterable(object):
            pass
        mock = Mock(spec_set=NonIterable)
        self.assertRaises(AttributeError, lambda: mock.__iter__)
        def set_int():
            # NOTE: closes over the *current* binding of `mock`.
            mock.__int__ = Mock(return_value=iter([]))
        # __int__ is outside the spec_set, so attaching it is rejected.
        self.assertRaises(AttributeError, set_int)
        mock = MagicMock(spec_set=Iterable)
        self.assertEqual(list(mock), [])
        self.assertRaises(AttributeError, set_int)
def test_setting_unsupported_magic_method(self):
mock = MagicMock()
def set_setattr():
mock.__setattr__ = lambda self, name: None
self.assertRaisesRegex(AttributeError,
"Attempting to set unsupported magic method '__setattr__'.",
set_setattr
)
def test_attributes_and_return_value(self):
mock = MagicMock()
attr = mock.foo
def _get_type(obj):
# the type of every mock (or magicmock) is a custom subclass
# so the real type is the second in the mro
return type(obj).__mro__[1]
self.assertEqual(_get_type(attr), MagicMock)
returned = mock()
self.assertEqual(_get_type(returned), MagicMock)
def test_magic_methods_are_magic_mocks(self):
mock = MagicMock()
self.assertIsInstance(mock.__getitem__, MagicMock)
mock[1][2].__getitem__.return_value = 3
self.assertEqual(mock[1][2][3], 3)
def test_magic_method_reset_mock(self):
mock = MagicMock()
str(mock)
self.assertTrue(mock.__str__.called)
mock.reset_mock()
self.assertFalse(mock.__str__.called)
def test_dir(self):
# overriding the default implementation
for mock in Mock(), MagicMock():
def _dir(self):
return ['foo']
mock.__dir__ = _dir
self.assertEqual(dir(mock), ['foo'])
    @unittest.skipIf('PyPy' in sys.version, "This fails differently on pypy")
    def test_bound_methods(self):
        """Attaching an already-bound method as a magic does not work."""
        m = Mock()
        # XXXX should this be an expected failure instead?
        # this seems like it should work, but is hard to do without introducing
        # other api inconsistencies. Failure message could be better though.
        m.__iter__ = [3].__iter__
        self.assertRaises(TypeError, iter, m)
def test_magic_method_type(self):
class Foo(MagicMock):
pass
foo = Foo()
self.assertIsInstance(foo.__int__, Foo)
def test_descriptor_from_class(self):
m = MagicMock()
type(m).__str__.return_value = 'foo'
self.assertEqual(str(m), 'foo')
def test_iterable_as_iter_return_value(self):
m = MagicMock()
m.__iter__.return_value = [1, 2, 3]
self.assertEqual(list(m), [1, 2, 3])
self.assertEqual(list(m), [1, 2, 3])
m.__iter__.return_value = iter([4, 5, 6])
self.assertEqual(list(m), [4, 5, 6])
self.assertEqual(list(m), [])
# Run this test module directly with the unittest runner.
if __name__ == '__main__':
    unittest.main()
| gpl-3.0 |
maxkoryukov/headphones | lib/unidecode/x000.py | 113 | 3035 | data = (
# Code points u+007f and below are equivalent to ASCII and are handled by a
# special case in the code. Hence they are not present in this table.
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '',
'', # 0x80
'', # 0x81
'', # 0x82
'', # 0x83
'', # 0x84
'', # 0x85
'', # 0x86
'', # 0x87
'', # 0x88
'', # 0x89
'', # 0x8a
'', # 0x8b
'', # 0x8c
'', # 0x8d
'', # 0x8e
'', # 0x8f
'', # 0x90
'', # 0x91
'', # 0x92
'', # 0x93
'', # 0x94
'', # 0x95
'', # 0x96
'', # 0x97
'', # 0x98
'', # 0x99
'', # 0x9a
'', # 0x9b
'', # 0x9c
'', # 0x9d
'', # 0x9e
'', # 0x9f
' ', # 0xa0
'!', # 0xa1
'C/', # 0xa2
# Not "GBP" - Pound Sign is used for more than just British Pounds.
'PS', # 0xa3
'$?', # 0xa4
'Y=', # 0xa5
'|', # 0xa6
'SS', # 0xa7
'"', # 0xa8
'(c)', # 0xa9
'a', # 0xaa
'<<', # 0xab
'!', # 0xac
'', # 0xad
'(r)', # 0xae
'-', # 0xaf
'deg', # 0xb0
'+-', # 0xb1
# These might be combined with other superscript digits (u+2070 - u+2079)
'2', # 0xb2
'3', # 0xb3
'\'', # 0xb4
'u', # 0xb5
'P', # 0xb6
'*', # 0xb7
',', # 0xb8
'1', # 0xb9
'o', # 0xba
'>>', # 0xbb
'1/4', # 0xbc
'1/2', # 0xbd
'3/4', # 0xbe
'?', # 0xbf
'A', # 0xc0
'A', # 0xc1
'A', # 0xc2
'A', # 0xc3
# Not "AE" - used in languages other than German
'A', # 0xc4
'A', # 0xc5
'AE', # 0xc6
'C', # 0xc7
'E', # 0xc8
'E', # 0xc9
'E', # 0xca
'E', # 0xcb
'I', # 0xcc
'I', # 0xcd
'I', # 0xce
'I', # 0xcf
'D', # 0xd0
'N', # 0xd1
'O', # 0xd2
'O', # 0xd3
'O', # 0xd4
'O', # 0xd5
# Not "OE" - used in languages other than German
'O', # 0xd6
'x', # 0xd7
'O', # 0xd8
'U', # 0xd9
'U', # 0xda
'U', # 0xdb
# Not "UE" - used in languages other than German
'U', # 0xdc
'Y', # 0xdd
'Th', # 0xde
'ss', # 0xdf
'a', # 0xe0
'a', # 0xe1
'a', # 0xe2
'a', # 0xe3
# Not "ae" - used in languages other than German
'a', # 0xe4
'a', # 0xe5
'ae', # 0xe6
'c', # 0xe7
'e', # 0xe8
'e', # 0xe9
'e', # 0xea
'e', # 0xeb
'i', # 0xec
'i', # 0xed
'i', # 0xee
'i', # 0xef
'd', # 0xf0
'n', # 0xf1
'o', # 0xf2
'o', # 0xf3
'o', # 0xf4
'o', # 0xf5
# Not "oe" - used in languages other than German
'o', # 0xf6
'/', # 0xf7
'o', # 0xf8
'u', # 0xf9
'u', # 0xfa
'u', # 0xfb
# Not "ue" - used in languages other than German
'u', # 0xfc
'y', # 0xfd
'th', # 0xfe
'y', # 0xff
)
| gpl-3.0 |
cloudbase/maas | src/maasserver/migrations/0060_add_zone_object.py | 1 | 17981 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
    def forwards(self, orm):
        """Apply the migration: create the maasserver_zone table."""
        # Adding model 'Zone'
        db.create_table(u'maasserver_zone', (
            (u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('created', self.gf('django.db.models.fields.DateTimeField')()),
            ('updated', self.gf('django.db.models.fields.DateTimeField')()),
            ('name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=256)),
            ('description', self.gf('django.db.models.fields.TextField')(blank=True)),
        ))
        db.send_create_signal(u'maasserver', ['Zone'])
    def backwards(self, orm):
        """Revert the migration: drop the maasserver_zone table."""
        # Deleting model 'Zone'
        db.delete_table(u'maasserver_zone')
models = {
u'auth.group': {
'Meta': {'object_name': 'Group'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
u'auth.permission': {
'Meta': {'ordering': "(u'content_type__app_label', u'content_type__model', u'codename')", 'unique_together': "((u'content_type', u'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['contenttypes.ContentType']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
u'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'unique': 'True', 'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
u'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
u'maasserver.bootimage': {
'Meta': {'unique_together': "((u'nodegroup', u'architecture', u'subarchitecture', u'release', u'purpose'),)", 'object_name': 'BootImage'},
'architecture': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"}),
'purpose': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'release': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'subarchitecture': ('django.db.models.fields.CharField', [], {'max_length': '255'})
},
u'maasserver.componenterror': {
'Meta': {'object_name': 'ComponentError'},
'component': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '40'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.config': {
'Meta': {'object_name': 'Config'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'value': ('maasserver.fields.JSONObjectField', [], {'null': 'True'})
},
u'maasserver.dhcplease': {
'Meta': {'object_name': 'DHCPLease'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'ip': ('django.db.models.fields.IPAddressField', [], {'unique': 'True', 'max_length': '15'}),
'mac': ('maasserver.fields.MACAddressField', [], {}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"})
},
u'maasserver.downloadprogress': {
'Meta': {'object_name': 'DownloadProgress'},
'bytes_downloaded': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'error': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"}),
'size': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.filestorage': {
'Meta': {'unique_together': "((u'filename', u'owner'),)", 'object_name': 'FileStorage'},
'content': ('metadataserver.fields.BinaryField', [], {'blank': 'True'}),
'filename': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'default': "u'e8a6d682-618f-11e3-bf15-3c970e0e56dc'", 'unique': 'True', 'max_length': '36'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'})
},
u'maasserver.macaddress': {
'Meta': {'object_name': 'MACAddress'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'mac_address': ('maasserver.fields.MACAddressField', [], {'unique': 'True'}),
'node': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.Node']"}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.node': {
'Meta': {'object_name': 'Node'},
'after_commissioning_action': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'agent_name': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'architecture': ('django.db.models.fields.CharField', [], {'default': "u'i386/generic'", 'max_length': '31'}),
'cpu_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'distro_series': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '20', 'null': 'True', 'blank': 'True'}),
'error': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'hostname': ('django.db.models.fields.CharField', [], {'default': "u''", 'unique': 'True', 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'memory': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'netboot': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']", 'null': 'True'}),
'owner': ('django.db.models.fields.related.ForeignKey', [], {'default': 'None', 'to': u"orm['auth.User']", 'null': 'True', 'blank': 'True'}),
'power_parameters': ('maasserver.fields.JSONObjectField', [], {'default': "u''", 'blank': 'True'}),
'power_type': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '10', 'blank': 'True'}),
'routers': ('djorm_pgarray.fields.ArrayField', [], {'default': 'None', 'dbtype': "u'macaddr'", 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0', 'max_length': '10'}),
'storage': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'system_id': ('django.db.models.fields.CharField', [], {'default': "u'node-e8a7fb98-618f-11e3-bf15-3c970e0e56dc'", 'unique': 'True', 'max_length': '41'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': u"orm['maasserver.Tag']", 'symmetrical': 'False'}),
'token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'null': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.nodegroup': {
'Meta': {'object_name': 'NodeGroup'},
'api_key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '18'}),
'api_token': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Token']", 'unique': 'True'}),
'cluster_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'dhcp_key': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'maas_url': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '80', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'uuid': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '36'})
},
u'maasserver.nodegroupinterface': {
'Meta': {'unique_together': "((u'nodegroup', u'interface'),)", 'object_name': 'NodeGroupInterface'},
'broadcast_ip': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'foreign_dhcp_ip': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'interface': ('django.db.models.fields.CharField', [], {'default': "u''", 'max_length': '255', 'blank': 'True'}),
'ip': ('django.db.models.fields.GenericIPAddressField', [], {'max_length': '39'}),
'ip_range_high': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'ip_range_low': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'management': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'nodegroup': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['maasserver.NodeGroup']"}),
'router_ip': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'subnet_mask': ('django.db.models.fields.GenericIPAddressField', [], {'default': 'None', 'max_length': '39', 'null': 'True', 'blank': 'True'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.sshkey': {
'Meta': {'unique_together': "((u'user', u'key'),)", 'object_name': 'SSHKey'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.TextField', [], {}),
'updated': ('django.db.models.fields.DateTimeField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['auth.User']"})
},
u'maasserver.tag': {
'Meta': {'object_name': 'Tag'},
'comment': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'created': ('django.db.models.fields.DateTimeField', [], {}),
'definition': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'kernel_opts': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'maasserver.userprofile': {
'Meta': {'object_name': 'UserProfile'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': u"orm['auth.User']", 'unique': 'True'})
},
u'maasserver.zone': {
'Meta': {'object_name': 'Zone'},
'created': ('django.db.models.fields.DateTimeField', [], {}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '256'}),
'updated': ('django.db.models.fields.DateTimeField', [], {})
},
u'piston.consumer': {
'Meta': {'object_name': 'Consumer'},
'description': ('django.db.models.fields.TextField', [], {}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'status': ('django.db.models.fields.CharField', [], {'default': "'pending'", 'max_length': '16'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'consumers'", 'null': 'True', 'to': u"orm['auth.User']"})
},
u'piston.token': {
'Meta': {'object_name': 'Token'},
'callback': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'callback_confirmed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'consumer': ('django.db.models.fields.related.ForeignKey', [], {'to': u"orm['piston.Consumer']"}),
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_approved': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '18'}),
'secret': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'timestamp': ('django.db.models.fields.IntegerField', [], {'default': '1386675646L'}),
'token_type': ('django.db.models.fields.IntegerField', [], {}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'tokens'", 'null': 'True', 'to': u"orm['auth.User']"}),
'verifier': ('django.db.models.fields.CharField', [], {'max_length': '10'})
}
}
complete_apps = ['maasserver'] | agpl-3.0 |
hanvo/MusicCloud | Crawler/Install Files/pygame/test/util/build_page/libs/build_client/mocks.py | 3 | 2115 | ################################################################################
# Imports
# StdLib
import sys
import optparse
import os
# User Libs
import regexes
import config
import update
from update_test import fixture, mock_config
from helpers import normp
################################################################################
# Canned stand-ins that bypass the slow SVN round-trips during testing.
svn_blame = lambda x: (0, '\n'.join([regexes.SVN_BLAME_TEST.strip()] * 1500))
get_and_brand_latest_svn = lambda x: 0
# Keep a reference to the real implementation before it is monkey-patched.
old_get_platform = config.get_platform_and_previous_rev
def get_platform_and_previous_rev(c, cfile):
    """Wrap the real lookup but force previous_rev to -1 so a build is
    always considered necessary."""
    old_get_platform(c, cfile)
    c.previous_rev = -1
def skip_svn():
    """Monkey-patch the slow SVN interactions with canned stand-ins."""
    update.svn_blame = svn_blame
    config.get_and_brand_latest_svn = get_and_brand_latest_svn
def force_build():
    """Monkey-patch config so every run looks like it needs a rebuild."""
    config.get_platform_and_previous_rev = get_platform_and_previous_rev
def run_tests():
    """Exercise update.run_tests() against a mocked-up configuration.

    Fix: the trailing ``print`` used Python-2-only statement syntax;
    parenthesizing the single argument keeps it valid on Python 2 and 3.
    """
    mock_config(
        tests_cmd = ['run_tests.py', 'sprite', '-s', '-F',
                     '../../output/arstdhneoi.txt'],
        src_path = 'pygame/trunk',
        install_env = os.environ.copy(),
    )
    update.run_tests()
    print(update.run_tests.output)
################################################################################
# Command-line switches selecting which mocks/behaviours to activate.
opt_parser = optparse.OptionParser()
opt_parser.add_option('-c', '--config', action = 'store_true')
opt_parser.add_option('-s', '--skip_svn', action = 'store_true')
opt_parser.add_option('-f', '--force_build', action = 'store_true')
opt_parser.add_option('-t', '--test', action = 'store_true')
if __name__ == '__main__':
    options, args = opt_parser.parse_args()
    # Forward the remaining args to the wrapped entry points.
    sys.argv = sys.argv[:1] + args
    if options.skip_svn: skip_svn()
    if options.force_build: force_build()
    if options.test:
        run_tests()
    else:
        # Swap out some slow components for quick debugging
        if options.config: config.main()
        else: update.main()
################################################################################ | bsd-3-clause |
NL66278/OCB | addons/crm_partner_assign/crm_partner_assign.py | 55 | 11274 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import random
from openerp.addons.base_geolocalize.models.res_partner import geo_find, geo_query_address
from openerp.osv import osv
from openerp.osv import fields
class res_partner_grade(osv.osv):
    """Partner grade: weights the lead-assignation lottery per partner."""
    _order = 'sequence'
    _name = 'res.partner.grade'
    _columns = {
        'sequence': fields.integer('Sequence'),
        'active': fields.boolean('Active'),
        'name': fields.char('Grade Name'),
        # Probability weight used when randomly assigning leads.
        'partner_weight': fields.integer('Grade Weight',
            help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
    }
    _defaults = {
        'active': lambda *args: 1,
        'partner_weight':1
    }
class res_partner_activation(osv.osv):
    """Simple ordered catalogue of partner activation states."""
    _name = 'res.partner.activation'
    _order = 'sequence'
    _columns = {
        'sequence' : fields.integer('Sequence'),
        'name' : fields.char('Name', required=True),
    }
class res_partner(osv.osv):
    """Extend partners with grading / assignation data for CRM leads."""
    _inherit = "res.partner"
    _columns = {
        # Probability weight used when randomly assigning leads.
        'partner_weight': fields.integer('Grade Weight',
            help="Gives the probability to assign a lead to this partner. (0 means no assignation.)"),
        'opportunity_assigned_ids': fields.one2many('crm.lead', 'partner_assigned_id',\
            'Assigned Opportunities'),
        'grade_id': fields.many2one('res.partner.grade', 'Grade'),
        'activation' : fields.many2one('res.partner.activation', 'Activation', select=1),
        'date_partnership' : fields.date('Partnership Date'),
        'date_review' : fields.date('Latest Partner Review'),
        'date_review_next' : fields.date('Next Partner Review'),
        # customer implementation
        'assigned_partner_id': fields.many2one(
            'res.partner', 'Implemented by',
        ),
        'implemented_partner_ids': fields.one2many(
            'res.partner', 'assigned_partner_id',
            string='Implementation References',
        ),
    }
    _defaults = {
        'partner_weight': lambda *args: 0
    }
    def onchange_grade_id(self, cr, uid, ids, grade_id, context=None):
        """On grade change, copy the grade's default weight to the partner
        (0 when the grade is cleared)."""
        res = {'value' :{'partner_weight':0}}
        if grade_id:
            partner_grade = self.pool.get('res.partner.grade').browse(cr, uid, grade_id)
            res['value']['partner_weight'] = partner_grade.partner_weight
        return res
class crm_lead(osv.osv):
    """Extend CRM leads with geolocation and partner-assignation fields."""
    _inherit = "crm.lead"
    _columns = {
        'partner_latitude': fields.float('Geo Latitude'),
        'partner_longitude': fields.float('Geo Longitude'),
        'partner_assigned_id': fields.many2one('res.partner', 'Assigned Partner',track_visibility='onchange' , help="Partner this case has been forwarded/assigned to.", select=True),
        'date_assign': fields.date('Assignation Date', help="Last date this case was forwarded/assigned to a partner"),
    }
    def _merge_data(self, cr, uid, ids, oldest, fields, context=None):
        """Include the geo-assignation fields when merging leads.

        NOTE(review): ``+=`` extends the caller's ``fields`` list in place;
        callers see the extra field names after this call — confirm that is
        intended before changing it.
        """
        fields += ['partner_latitude', 'partner_longitude', 'partner_assigned_id', 'date_assign']
        return super(crm_lead, self)._merge_data(cr, uid, ids, oldest, fields, context=context)
def onchange_assign_id(self, cr, uid, ids, partner_assigned_id, context=None):
"""This function updates the "assignation date" automatically, when manually assign a partner in the geo assign tab
"""
if not partner_assigned_id:
return {'value':{'date_assign': False}}
else:
partners = self.pool.get('res.partner').browse(cr, uid, [partner_assigned_id], context=context)
user_id = partners[0] and partners[0].user_id.id or False
return {'value':
{'date_assign': fields.date.context_today(self,cr,uid,context=context),
'user_id' : user_id}
}
    def action_assign_partner(self, cr, uid, ids, context=None):
        """Button action: geo-assign a partner to each selected lead."""
        return self.assign_partner(cr, uid, ids, partner_id=False, context=context)
def assign_partner(self, cr, uid, ids, partner_id=False, context=None):
partner_ids = {}
res = False
res_partner = self.pool.get('res.partner')
if not partner_id:
partner_ids = self.search_geo_partner(cr, uid, ids, context=context)
for lead in self.browse(cr, uid, ids, context=context):
if not partner_id:
partner_id = partner_ids.get(lead.id, False)
if not partner_id:
continue
self.assign_geo_localize(cr, uid, [lead.id], lead.partner_latitude, lead.partner_longitude, context=context)
partner = res_partner.browse(cr, uid, partner_id, context=context)
if partner.user_id:
salesteam_id = partner.section_id and partner.section_id.id or False
for lead_id in ids:
self.allocate_salesman(cr, uid, [lead_id], [partner.user_id.id], team_id=salesteam_id, context=context)
self.write(cr, uid, [lead.id], {'date_assign': fields.date.context_today(self,cr,uid,context=context), 'partner_assigned_id': partner_id}, context=context)
return res
    def assign_geo_localize(self, cr, uid, ids, latitude=False, longitude=False, context=None):
        """Store coordinates on the leads.

        When both ``latitude`` and ``longitude`` are given they are written
        to every lead as-is; otherwise each lead without coordinates is
        geocoded from its address (requires a country). Returns True.
        """
        if latitude and longitude:
            self.write(cr, uid, ids, {
                'partner_latitude': latitude,
                'partner_longitude': longitude
            }, context=context)
            return True
        # Don't pass context to browse()! We need country name in english below
        for lead in self.browse(cr, uid, ids):
            # Leads that already have coordinates are left untouched.
            if lead.partner_latitude and lead.partner_longitude:
                continue
            if lead.country_id:
                result = geo_find(geo_query_address(street=lead.street,
                                                    zip=lead.zip,
                                                    city=lead.city,
                                                    state=lead.state_id.name,
                                                    country=lead.country_id.name))
                if result:
                    self.write(cr, uid, [lead.id], {
                        'partner_latitude': result[0],
                        'partner_longitude': result[1]
                    }, context=context)
        return True
    def search_geo_partner(self, cr, uid, ids, context=None):
        """Return ``{lead_id: partner_id}`` of suggested partners.

        Candidate partners are searched in progressively wider geographic
        windows around each lead's coordinates, then country-wide, then by
        raw SQL distance as a last resort.  The final pick among candidates
        is weighted-random by ``partner_weight``.
        """
        res_partner = self.pool.get('res.partner')
        res_partner_ids = {}
        # Make sure coordinates are populated before searching around them.
        self.assign_geo_localize(cr, uid, ids, context=context)
        for lead in self.browse(cr, uid, ids, context=context):
            partner_ids = []
            if not lead.country_id:
                continue
            latitude = lead.partner_latitude
            longitude = lead.partner_longitude
            if latitude and longitude:
                # 1. first way: in the same country, small area
                partner_ids = res_partner.search(cr, uid, [
                    ('partner_weight', '>', 0),
                    ('partner_latitude', '>', latitude - 2), ('partner_latitude', '<', latitude + 2),
                    ('partner_longitude', '>', longitude - 1.5), ('partner_longitude', '<', longitude + 1.5),
                    ('country_id', '=', lead.country_id.id),
                ], context=context)
                # 2. second way: in the same country, big area
                if not partner_ids:
                    partner_ids = res_partner.search(cr, uid, [
                        ('partner_weight', '>', 0),
                        ('partner_latitude', '>', latitude - 4), ('partner_latitude', '<', latitude + 4),
                        ('partner_longitude', '>', longitude - 3), ('partner_longitude', '<' , longitude + 3),
                        ('country_id', '=', lead.country_id.id),
                    ], context=context)
                # 3. third way: in the same country, extra large area
                if not partner_ids:
                    partner_ids = res_partner.search(cr, uid, [
                        ('partner_weight','>', 0),
                        ('partner_latitude','>', latitude - 8), ('partner_latitude','<', latitude + 8),
                        ('partner_longitude','>', longitude - 8), ('partner_longitude','<', longitude + 8),
                        ('country_id', '=', lead.country_id.id),
                    ], context=context)
            # 5. fifth way: anywhere in same country
            if not partner_ids:
                # still haven't found any, let's take all partners in the country!
                partner_ids = res_partner.search(cr, uid, [
                    ('partner_weight', '>', 0),
                    ('country_id', '=', lead.country_id.id),
                ], context=context)
            # 6. sixth way: closest partner whatsoever, just to have at least one result
            if not partner_ids:
                # warning: point() type takes (longitude, latitude) as parameters in this order!
                cr.execute("""SELECT id, distance
                              FROM (select id, (point(partner_longitude, partner_latitude) <-> point(%s,%s)) AS distance FROM res_partner
                              WHERE partner_longitude is not null
                                    AND partner_latitude is not null
                                    AND partner_weight > 0) AS d
                              ORDER BY distance LIMIT 1""", (longitude, latitude))
                res = cr.dictfetchone()
                if res:
                    partner_ids.append(res['id'])
            # Weighted-random pick: each candidate owns a slice of the
            # cumulative [0, total_weight] range proportional to its weight.
            total_weight = 0
            toassign = []
            for partner in res_partner.browse(cr, uid, partner_ids, context=context):
                total_weight += partner.partner_weight
                toassign.append( (partner.id, total_weight) )
            random.shuffle(toassign) # avoid always giving the leads to the first ones in db natural order!
            nearest_weight = random.randint(0, total_weight)
            for partner_id, weight in toassign:
                if nearest_weight <= weight:
                    res_partner_ids[lead.id] = partner_id
                    break
        return res_partner_ids
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
dhruvsrivastava/OJ | flask/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/escprober.py | 2936 | 3187 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is mozilla.org code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from . import constants
from .escsm import (HZSMModel, ISO2022CNSMModel, ISO2022JPSMModel,
ISO2022KRSMModel)
from .charsetprober import CharSetProber
from .codingstatemachine import CodingStateMachine
from .compat import wrap_ord
class EscCharSetProber(CharSetProber):
    """Prober for escape-sequence based encodings (HZ, ISO-2022-CN/JP/KR).

    Runs several coding state machines in parallel over the input and
    reports the first machine that unambiguously matches.
    """

    def __init__(self):
        CharSetProber.__init__(self)
        # One state machine per candidate escape-based encoding.
        self._mCodingSM = [CodingStateMachine(model)
                           for model in (HZSMModel, ISO2022CNSMModel,
                                         ISO2022JPSMModel, ISO2022KRSMModel)]
        self.reset()

    def reset(self):
        CharSetProber.reset(self)
        # Reactivate every (non-None) machine; the active count deliberately
        # mirrors the original list length.
        for machine in filter(None, self._mCodingSM):
            machine.active = True
            machine.reset()
        self._mActiveSM = len(self._mCodingSM)
        self._mDetectedCharset = None

    def get_charset_name(self):
        return self._mDetectedCharset

    def get_confidence(self):
        # High confidence once a machine reported eItsMe, none otherwise.
        return 0.99 if self._mDetectedCharset else 0.00

    def feed(self, aBuf):
        for raw in aBuf:
            # PY3K: aBuf is a byte array, so iterating yields ints, not bytes
            code = wrap_ord(raw)
            for machine in self._mCodingSM:
                if not machine or not machine.active:
                    continue
                coding_state = machine.next_state(code)
                if coding_state == constants.eError:
                    # This machine can no longer match; retire it.
                    machine.active = False
                    self._mActiveSM -= 1
                    if self._mActiveSM <= 0:
                        self._mState = constants.eNotMe
                        return self.get_state()
                elif coding_state == constants.eItsMe:
                    self._mState = constants.eFoundIt
                    self._mDetectedCharset = machine.get_coding_state_machine()  # nopep8
                    return self.get_state()
        return self.get_state()
| bsd-3-clause |
renyi533/tensorflow | tensorflow/lite/testing/op_tests/tanh.py | 17 | 2112 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Test configs for tanh."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow.compat.v1 as tf
from tensorflow.lite.testing.zip_test_utils import create_tensor_data
from tensorflow.lite.testing.zip_test_utils import make_zip_of_tests
from tensorflow.lite.testing.zip_test_utils import register_make_test_function
@register_make_test_function()
def make_tanh_tests(options):
  """Make a set of tests to do tanh."""
  # Every shape is exercised both as a float model and fully quantized.
  test_parameters = [{
      "input_shape": [[], [1], [2, 3], [1, 1, 1, 1], [1, 3, 4, 3],
                      [3, 15, 14, 3], [3, 1, 2, 4, 6], [2, 2, 3, 4, 5, 6]],
      "fully_quantize": [True, False],
      "input_range": [(-4, 10)]
  }]

  def build_graph(parameters):
    """Build a graph applying tanh to a single placeholder."""
    placeholder = tf.compat.v1.placeholder(
        dtype=tf.float32, name="input", shape=parameters["input_shape"])
    return [placeholder], [tf.nn.tanh(placeholder)]

  def build_inputs(parameters, sess, inputs, outputs):
    """Feed random data drawn from the configured input range."""
    low, high = parameters["input_range"]
    values = create_tensor_data(np.float32, parameters["input_shape"],
                                low, high)
    return [values], sess.run(
        outputs, feed_dict=dict(zip(inputs, [values])))

  make_zip_of_tests(options, test_parameters, build_graph, build_inputs)
| apache-2.0 |
glove747/liberty-neutron | neutron/plugins/nuage/nuage_models.py | 47 | 3092 | # Copyright 2014 Alcatel-Lucent USA Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sqlalchemy as sa
from sqlalchemy import orm
from neutron.db import model_base
from neutron.db import models_v2
class NetPartition(model_base.BASEV2, models_v2.HasId):
    """DB model for a Nuage net-partition known to the plugin."""
    __tablename__ = 'nuage_net_partitions'
    name = sa.Column(sa.String(64))
    # Template ids on the Nuage side — presumably L3/L2 domain templates;
    # confirm against the Nuage plugin code that populates them.
    l3dom_tmplt_id = sa.Column(sa.String(36))
    l2dom_tmplt_id = sa.Column(sa.String(36))
    isolated_zone = sa.Column(sa.String(64))
    shared_zone = sa.Column(sa.String(64))
class NetPartitionRouter(model_base.BASEV2):
    """Mapping between a Neutron router and a Nuage net-partition.

    Composite primary key (net_partition_id, router_id); rows are removed
    automatically when either referenced record is deleted.
    """
    __tablename__ = "nuage_net_partition_router_mapping"
    net_partition_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('nuage_net_partitions.id',
                                               ondelete="CASCADE"),
                                 primary_key=True)
    router_id = sa.Column(sa.String(36),
                          sa.ForeignKey('routers.id', ondelete="CASCADE"),
                          primary_key=True)
    # Identifier of the corresponding router object on the Nuage backend.
    nuage_router_id = sa.Column(sa.String(36), unique=True)
    nuage_rtr_rt = sa.Column(sa.String(36))
    nuage_rtr_rd = sa.Column(sa.String(36))
class ProviderNetBinding(model_base.BASEV2):
    """Represents binding of virtual network to physical_network and vlan."""
    __tablename__ = 'nuage_provider_net_bindings'
    network_id = sa.Column(sa.String(36),
                           sa.ForeignKey('networks.id', ondelete="CASCADE"),
                           primary_key=True)
    network_type = sa.Column(sa.String(32), nullable=False)
    physical_network = sa.Column(sa.String(64), nullable=False)
    vlan_id = sa.Column(sa.Integer, nullable=False)
    # Eager-loaded backref so network.pnetbinding is available without an
    # extra query; cascade removes the binding with the network.
    network = orm.relationship(
        models_v2.Network,
        backref=orm.backref("pnetbinding", lazy='joined',
                            uselist=False, cascade='delete'))
class SubnetL2Domain(model_base.BASEV2):
    """Mapping between a Neutron subnet and a Nuage L2 domain/subnet."""
    __tablename__ = 'nuage_subnet_l2dom_mapping'
    subnet_id = sa.Column(sa.String(36),
                          sa.ForeignKey('subnets.id', ondelete="CASCADE"),
                          primary_key=True)
    net_partition_id = sa.Column(sa.String(36),
                                 sa.ForeignKey('nuage_net_partitions.id',
                                               ondelete="CASCADE"))
    # Backend-side identifiers for the subnet and its L2 domain template.
    nuage_subnet_id = sa.Column(sa.String(36), unique=True)
    nuage_l2dom_tmplt_id = sa.Column(sa.String(36))
    nuage_user_id = sa.Column(sa.String(36))
    nuage_group_id = sa.Column(sa.String(36))
    # True when the subnet is managed on the Nuage side (VSD-managed).
    nuage_managed_subnet = sa.Column(sa.Boolean())
| apache-2.0 |
WaltXon/curvey | config.py | 1 | 8173 | import copy
# Raw model inputs for the decline-curve / cash-flow calculation.
# Monetary values are plain floats; dates are M/D/YYYY strings.
inputs={
    # Per-product hyperbolic decline parameters.
    'curves': {
        'oil': {
            'Qi_monthly': 19400,
            'Qf_monthly': 15,
            'nominal_decline_annual': 1.66073,
            'b_factor': 1.25,
            'Dmin_annual': 0.06,
            'EUR': 789000,
        },
        'gas':{
            'Qi_monthly': 111000,
            'Qf_monthly': 0,
            'nominal_decline_annual': .64,
            'b_factor': 1.0,
            'Dmin_annual': 0.05,
            'EUR': 6581000,
        },
        'ngl':{},
    },
    # Tax rates expressed as fractions.
    'taxes':{
        'ad_valorum': .025,
        'severance': {
            'oil': 0.046,
            'gas':.075,
        }
    },
    'expenses': {
        'fixed': 10000,
        'variable': {
            'oil': .03,
            'gas':0.0,
            'ngl':0.0,
        },
    },
    # Capital costs: drilling (idc), completion (icc), and land.
    'capital':{
        'idc': 3.5*10**6,
        'icc': 3.0*10**6,
        'land':5.0*10**5,
    },
    'max_life_years': 50,
    'production_delay': 3,
    'start_date': '20160601',
    'standard_days_in_month': 30.4375,
    'standard_days_in_year': 365.25,
    # Strip prices keyed by month; beyond the strip, the escalator applies
    # up to the corresponding *_max cap.
    'price_oil': {'5/1/2019': 53.539999999999999, '5/1/2018': 52.18, '5/1/2017': 51.280000000000001, '06/01/2022': 57.369999999999997, '06/01/2024': 58.75, '05/01/2024': 58.75, '05/01/2022': 57.369999999999997, '05/01/2023': 58.18, '8/1/2021': 56.439999999999998, '8/1/2020': 55.219999999999999, '11/1/2019': 54.25, '11/1/2018': 52.93, '11/1/2020': 55.590000000000003, '11/1/2021': 56.770000000000003, '06/01/2023': 58.18, '3/1/2019': 53.32, '3/1/2018': 52.009999999999998, '2/1/2021': 55.899999999999999, '2/1/2020': 54.600000000000001, '3/1/2017': 51.090000000000003, '8/1/2018': 52.490000000000002, '8/1/2019': 53.82, '8/1/2016': 49.329999999999998, '8/1/2017': 51.469999999999999, '08/01/2024': 58.909999999999997, '08/01/2023': 58.579999999999998, '08/01/2022': 57.770000000000003, '9/1/2019': 53.950000000000003, '9/1/2018': 52.619999999999997, '9/1/2017': 51.530000000000001, '9/1/2016': 49.759999999999998, '6/1/2021': 56.380000000000003, '6/1/2020': 55.130000000000003, '3/1/2020': 54.710000000000001, '3/1/2021': 55.990000000000002, '11/01/2024': 58.909999999999997, '11/01/2023': 58.579999999999998, '11/01/2022': 57.770000000000003, '12/1/2018': 53.07, '12/1/2019': 54.420000000000002, '4/1/2021': 56.100000000000001, '4/1/2020': 54.829999999999998, '01/01/2022': 57.369999999999997, '01/01/2023': 58.18, '01/01/2024': 58.75, '7/1/2017': 51.420000000000002, '7/1/2016': 48.859999999999999, '1/1/2017': 50.850000000000001, '11/1/2017': 51.719999999999999, '11/1/2016': 50.390000000000001, '12/1/2021': 56.93, '12/1/2020': 55.770000000000003, '7/1/2019': 53.729999999999997, '7/1/2018': 52.380000000000003, '03/01/2022': 57.369999999999997, '03/01/2023': 58.18, '03/01/2024': 58.75, '4/1/2017': 51.189999999999998, '1/1/2020': 54.5, '1/1/2021': 55.829999999999998, '4/1/2018': 52.090000000000003, '4/1/2019': 53.420000000000002, '07/01/2024': 58.909999999999997, '07/01/2022': 57.770000000000003, '07/01/2023': 58.579999999999998, '9/1/2020': 55.32, '9/1/2021': 56.520000000000003, 
    '04/01/2023': 58.18, '04/01/2022': 57.369999999999997, '04/01/2024': 58.75, '5/1/2020': 54.969999999999999, '5/1/2021': 56.229999999999997, '10/1/2018': 52.770000000000003, '10/1/2019': 54.090000000000003, '10/1/2016': 50.090000000000003, '10/1/2017': 51.609999999999999, '2/1/2017': 50.979999999999997, '2/1/2018': 51.939999999999998, '2/1/2019': 53.210000000000001, '12/01/2022': 57.770000000000003, '12/01/2023': 58.579999999999998, '12/01/2024': 58.909999999999997, '7/1/2020': 55.159999999999997, '7/1/2021': 56.390000000000001, '10/1/2021': 56.630000000000003, '10/1/2020': 55.439999999999998, '6/1/2018': 52.299999999999997, '6/1/2019': 53.68, '12/1/2016': 50.649999999999999, '12/1/2017': 51.840000000000003, '09/01/2022': 57.770000000000003, '09/01/2023': 58.579999999999998, '6/1/2017': 51.369999999999997, '09/01/2024': 58.909999999999997, '02/01/2024': 58.75, '10/01/2022': 57.770000000000003, '10/01/2023': 58.579999999999998, '10/01/2024': 58.909999999999997, '02/01/2023': 58.18, '02/01/2022': 57.369999999999997, '1/1/2019': 53.140000000000001, '1/1/2018': 51.880000000000003},
    'after_strip_escalator_oil': 1.03,
    'price_oil_max': 65.0,
    'price_gas': {'5/1/2019': 2.87, '5/1/2018': 2.84, '12/1/2028': 4.74, '5/1/2017': 2.85, '8/1/2028': 4.35, '8/1/2023': 3.51, '8/1/2022': 3.34, '8/1/2021': 3.18, '8/1/2020': 3.03, '8/1/2027': 4.19, '8/1/2026': 4.03, '8/1/2025': 3.86, '8/1/2024': 3.69, '11/1/2028': 4.54, '12/1/2021': 3.45, '12/1/2020': 3.3, '11/1/2020': 3.14, '11/1/2021': 3.3, '11/1/2022': 3.47, '11/1/2023': 3.64, '11/1/2024': 3.83, '11/1/2025': 4.01, '11/1/2026': 4.19, '11/1/2027': 4.37, '2/1/2028': 4.69, '3/1/2019': 3.17, '3/1/2018': 3.2, '2/1/2021': 3.41, '2/1/2020': 3.29, '2/1/2023': 3.73, '2/1/2022': 3.56, '3/1/2017': 3.08, '2/1/2024': 3.91, '2/1/2027': 4.49, '2/1/2026': 4.29, '10/1/2027': 4.26, '12/1/2027': 4.56, '10/1/2026': 4.1, '12/1/2026': 4.36, '10/1/2025': 3.93, '10/1/2024': 3.75, '8/1/2018': 2.92, '8/1/2019': 2.96, '8/1/2016': 2.45, '8/1/2017': 2.95, '7/1/2027': 4.15, '9/1/2019': 2.95, '9/1/2018': 2.91, '7/1/2024': 3.65, '7/1/2025': 3.82, '9/1/2017': 2.94, '9/1/2016': 2.5, '6/1/2025': 3.78, '6/1/2024': 3.61, '6/1/2027': 4.11, '6/1/2026': 3.94, '6/1/2021': 3.11, '6/1/2020': 2.97, '6/1/2023': 3.43, '6/1/2022': 3.27, '3/1/2026': 4.23, '3/1/2027': 4.42, '3/1/2024': 3.84, '3/1/2025': 4.03, '3/1/2022': 3.5, '3/1/2023': 3.66, '3/1/2020': 3.22, '3/1/2021': 3.34, '1/1/2022': 3.59, '1/1/2023': 3.76, '7/1/2026': 3.99, '2/1/2025': 4.1, '4/1/2028': 4.25, '4/1/2027': 4.09, '4/1/2026': 3.92, '4/1/2025': 3.76, '4/1/2024': 3.58, '4/1/2023': 3.41, '4/1/2022': 3.24, '4/1/2021': 3.08, '4/1/2020': 2.94, '7/1/2017': 2.93, '7/1/2016': 2.38, '11/1/2017': 3.04, '11/1/2016': 2.75, '11/1/2019': 3.05, '11/1/2018': 3.0, '12/1/2023': 3.79, '12/1/2022': 3.62, '12/1/2025': 4.17, '12/1/2024': 3.98, '7/1/2019': 2.94, '7/1/2018': 2.91, '1/1/2028': 4.71, '4/1/2017': 2.87, '1/1/2020': 3.32, '1/1/2021': 3.43, '4/1/2018': 2.87, '4/1/2019': 2.88, '1/1/2024': 3.93, '1/1/2025': 4.12, '1/1/2026': 4.32, '1/1/2027': 4.51, '3/1/2028': 4.62, '9/1/2020': 3.03, '9/1/2021': 3.18, '9/1/2022': 3.35, '9/1/2023': 3.52, '9/1/2024': 3.7, 
    '9/1/2025': 3.87, '9/1/2026': 4.04, '9/1/2027': 4.21, '9/1/2028': 4.37, '6/1/2028': 4.27, '5/1/2024': 3.57, '5/1/2025': 3.75, '5/1/2026': 3.91, '5/1/2027': 4.07, '5/1/2020': 2.94, '5/1/2021': 3.08, '5/1/2022': 3.24, '5/1/2023': 3.4, '5/1/2028': 4.23, '10/1/2018': 2.93, '10/1/2019': 2.97, '10/1/2016': 2.57, '10/1/2017': 2.96, '2/1/2017': 3.13, '2/1/2018': 3.27, '2/1/2019': 3.23, '7/1/2028': 4.31, '10/1/2028': 4.42, '7/1/2022': 3.31, '7/1/2023': 3.47, '7/1/2020': 3.01, '7/1/2021': 3.15, '10/1/2023': 3.56, '10/1/2022': 3.38, '10/1/2021': 3.22, '10/1/2020': 3.07, '6/1/2018': 2.88, '6/1/2019': 2.91, '12/1/2016': 3.01, '12/1/2017': 3.19, '12/1/2018': 3.15, '12/1/2019': 3.2, '6/1/2017': 2.89, '1/1/2017': 3.13, '1/1/2019': 3.27, '1/1/2018': 3.3},
    'after_strip_escalator_gas': 1.03,
    'price_gas_max': 4.0,
    'price_ngl': {},
    'after_strip_escalator_ngl': 1.03,
    'price_ngl_max': 0.0,
    # Ownership interests and discounting, as fractions.
    'working_interest': 1.0,
    'net_revenue_interest': .75,
    'discount_rate_annual': .10,
    }
# Derived configuration: deep-copy the raw inputs so the adjustments below do
# not leak back into `inputs`.  The previous copy.copy() was a shallow copy,
# so the nested per-product curve dicts were shared between `inputs` and
# `config` and both objects were being mutated.
config = copy.deepcopy(inputs)
for product, record in config['curves'].items():
    if record != {}:
        # The hyperbolic decline math is singular at b == 1 and b == 0, so
        # nudge those exact values slightly off their singularities.
        b_factor = float(record['b_factor'])
        if b_factor == 1.0:
            record['b_factor_adj'] = 1.001
        elif b_factor == 0.0:
            record['b_factor_adj'] = 0.001
        else:
            record['b_factor_adj'] = record['b_factor']
        # Convert annual rates to their monthly equivalents.
        record['nominal_decline_monthly'] = record['nominal_decline_annual'] / 12.0
        record['Dmin_monthly'] = record['Dmin_annual'] / 12.0
config['max_life_months'] = inputs['max_life_years'] * 12
config['max_life_days'] = inputs['max_life_years'] * inputs['standard_days_in_year']
| cc0-1.0 |
slipstream/SlipStreamClient | client/src/external/paramiko/sftp.py | 52 | 6206 | # Copyright (C) 2003-2007 Robey Pointer <robeypointer@gmail.com>
#
# This file is part of paramiko.
#
# Paramiko is free software; you can redistribute it and/or modify it under the
# terms of the GNU Lesser General Public License as published by the Free
# Software Foundation; either version 2.1 of the License, or (at your option)
# any later version.
#
# Paramiko is distributed in the hope that it will be useful, but WITHOUT ANY
# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR
# A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more
# details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Paramiko; if not, write to the Free Software Foundation, Inc.,
# 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA.
import select
import socket
import struct
from paramiko import util
from paramiko.common import asbytes, DEBUG
from paramiko.message import Message
from paramiko.py3compat import byte_chr, byte_ord
# SFTP packet type codes: client requests (1-20), server responses (101-105)
# and extension packets (200-201), as used by _send_packet/_read_packet.
CMD_INIT, CMD_VERSION, CMD_OPEN, CMD_CLOSE, CMD_READ, CMD_WRITE, CMD_LSTAT, CMD_FSTAT, \
    CMD_SETSTAT, CMD_FSETSTAT, CMD_OPENDIR, CMD_READDIR, CMD_REMOVE, CMD_MKDIR, \
    CMD_RMDIR, CMD_REALPATH, CMD_STAT, CMD_RENAME, CMD_READLINK, CMD_SYMLINK = range(1, 21)
CMD_STATUS, CMD_HANDLE, CMD_DATA, CMD_NAME, CMD_ATTRS = range(101, 106)
CMD_EXTENDED, CMD_EXTENDED_REPLY = range(200, 202)
# SFTP status codes returned in CMD_STATUS packets.
SFTP_OK = 0
SFTP_EOF, SFTP_NO_SUCH_FILE, SFTP_PERMISSION_DENIED, SFTP_FAILURE, SFTP_BAD_MESSAGE, \
    SFTP_NO_CONNECTION, SFTP_CONNECTION_LOST, SFTP_OP_UNSUPPORTED = range(1, 9)
# Human-readable descriptions, indexed by the status codes above.
SFTP_DESC = ['Success',
             'End of file',
             'No such file',
             'Permission denied',
             'Failure',
             'Bad message',
             'No connection',
             'Connection lost',
             'Operation unsupported']
# pflags bits for CMD_OPEN.
SFTP_FLAG_READ = 0x1
SFTP_FLAG_WRITE = 0x2
SFTP_FLAG_APPEND = 0x4
SFTP_FLAG_CREATE = 0x8
SFTP_FLAG_TRUNC = 0x10
SFTP_FLAG_EXCL = 0x20
# Protocol version advertised in INIT/VERSION packets.
_VERSION = 3
# for debugging
CMD_NAMES = {
    CMD_INIT: 'init',
    CMD_VERSION: 'version',
    CMD_OPEN: 'open',
    CMD_CLOSE: 'close',
    CMD_READ: 'read',
    CMD_WRITE: 'write',
    CMD_LSTAT: 'lstat',
    CMD_FSTAT: 'fstat',
    CMD_SETSTAT: 'setstat',
    CMD_FSETSTAT: 'fsetstat',
    CMD_OPENDIR: 'opendir',
    CMD_READDIR: 'readdir',
    CMD_REMOVE: 'remove',
    CMD_MKDIR: 'mkdir',
    CMD_RMDIR: 'rmdir',
    CMD_REALPATH: 'realpath',
    CMD_STAT: 'stat',
    CMD_RENAME: 'rename',
    CMD_READLINK: 'readlink',
    CMD_SYMLINK: 'symlink',
    CMD_STATUS: 'status',
    CMD_HANDLE: 'handle',
    CMD_DATA: 'data',
    CMD_NAME: 'name',
    CMD_ATTRS: 'attrs',
    CMD_EXTENDED: 'extended',
    CMD_EXTENDED_REPLY: 'extended_reply'
}
class SFTPError (Exception):
    """Raised on SFTP protocol errors (bad packets, version mismatch)."""
    pass
class BaseSFTP (object):
    """Shared packet-level plumbing for SFTP clients and servers.

    Handles version negotiation and framing/unframing of SFTP packets over
    ``self.sock``, which may be a paramiko channel or a plain socket.
    """
    def __init__(self):
        self.logger = util.get_logger('paramiko.sftp')
        # Transport to read/write; set by the subclass before use.
        self.sock = None
        # When True, every raw packet is hex-dumped to the logger.
        self.ultra_debug = False
    ### internals...
    def _send_version(self):
        """Client side: send INIT and return the server's version number."""
        self._send_packet(CMD_INIT, struct.pack('>I', _VERSION))
        t, data = self._read_packet()
        if t != CMD_VERSION:
            raise SFTPError('Incompatible sftp protocol')
        version = struct.unpack('>I', data[:4])[0]
        # if version != _VERSION:
        # raise SFTPError('Incompatible sftp protocol')
        return version
    def _send_server_version(self):
        """Server side: wait for INIT, then reply with VERSION + extensions."""
        # winscp will freak out if the server sends version info before the
        # client finishes sending INIT.
        t, data = self._read_packet()
        if t != CMD_INIT:
            raise SFTPError('Incompatible sftp protocol')
        version = struct.unpack('>I', data[:4])[0]
        # advertise that we support "check-file"
        extension_pairs = ['check-file', 'md5,sha1']
        msg = Message()
        msg.add_int(_VERSION)
        msg.add(*extension_pairs)
        self._send_packet(CMD_VERSION, msg)
        return version
    def _log(self, level, msg, *args):
        """Forward a log record to the paramiko.sftp logger."""
        self.logger.log(level, msg, *args)
    def _write_all(self, out):
        """Send all of ``out``, looping over partial send()s; EOFError on close."""
        while len(out) > 0:
            n = self.sock.send(out)
            if n <= 0:
                raise EOFError()
            if n == len(out):
                return
            out = out[n:]
        return
    def _read_all(self, n):
        """Read exactly ``n`` bytes from the transport; EOFError on close."""
        out = bytes()
        while n > 0:
            if isinstance(self.sock, socket.socket):
                # sometimes sftp is used directly over a socket instead of
                # through a paramiko channel. in this case, check periodically
                # if the socket is closed. (for some reason, recv() won't ever
                # return or raise an exception, but calling select on a closed
                # socket will.)
                while True:
                    read, write, err = select.select([self.sock], [], [], 0.1)
                    if len(read) > 0:
                        x = self.sock.recv(n)
                        break
            else:
                x = self.sock.recv(n)
            if len(x) == 0:
                raise EOFError()
            out += x
            n -= len(x)
        return out
    def _send_packet(self, t, packet):
        """Frame and send one packet: 4-byte length, 1-byte type, payload."""
        #self._log(DEBUG2, 'write: %s (len=%d)' % (CMD_NAMES.get(t, '0x%02x' % t), len(packet)))
        packet = asbytes(packet)
        out = struct.pack('>I', len(packet) + 1) + byte_chr(t) + packet
        if self.ultra_debug:
            self._log(DEBUG, util.format_binary(out, 'OUT: '))
        self._write_all(out)
    def _read_packet(self):
        """Read one framed packet; return (type, payload) or (0, b'') if empty."""
        x = self._read_all(4)
        # most sftp servers won't accept packets larger than about 32k, so
        # anything with the high byte set (> 16MB) is just garbage.
        if byte_ord(x[0]):
            raise SFTPError('Garbage packet received')
        size = struct.unpack('>I', x)[0]
        data = self._read_all(size)
        if self.ultra_debug:
            self._log(DEBUG, util.format_binary(data, 'IN: '))
        if size > 0:
            t = byte_ord(data[0])
            #self._log(DEBUG2, 'read: %s (len=%d)' % (CMD_NAMES.get(t), '0x%02x' % t, len(data)-1))
            return t, data[1:]
        return 0, bytes()
| apache-2.0 |
desheffer/hue-adapter | hue.py | 1 | 4166 | from config import Config
import flask
import json
import os
from ssdp import SSDP
from threading import Thread
import urllib3
# Load the first configuration file found; abort if none exists.
# (Python 2 module: note the print statement and file() builtin below.)
config = None
config_file_paths = [
    os.path.dirname(os.path.realpath(__file__)) + "/config/default.cfg.local",
    "/etc/hue-adapter/default.cfg.local",
]
for config_file_path in config_file_paths:
    if os.path.isfile(config_file_path):
        # NOTE(review): later paths overwrite earlier ones when both exist —
        # confirm that precedence is intended.
        config = Config(file(config_file_path))
if not config:
    print "Cannot find configuration file"
    exit(1)
app = flask.Flask(__name__)
@app.route("/setup.xml")
def get_setup_file():
    """Serve the SSDP setup file."""
    # Device description advertised over SSDP; one XML element per entry.
    fragments = [
        "<?xml version=\"1.0\"?>",
        "<root xmlns=\"urn:schemas-upnp-org:device-1-0\">",
        "<specVersion>",
        "<major>1</major>",
        "<minor>0</minor>",
        "</specVersion>",
        "<URLBase>http://%s:%d/</URLBase>" % (config.web.addr, config.web.port),
        "<device>",
        "<deviceType>urn:schemas-upnp-org:device:Basic:1</deviceType>",
        "<friendlyName>Philips Hue Emulator</friendlyName>",
        "<manufacturer>Royal Philips Electronics</manufacturer>",
        "<manufacturerURL></manufacturerURL>",
        "<modelDescription>Philips Hue Emulator</modelDescription>",
        "<modelName>Philips hue bridge 2012</modelName>",
        "<modelNumber>929000226503</modelNumber>",
        "<modelURL></modelURL>",
        "<serialNumber>00000000000000000001</serialNumber>",
        "<UDN>uuid:776c1cbc-790a-425f-a890-a761ec57513c</UDN>",
        "</device>",
        "</root>",
    ]
    # Each fragment is newline-terminated, matching the advertised document.
    out = "\n".join(fragments) + "\n"
    return flask.Response(out, mimetype="text/xml")
@app.route("/api/<username>/lights", methods=["GET"])
def get_all_lights(username):
    """Get all lights"""
    def describe(light):
        # Static attribute/state payload mimicking a real Hue bulb.
        return {
            "state": {
                "on": False,
                "bri": 0,
                "hue": 0,
                "sat": 0,
                "xy": [0, 0],
                "ct": 0,
                "alert": "none",
                "effect": "none",
                "colormode": "hs",
                "reachable": True,
            },
            "type": "Extended color light",
            "name": light["name"],
            "modelid": "LCT001",
            "swversion": "6609461",
            "pointsymbol": {},
        }
    payload = dict((light_id, describe(light))
                   for light_id, light in config.lights.iteritems())
    return flask.jsonify(payload)
@app.route("/api/<username>/lights/<id>", methods=["GET"])
def get_light(username, id):
    """Get light attributes and state"""
    if id not in config.lights:
        # NOTE(review): 3 is the Hue API "resource not available" error
        # code, not a valid HTTP status — confirm intended.
        return "", 3
    light = config.lights[id]
    payload = {
        "state": {
            "on": False,
            "bri": 0,
            "hue": 0,
            "sat": 0,
            "xy": [0, 0],
            "ct": 0,
            "alert": "none",
            "effect": "none",
            "colormode": "hs",
            "reachable": True,
        },
        "type": "Extended color light",
        "name": light["name"],
        "modelid": "LCT001",
        "swversion": "6609461",
        "pointsymbol": {},
    }
    return flask.jsonify(payload)
@app.route("/api/<username>/lights/<id>/state", methods=["PUT"])
def set_lights_state(username, id):
    """Set light state: trigger the configured on/off URL for the light."""
    if id in config.lights:
        light = config.lights[id]
    else:
        # Hue error 3: resource not available.
        return "", 3
    data = flask.request.get_json(force=True)
    if not data or "on" not in data:
        # Hue error 6: parameter not available.
        return "", 6
    # Fire the webhook configured for the requested state.
    url = light["on_url"] if data["on"] else light["off_url"]
    try:
        http = urllib3.PoolManager()
        http.request("GET", url)
    except Exception:
        # Was a bare `except:`, which also swallowed SystemExit and
        # KeyboardInterrupt; keep the best-effort behavior but only for
        # ordinary errors (e.g. request failures).
        return "", 901
    out = [
        {
            "success": {
                "/lights/" + id + "/state/on": data["on"]
            }
        }
    ]
    return flask.Response(json.dumps(out), mimetype="text/json")
if __name__ == "__main__":
    # Advertise the emulated bridge over SSDP in a background daemon thread
    # so discovery runs alongside the HTTP API.
    ssdp = SSDP(config.web.addr, config.web.port)
    ssdp_thread = Thread(target=ssdp.run)
    ssdp_thread.setDaemon(True)
    ssdp_thread.start()
    # Blocking call: serve the Hue REST API until interrupted.
    app.run(host=config.web.addr, port=config.web.port)
| mit |
tony-joseph/crimson_antispam | antispam/management/commands/importspamips.py | 1 | 1141 | import csv
from django.core.management.base import BaseCommand
from django.db import IntegrityError
from antispam.models import SpamIP
class Command(BaseCommand):
    """Bulk-load spam IP addresses from CSV files into the SpamIP table."""

    help = "Imports spam IP address from csv file into database"

    def add_arguments(self, parser):
        # One or more CSV file paths; each row is (ip_address, created_on).
        parser.add_argument('csv_files', nargs='+', type=str)

    def handle(self, *args, **kwargs):
        inserted = 0
        skipped = 0
        for path in kwargs['csv_files']:
            with open(path, 'r') as handle:
                for row in csv.reader(handle):
                    try:
                        SpamIP.objects.create(
                            ip_address=row[0],
                            created_on=row[1],
                        )
                    except IntegrityError:
                        # Address already stored; count it and keep going.
                        skipped += 1
                    else:
                        inserted += 1
        self.stdout.write(self.style.SUCCESS('Inserted {} new ip addresses'.format(inserted)))
        self.stdout.write(self.style.WARNING('Skipped {} existing addresses'.format(skipped)))
| bsd-3-clause |
xxd3vin/spp-sdk | opt/Python27/Lib/test/test_userstring.py | 88 | 4850 | #!/usr/bin/env python
# UserString is a wrapper around the native builtin string type.
# UserString instances should behave similar to builtin string objects.
import string
from test import test_support, string_tests
from UserString import UserString, MutableString
import warnings
class UserStringTest(
    string_tests.CommonTest,
    string_tests.MixinStrUnicodeUserStringTest,
    string_tests.MixinStrStringUserStringTest,
    string_tests.MixinStrUserStringTest
    ):
    """Run the shared string test mixins against UserString."""
    type2test = UserString
    # Overwrite the three testing methods, because UserString
    # can't cope with arguments propagated to UserString
    # (and we don't test with subclasses)
    def checkequal(self, result, object, methodname, *args):
        """Assert object.methodname(*args) == result (after type fixup)."""
        result = self.fixtype(result)
        object = self.fixtype(object)
        # we don't fix the arguments, because UserString can't cope with it
        realresult = getattr(object, methodname)(*args)
        self.assertEqual(
            result,
            realresult
        )
    def checkraises(self, exc, object, methodname, *args):
        """Assert object.methodname(*args) raises exc."""
        object = self.fixtype(object)
        # we don't fix the arguments, because UserString can't cope with it
        self.assertRaises(
            exc,
            getattr(object, methodname),
            *args
        )
    def checkcall(self, object, methodname, *args):
        """Just call object.methodname(*args); pass if no exception."""
        object = self.fixtype(object)
        # we don't fix the arguments, because UserString can't cope with it
        getattr(object, methodname)(*args)
class MutableStringTest(UserStringTest):
    """Same suite as UserStringTest, plus mutation-specific tests."""
    type2test = MutableString
    # MutableStrings can be hashed => deactivate test
    def test_hash(self):
        pass
    def test_setitem(self):
        """Item assignment replaces one character; out-of-range raises."""
        s = self.type2test("foo")
        self.assertRaises(IndexError, s.__setitem__, -4, "bar")
        self.assertRaises(IndexError, s.__setitem__, 3, "bar")
        s[-1] = "bar"
        self.assertEqual(s, "fobar")
        s[0] = "bar"
        self.assertEqual(s, "barobar")
    def test_delitem(self):
        """del s[i] removes one character; out-of-range raises."""
        s = self.type2test("foo")
        self.assertRaises(IndexError, s.__delitem__, -4)
        self.assertRaises(IndexError, s.__delitem__, 3)
        del s[-1]
        self.assertEqual(s, "fo")
        del s[0]
        self.assertEqual(s, "o")
        del s[0]
        self.assertEqual(s, "")
    def test_setslice(self):
        """Slice assignment accepts str, UserString and arbitrary objects."""
        s = self.type2test("foo")
        s[:] = "bar"
        self.assertEqual(s, "bar")
        s[1:2] = "foo"
        self.assertEqual(s, "bfoor")
        s[1:-1] = UserString("a")
        self.assertEqual(s, "bar")
        # Non-string values are stringified on assignment.
        s[0:10] = 42
        self.assertEqual(s, "42")
    def test_delslice(self):
        s = self.type2test("foobar")
        del s[3:10]
        self.assertEqual(s, "foo")
        del s[-1:10]
        self.assertEqual(s, "fo")
    def test_extended_set_del_slice(self):
        """Extended-slice set/del must mirror list behavior exactly."""
        indices = (0, None, 1, 3, 19, 100, -1, -2, -31, -100)
        orig = string.ascii_letters + string.digits
        for start in indices:
            for stop in indices:
                # Use indices[1:] when MutableString can handle real
                # extended slices
                for step in (None, 1, -1):
                    s = self.type2test(orig)
                    L = list(orig)
                    # Make sure we have a slice of exactly the right length,
                    # but with (hopefully) different data.
                    data = L[start:stop:step]
                    data.reverse()
                    L[start:stop:step] = data
                    s[start:stop:step] = "".join(data)
                    self.assertEqual(s, "".join(L))
                    del L[start:stop:step]
                    del s[start:stop:step]
                    self.assertEqual(s, "".join(L))
    def test_immutable(self):
        """immutable() returns an equal, plain UserString copy."""
        s = self.type2test("foobar")
        s2 = s.immutable()
        self.assertEqual(s, s2)
        self.assertIsInstance(s2, UserString)
    def test_iadd(self):
        """In-place += concatenates str, UserString and other objects."""
        s = self.type2test("foo")
        s += "bar"
        self.assertEqual(s, "foobar")
        s += UserString("baz")
        self.assertEqual(s, "foobarbaz")
        s += 42
        self.assertEqual(s, "foobarbaz42")
    def test_imul(self):
        """In-place *= repeats; non-positive factors empty the string."""
        s = self.type2test("foo")
        s *= 1
        self.assertEqual(s, "foo")
        s *= 2
        self.assertEqual(s, "foofoo")
        s *= -1
        self.assertEqual(s, "")
def test_main():
    """Run both suites with MutableString deprecation noise silenced."""
    deprecated_patterns = (
        ".*MutableString has been removed",
        ".*__(get|set|del)slice__ has been removed",
    )
    with warnings.catch_warnings():
        for pattern in deprecated_patterns:
            warnings.filterwarnings("ignore", pattern, DeprecationWarning)
        test_support.run_unittest(UserStringTest, MutableStringTest)


if __name__ == "__main__":
    test_main()
| mit |
peckhams/topoflow | topoflow/components/satzone_darcy_layers.py | 1 | 18736 | #
# Copyright (c) 2001-2016, Scott D. Peckham
#
# Nov 2016.
# Sep 2014.
# Nov 2013. Converted TopoFlow to Python package.
# Jan 2013. Revised handling of input/output names.
# Oct 2012. CSDMS Standard Names and BMI.
# May 2010. Changes to initialize(), read_cfg_file() and unit_test().
# Aug 2009. Updates.
# Jul 2009. Updates.
# Jan 2009, Converted from IDL.
#
#-----------------------------------------------------------------------
# NOTES: This file defines a "Darcy layers" groundwater component
# and related functions. It inherits from the groundwater
# "base class" in "satzone_base.py".
#-----------------------------------------------------------------------
#
# class satzone_component
#
# get_component_name()
# get_attribute() # (10/26/11)
# get_input_var_names() # (5/16/12, Bolton)
# get_output_var_names() # (5/16/12, Bolton)
# get_var_name() # (5/16/12, Bolton)
# get_var_units() # (5/16/12, Bolton)
# ------------------------------------------------------------
# Move all "update_*" methods from satzone_base.py to here ?
# ------------------------------------------------------------
#
# Functions: (commented out)
# Total_Darcy_Layer_Flow_VK()
# Total_Subsurface_Flow()
# Darcy_Layer_Seep_Rate()
#
#-----------------------------------------------------------------------
import numpy as np
import os
from topoflow.components import satzone_base
#-----------------------------------------------------------------------
class satzone_component( satzone_base.satzone_component ):
    """Darcy-layers saturated-zone (groundwater) component for TopoFlow.

    All process code (initialize/update/finalize, water-table updates,
    etc.) is inherited from satzone_base.satzone_component.  This
    subclass only supplies the BMI/CSDMS metadata: static component
    attributes, CSDMS Standard Names for the input and output
    variables, and the maps from those long names to internal
    attribute names and units.
    """

    #-------------------------------------------------------------------
    _att_map = {
        'model_name':         'TopoFlow_Saturated_Zone_Darcy_Layers',
        'version':            '3.1',
        'author_name':        'Scott D. Peckham',
        'grid_type':          'uniform',
        'time_step_type':     'fixed',
        'step_method':        'explicit',
        #-------------------------------------------------------------
        'comp_name':          'SatZoneDarcyLayers',
        'model_family':       'TopoFlow',
        'cfg_template_file':  'Satzone_Darcy_Layers.cfg.in',
        'cfg_extension':      '_satzone_darcy_layers.cfg',
        'cmt_var_prefix':     '/SatZoneDarcyLayers/Input/Var/',
        'gui_xml_file':       '/home/csdms/cca/topoflow/3.1/src/share/cmt/gui/Satzone_Darcy_Layers.xml',
        'dialog_title':       'Saturated Zone: Darcy Layers Parameters',
        'time_units':         'seconds' }

    #----------------------------------------------
    # What about ET? (Taking water off the ground
    # water surface??? (Bolton, 5/16/2012)
    #----------------------------------------------
    _input_var_names = [
        'channel_water_x-section__mean_depth',            # (d@channels)
        'land_surface_water__evaporation_volume_flux',    # (ET@evap)
        'soil_water_sat-zone_top__recharge_volume_flux' ] # (Rg@infil)

    _output_var_names = [
        'land_surface__elevation',                                          # elev
        'land_surface_water__baseflow_volume_flux',                         # GW
        'land_surface_water__domain_time_integral_of_baseflow_volume_flux', # vol_GW
        'model__time_step',                                                 # dt
        'model_soil_layer-0__porosity',            # qs[0]
        'model_soil_layer-0__saturated_thickness', # y[0,:,:]
        'model_soil_layer-0__thickness',           # th[0,:,:]
        'model_soil_layer-1__porosity',            # qs[1]
        'model_soil_layer-1__saturated_thickness', # y[1,:,:]
        'model_soil_layer-1__thickness',           # th[1,:,:]
        'model_soil_layer-2__porosity',            # qs[2]
        'model_soil_layer-2__saturated_thickness', # y[2,:,:]
        'model_soil_layer-2__thickness',           # th[2,:,:]
        #----------------------------------------------
        # These are for *all* soil layers (not used).
        #----------------------------------------------
        # 'model_soil_layer__porosity',
        # 'model_soil_layer__saturated_thickness',
        # 'model_soil_layer__thickness',
        #----------------------------------------
        # The "top_layer" is same as "layer_0".
        #----------------------------------------
        'soil_water_sat-zone_top_surface__elevation', # h_table
        'soil_top-layer__porosity',                   # qs[0,:,:]
        'soil_top-layer__saturated_thickness',        # y[0,:,:]
        'soil_top-layer__thickness' ]                 # th[0,:,:]

    #-------------------------------------------
    # These are read from GUI/file, but could
    # still be returned.
    #-------------------------------------------
    # 'soil_water_sat-zone_top_surface__initial_elevation' ]  # h0_table

    #-------------------------------------------------------------------
    # Note: The variables qs, th and y are ndarrays.  If we define
    #       another variable as a slice or subset of these, such as
    #       qs_top = qs[0], or y_top = y[0,:,:], then they will
    #       also change whenever the main ndarray changes.
    #       With this trick, we can avoid slices and subscripts in
    #       the var_name_map, which getattr and setattr don't support.
    #-------------------------------------------------------------------
    _var_name_map = {
        'channel_water_x-section__mean_depth': 'd',   # channels comp
        'soil_water_sat-zone_top__recharge_volume_flux': 'Rg',
        #------------------------------------------------------------------------
        'land_surface__elevation': 'elev',
        'land_surface_water__baseflow_volume_flux': 'GW',
        'land_surface_water__domain_time_integral_of_baseflow_volume_flux': 'vol_GW',
        'land_surface_water__evaporation_volume_flux': 'ET',
        'model__time_step': 'dt',
        #----------------------------------------------------------------
        # These are defined in satzone_base.py. (9/22/14)
        'model_soil_layer-0__porosity': 'qs_layer_0',            ## 'qs[0]',
        'model_soil_layer-0__saturated_thickness': 'y_layer_0',  ## 'y[0,:,:]',
        'model_soil_layer-0__thickness': 'th_layer_0',           ## 'th[0,:,:]',
        'model_soil_layer-1__porosity': 'qs_layer_1',
        'model_soil_layer-1__saturated_thickness': 'y_layer_1',
        'model_soil_layer-1__thickness': 'th_layer_1',
        'model_soil_layer-2__porosity': 'qs_layer_2',
        'model_soil_layer-2__saturated_thickness': 'y_layer_2',
        'model_soil_layer-2__thickness': 'th_layer_2',
        #----------------------------------------------
        # These are for *all* soil layers (not used).
        #----------------------------------------------
        # 'model_soil_layers__porosity': 'qs',
        # 'model_soil_layers__saturated_thickness': 'y',
        # 'model_soil_layers__thickness': 'th',
        #----------------------------------------
        # The "top_layer" is same as "layer_0".
        #----------------------------------------
        'soil_water_sat-zone_top_surface__elevation': 'h_table',
        'soil_top-layer__porosity': 'qs_layer_0',            ## 'qs[0]',
        'soil_top-layer__saturated_thickness': 'y_layer_0',  ## 'y[0,:,:]',
        'soil_top-layer__thickness': 'th_layer_0' }          ## 'th[0]',

    _var_units_map = {
        'channel_water_x-section__mean_depth': 'm',   # channels comp
        'soil_water_sat-zone_top__recharge_volume_flux': 'm s-1',
        #----------------------------------------------------------------
        'land_surface__elevation': 'm',
        'land_surface_water__baseflow_volume_flux': 'm s-1',
        'land_surface_water__domain_time_integral_of_baseflow_volume_flux': 'm3',
        'land_surface_water__evaporation_volume_flux': 'm s-1',
        'model__time_step': 's',   ############# CHECK UNITS
        'model_soil_layer-0__porosity': '1',
        'model_soil_layer-0__saturated_thickness': 'm',
        'model_soil_layer-0__thickness': 'm',
        'model_soil_layer-1__porosity': '1',
        'model_soil_layer-1__saturated_thickness': 'm',
        'model_soil_layer-1__thickness': 'm',
        'model_soil_layer-2__porosity': '1',
        'model_soil_layer-2__saturated_thickness': 'm',
        'model_soil_layer-2__thickness': 'm',
        #----------------------------------------------
        # These are for *all* soil layers (not used).
        #----------------------------------------------
        # 'model_soil_layers__porosity': '1',
        # 'model_soil_layers__saturated_thickness': 'm',
        # 'model_soil_layers__thickness': 'm',
        #----------------------------------------
        # The "top_layer" is same as "layer_0".
        #----------------------------------------
        'soil_water_sat-zone_top_surface__elevation': 'm',
        'soil_top-layer__porosity': '1',
        'soil_top-layer__saturated_thickness': 'm',
        'soil_top-layer__thickness': 'm' }

    #------------------------------------------------
    # Return NumPy string arrays vs. Python lists ?
    #------------------------------------------------
    ## _input_var_names  = np.array( _input_var_names )
    ## _output_var_names = np.array( _output_var_names )

    #-------------------------------------------------------------------
    def get_component_name(self):
        """Return the formal name of this component."""
        return 'TopoFlow_Satzone_Darcy_Layers'

    #   get_component_name()
    #-------------------------------------------------------------------
    def get_attribute(self, att_name):
        """Return the static attribute for att_name (case-insensitive).

        Prints an error message and returns None when att_name is not
        one of the keys in _att_map.
        """
        try:
            return self._att_map[ att_name.lower() ]
        except KeyError:
            # Only a missing key is expected here; a bare "except"
            # would also hide real errors (e.g. a non-string att_name).
            print('###################################################')
            print(' ERROR: Could not find attribute: ' + att_name)
            print('###################################################')
            print(' ')

    #   get_attribute()
    #-------------------------------------------------------------------
    def get_input_var_names(self):
        #--------------------------------------------------------
        # Note: These are currently variables needed from other
        #       components vs. those read from files or GUI.
        #--------------------------------------------------------
        return self._input_var_names

    #   get_input_var_names()
    #-------------------------------------------------------------------
    def get_output_var_names(self):
        """Return the CSDMS Standard Names of all output variables."""
        return self._output_var_names

    #   get_output_var_names()
    #-------------------------------------------------------------------
    def get_var_name(self, long_var_name):
        """Map a CSDMS Standard Name to this component's internal name."""
        return self._var_name_map[ long_var_name ]

    #   get_var_name()
    #-------------------------------------------------------------------
    def get_var_units(self, long_var_name):
        """Return the units string (UDUNITS style) for a standard name."""
        return self._var_units_map[ long_var_name ]

    #   get_var_units()
#-------------------------------------------------------------------
##def Total_Darcy_Layer_Flow_VK(gv, h, y, dw, ds, pIDs):
##
## #-----------------------------------------------------
## #NOTES: gv = gw_vars = structure
## # z = elevation of land surface [m]
## # h = elevation of water table [m]
## # (z-h) = depth to water table [m]
## # Sh = water table slope [unitless]
## # K = hydraulic conductivity [m/s];
## # each layer can have its own K grid,
## # represented as VK1, VK2, etc.
## # dw = element width [m]
## # ds = hor. Dist. between pixel and parent [m]
## # y = wetted flow depth in each layer [m]
## # (could be a recycled local variable)
## # Q = total Darcy-flow discharge [m^3/s]
##
## # (summed over all layers)
## # diff = (partial sum of soil thicknesses -
## # depth to water table)
## #-----------------------------------------------------
##
## # FORWARD_FUNCTION Free_Surface_Slope
##
## #---------------------------------
## #Compute water table slope from h
## #---------------------------------
## #NB! h is assumed to be a grid.
## #---------------------------------
## #NB! Q is zero where Sh is zero.
## #-----------------------------------------
## #NB! Flow direction is still assumed to
## # be given by the DEM's D8 flow grid.
## #-----------------------------------------
## Sh = Free_Surface_Slope(float32(0.0), h, ds, pIDs)
##
## #----------------------------------------
## #Compute wetted-depth, y, for each layer
## #Now passed by caller.
## #----------------------------------------
## #** diff = -(z - h)
##
## #---------------------------------
## #NB! h is assumed to be a grid.
## #---------------------------------
## dims = idl_func.size(h, dimensions=True)
## ncols = dims[0]
## nrows = dims[1]
## Q = np.zeros([nrows, ncols], dtype='Float32')
##
## #------------------------------------
## #Add Q for each layer, via Darcy law
## #------------------------------------
## Q += (gv.VK1 * Sh * dw * y[0,:,:])
## Q += (gv.VK2 * Sh * dw * y[1,:,:])
## Q += (gv.VK3 * Sh * dw * y[2,:,:])
## Q += (gv.VK4 * Sh * dw * y[3,:,:])
## Q += (gv.VK5 * Sh * dw * y[4,:,:])
## Q += (gv.VK6 * Sh * dw * y[5,:,:])
## Q += (gv.VK7 * Sh * dw * y[6,:,:])
## Q += (gv.VK8 * Sh * dw * y[7,:,:])
## Q += (gv.VK9 * Sh * dw * y[8,:,:])
## Q += (gv.VK10 * Sh * dw * y[9,:,:])
##
## return Q
##
### Total_Darcy_Layer_Flow_VK
###-----------------------------------------------------------------------
##def Total_Subsurface_Flow(gv, h, y, dw, ds, pIDs):
##
## #-------------------------------------------------------
## #NOTES: gv = gw_vars = structure
## # h = elevation of water table [m]
##
## # Updates to y are also returned.
## #-------------------------------------------------------
## I2PY_expr = gv.method
## if I2PY_expr == 0:
## Q_gw = float32(0.0)
## elif I2PY_expr == 1:
## Q_gw = Total_Darcy_Layer_Flow(gv, h, y, dw, ds, pIDs)
## elif I2PY_expr == 2:
## Q_gw = Total_Darcy_Layer_Flow_VK(gv, h, y, dw, ds, pIDs)
##
## else:
## raise RuntimeError('no match found for expression')
##
## return Q_gw
##
## Total_Subsurface_Flow
##-----------------------------------------------------------------------
##def Darcy_Layer_Seep_Rate(gv, h, z, y, Rg, dw, ds, da, pIDs, \
## p1, p2, p3, p4, p5, p6, p7, p8, \
## w1, w2, w3, w4, w5, w6, w7, w8):
##
## #-------------------------------------------------
## #Notes: gv = gw_vars = structure
## # Bug fix on 7/19/05, gw_vars vs. gv used.
## #
## #7/19/05: This function may no longer be in use
## # anywhere. The call in the Seepage
## # function is commented out in favor of
## # a call to Total_Darcy_Layer_Flow.
## #-------------------------------------------------
##
## #------------------------------
## #Get the vertical contribution
## #See call to Precipitation.
## #------------------------------
## #Rg = 0.0
##
## #-----------------------------
## #Sum discharges of all layers
## #-----------------------------
## n_params = 25
## I2PY_expr = gw_vars.method
## if I2PY_expr == 0:
## Q_gw = float32(0.0)
## elif I2PY_expr == 1:
## Q_gw = Total_Darcy_Layer_Flow(gv, h, y, dw, ds, pIDs)
##
## elif I2PY_expr == 2:
## Q_gw = Total_Darcy_Layer_Flow_VK(gv, h, y, dw, ds, pIDs)
## else:
## raise RuntimeError('no match found for expression')
##
## #--------------------------
## #Print min and max of Q_gw
## #--------------------------
## Q_min = Q_gw.min()
## Q_max = Q_gw.max()
## TF_Print(' Q_min = ' + str(Q_min))
## TF_Print(' Q_max = ' + str(Q_max))
##
## #--------------------------------
## #Overwrite h & y with new values
## #Need to pass gw_vars.
## #--------------------------------
## gv = Update_Water_Table(h, y, Q_gw, Rg, da, gv, \
## p1, p2, p3, p4, p5, p6, p7, p8, \
## w1, w2, w3, w4, w5, w6, w7, w8)
##
## #--------------------------------------------
## #y is now updated in previous routine, but
## #is initialized with Wetted_Thicknesses fcn.
## #--------------------------------------------
## #*** y = Wetted_Thicknesses(gv, z, h)
##
## #--------------
## #For debugging
## #--------------
## #h_min = min(h, max=h_max)
##
## #print,'h_min = ',h_min
## #print,'h_max = ',h_max
## #------------------------------
## #z_min = min(z, max=z_max)
## #print,'z_min = ',z_min
## #print,'z_max = ',z_max
##
## #------------------------
## #Compute the "seep rate"
## #Can be pos. or neg. ?
## #------------------------
## dh_dt = (h - h_last) / gv.dt
## GW = (h > z) * dh_dt
## #*** GW = (h gt z) * (dh_dt > 0.0)
##
## #------------------------
## #Print min and max of GW
## #------------------------
## gw_min = gw.min()
## gw_max = gw.max()
## TF_Print(' GW_min = ' + str(gw_min))
## TF_Print(' GW_max = ' + str(gw_max))
##
## return GW
##
## Darcy_Layer_Seep_Rate
##-----------------------------------------------------------------------
##
##
| mit |
wprice/qpid-proton | proton-j/src/main/resources/cengine.py | 2 | 29433 | #
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
from org.apache.qpid.proton import Proton
from org.apache.qpid.proton.amqp import Symbol
from org.apache.qpid.proton.amqp.messaging import Source, Target, \
TerminusDurability, TerminusExpiryPolicy, Received, Accepted, \
Rejected, Released, Modified
from org.apache.qpid.proton.amqp.transaction import Coordinator
from org.apache.qpid.proton.amqp.transport import ErrorCondition, \
SenderSettleMode, ReceiverSettleMode
from org.apache.qpid.proton.engine import EndpointState, Sender, \
Receiver, Transport as _Transport, TransportException
from java.util import EnumSet
from compat import array, zeros
from cerror import *
from ccodec import *
# from proton/engine.h
# Endpoint state bits (local and remote halves of a connection/session/link).
PN_LOCAL_UNINIT = 1
PN_LOCAL_ACTIVE = 2
PN_LOCAL_CLOSED = 4
PN_REMOTE_UNINIT = 8
PN_REMOTE_ACTIVE = 16
PN_REMOTE_CLOSED = 32
# Sender settle modes.
PN_SND_UNSETTLED = 0
PN_SND_SETTLED = 1
PN_SND_MIXED = 2
# Receiver settle modes.
PN_RCV_FIRST = 0
PN_RCV_SECOND = 1
# Terminus types.
PN_UNSPECIFIED = 0
PN_SOURCE = 1
PN_TARGET = 2
PN_COORDINATOR = 3
# Terminus durability levels.
PN_NONDURABLE = 0
PN_CONFIGURATION = 1
PN_DELIVERIES = 2
# Terminus expiry policies.
PN_EXPIRE_WITH_LINK = 0
PN_EXPIRE_WITH_SESSION = 1
PN_EXPIRE_WITH_CONNECTION = 2
PN_EXPIRE_NEVER = 3
# Distribution modes for a source terminus.
PN_DIST_MODE_UNSPECIFIED = 0
PN_DIST_MODE_COPY = 1
PN_DIST_MODE_MOVE = 2
# AMQP descriptor codes for the standard delivery states.
PN_RECEIVED = (0x0000000000000023)
PN_ACCEPTED = (0x0000000000000024)
PN_REJECTED = (0x0000000000000025)
PN_RELEASED = (0x0000000000000026)
PN_MODIFIED = (0x0000000000000027)
# Transport trace flags (re-exported from the Java Transport class).
PN_TRACE_OFF = _Transport.TRACE_OFF
PN_TRACE_RAW = _Transport.TRACE_RAW
PN_TRACE_FRM = _Transport.TRACE_FRM
PN_TRACE_DRV = _Transport.TRACE_DRV
def wrap(obj, wrapper):
    # Reuse the wrapper cached on the impl's context slot; lazily build
    # and cache one the first time a given impl is seen.  A falsy impl
    # (e.g. None) yields None.
    if not obj:
        return None
    cached = obj.getContext()
    if not cached:
        cached = wrapper(obj)
        obj.setContext(cached)
    return cached
class pn_condition:
    """Mutable holder for an AMQP error condition (name/description/info)."""

    def __init__(self):
        self.name = None
        self.description = None
        self.info = pn_data(0)

    def decode(self, impl):
        # Mirror a Java ErrorCondition (or None) into this holder.
        if impl is None:
            self.name = None
            self.description = None
            self.info.clear()
            return
        cond = impl.getCondition()
        self.name = None if cond is None else cond.toString()
        self.description = impl.getDescription()
        obj2dat(impl.getInfo(), self.info)

    def encode(self):
        # An unset condition encodes as no ErrorCondition at all.
        if self.name is None:
            return None
        encoded = ErrorCondition()
        encoded.setCondition(Symbol.valueOf(self.name))
        encoded.setDescription(self.description)
        encoded.setInfo(dat2obj(self.info))
        return encoded
#-----------------------------------------------------------------------
# pn_condition accessors: thin functional API over pn_condition fields,
# mirroring the C proton pn_condition_* calls.
#-----------------------------------------------------------------------
def pn_condition_is_set(cond):
    # A condition counts as "set" once it has a non-empty name.
    return bool(cond.name)
def pn_condition_get_name(cond):
    return cond.name
def pn_condition_set_name(cond, name):
    cond.name = name
def pn_condition_get_description(cond):
    return cond.description
def pn_condition_set_description(cond, description):
    cond.description = description
def pn_condition_clear(cond):
    # Reset to the unset state (name/description dropped, info emptied).
    cond.name = None
    cond.description = None
    cond.info.clear()
def pn_condition_info(cond):
    return cond.info
class endpoint_wrapper:
    """Base wrapper pairing a Java endpoint impl with its local and
    remote pn_condition staging objects."""
    def __init__(self, impl):
        self.impl = impl
        self.condition = pn_condition()
        self.remote_condition = pn_condition()
    def on_close(self):
        # Push the locally staged condition onto the impl before close.
        cond = self.condition.encode()
        if cond:
            self.impl.setCondition(cond)
def remote_condition(endpoint):
    """Refresh and return an endpoint wrapper's remote pn_condition.

    Bug fix: this was previously written as an instance method named
    remote_condition, which is shadowed by the instance attribute of
    the same name assigned in __init__ (so it could never be called),
    while the module-level calls remote_condition(conn/ssn/link) below
    raised NameError.  It is now the module-level helper those callers
    expect.
    """
    endpoint.remote_condition.decode(endpoint.impl.getRemoteCondition())
    return endpoint.remote_condition
class pn_connection_wrapper(endpoint_wrapper):
    # Wrapper for a Java Connection; holds pn_data staging areas for the
    # properties/capabilities that get encoded onto the impl on open().
    def __init__(self, impl):
        endpoint_wrapper.__init__(self, impl)
        self.properties = pn_data(0)
        self.offered_capabilities = pn_data(0)
        self.desired_capabilities = pn_data(0)
def pn_connection():
    # Create a new Java connection and return its (cached) wrapper.
    return wrap(Proton.connection(), pn_connection_wrapper)
def set2mask(local, remote):
    # Fold Java EnumSets of local/remote EndpointState into the C-style
    # PN_LOCAL_* / PN_REMOTE_* bit mask.
    mask = 0
    if local.contains(EndpointState.UNINITIALIZED):
        mask |= PN_LOCAL_UNINIT
    if local.contains(EndpointState.ACTIVE):
        mask |= PN_LOCAL_ACTIVE
    if local.contains(EndpointState.CLOSED):
        mask |= PN_LOCAL_CLOSED
    if remote.contains(EndpointState.UNINITIALIZED):
        mask |= PN_REMOTE_UNINIT
    if remote.contains(EndpointState.ACTIVE):
        mask |= PN_REMOTE_ACTIVE
    if remote.contains(EndpointState.CLOSED):
        mask |= PN_REMOTE_CLOSED
    return mask
def endpoint_state(impl):
    # Current state of any endpoint impl as a PN_* bit mask.
    return set2mask(EnumSet.of(impl.getLocalState()),
                    EnumSet.of(impl.getRemoteState()))
def pn_connection_state(conn):
    return endpoint_state(conn.impl)
def pn_connection_condition(conn):
    # Local (writable) condition, applied to the impl on close.
    return conn.condition
def pn_connection_remote_condition(conn):
    return remote_condition(conn)
def pn_connection_properties(conn):
    # Staging pn_data; encoded onto the impl by pn_connection_open().
    return conn.properties
def pn_connection_remote_properties(conn):
    return obj2dat(conn.impl.getRemoteProperties())
def pn_connection_offered_capabilities(conn):
    return conn.offered_capabilities
def pn_connection_remote_offered_capabilities(conn):
    return array2dat(conn.impl.getRemoteOfferedCapabilities(), PN_SYMBOL)
def pn_connection_desired_capabilities(conn):
    return conn.desired_capabilities
def pn_connection_remote_desired_capabilities(conn):
    return array2dat(conn.impl.getRemoteDesiredCapabilities(), PN_SYMBOL)
def pn_connection_attachments(conn):
    return conn.impl.attachments()
def pn_connection_set_container(conn, name):
    conn.impl.setContainer(name)
def pn_connection_get_container(conn):
    return conn.impl.getContainer()
def pn_connection_remote_container(conn):
    return conn.impl.getRemoteContainer()
def pn_connection_get_hostname(conn):
    return conn.impl.getHostname()
def pn_connection_set_hostname(conn, name):
    conn.impl.setHostname(name)
def pn_connection_remote_hostname(conn):
    return conn.impl.getRemoteHostname()
def pn_connection_open(conn):
    # Encode the staged properties/capabilities onto the impl, then open.
    props = dat2obj(conn.properties)
    offered = dat2obj(conn.offered_capabilities)
    desired = dat2obj(conn.desired_capabilities)
    if props:
        conn.impl.setProperties(props)
    if offered:
        conn.impl.setOfferedCapabilities(array(list(offered), Symbol))
    if desired:
        conn.impl.setDesiredCapabilities(array(list(desired), Symbol))
    conn.impl.open()
def pn_connection_close(conn):
    conn.on_close()
    conn.impl.close()
def pn_connection_release(conn):
    conn.impl.free()
def pn_connection_transport(conn):
    return wrap(conn.impl.getTransport(), pn_transport_wrapper)
class pn_session_wrapper(endpoint_wrapper):
    # Sessions need no extra state beyond the endpoint_wrapper basics.
    pass
def pn_session(conn):
    # Create a new session on the connection and return its wrapper.
    return wrap(conn.impl.session(), pn_session_wrapper)
def pn_session_attachments(ssn):
    return ssn.impl.attachments()
def pn_session_state(ssn):
    return endpoint_state(ssn.impl)
def pn_session_get_incoming_capacity(ssn):
    return ssn.impl.getIncomingCapacity()
def pn_session_set_incoming_capacity(ssn, capacity):
    ssn.impl.setIncomingCapacity(capacity)
def pn_session_incoming_bytes(ssn):
    return ssn.impl.getIncomingBytes()
def pn_session_outgoing_bytes(ssn):
    return ssn.impl.getOutgoingBytes()
def pn_session_get_outgoing_window(ssn):
    return ssn.impl.getOutgoingWindow()
def pn_session_set_outgoing_window(ssn, window):
    ssn.impl.setOutgoingWindow(window)
def pn_session_condition(ssn):
    return ssn.condition
def pn_session_remote_condition(ssn):
    return remote_condition(ssn)
def pn_session_open(ssn):
    ssn.impl.open()
def pn_session_close(ssn):
    ssn.on_close()
    ssn.impl.close()
def mask2set(mask):
    # Inverse of set2mask: split a PN_* bit mask into Java EnumSets
    # (None for a half with no bits present).
    local = []
    remote = []
    if PN_LOCAL_UNINIT & mask:
        local.append(EndpointState.UNINITIALIZED)
    if PN_LOCAL_ACTIVE & mask:
        local.append(EndpointState.ACTIVE)
    if PN_LOCAL_CLOSED & mask:
        local.append(EndpointState.CLOSED)
    if PN_REMOTE_UNINIT & mask:
        remote.append(EndpointState.UNINITIALIZED)
    if PN_REMOTE_ACTIVE & mask:
        remote.append(EndpointState.ACTIVE)
    if PN_REMOTE_CLOSED & mask:
        remote.append(EndpointState.CLOSED)
    if local:
        local = EnumSet.of(*local)
    else:
        local = None
    if remote:
        remote = EnumSet.of(*remote)
    else:
        remote = None
    return local, remote
def pn_session_head(conn, mask):
    # First session whose state matches the mask, or None.
    local, remote = mask2set(mask)
    return wrap(conn.impl.sessionHead(local, remote), pn_session_wrapper)
def pn_session_connection(ssn):
    return wrap(ssn.impl.getConnection(), pn_connection_wrapper)
def pn_sender(ssn, name):
    return wrap(ssn.impl.sender(name), pn_link_wrapper)
def pn_receiver(ssn, name):
    return wrap(ssn.impl.receiver(name), pn_link_wrapper)
def pn_session_free(ssn):
    ssn.impl.free()
# Terminus type maps: J2P maps a Java terminus class to the PN_* code,
# P2J maps a PN_* code to a Java factory (callable with no arguments).
TERMINUS_TYPES_J2P = {
    Source: PN_SOURCE,
    Target: PN_TARGET,
    Coordinator: PN_COORDINATOR,
    None.__class__: PN_UNSPECIFIED
}
TERMINUS_TYPES_P2J = {
    PN_SOURCE: Source,
    PN_TARGET: Target,
    PN_COORDINATOR: Coordinator,
    PN_UNSPECIFIED: lambda: None
}
# Durability level maps between PN_* codes and TerminusDurability enums.
DURABILITY_P2J = {
    PN_NONDURABLE: TerminusDurability.NONE,
    PN_CONFIGURATION: TerminusDurability.CONFIGURATION,
    PN_DELIVERIES: TerminusDurability.UNSETTLED_STATE
}
DURABILITY_J2P = {
    TerminusDurability.NONE: PN_NONDURABLE,
    TerminusDurability.CONFIGURATION: PN_CONFIGURATION,
    TerminusDurability.UNSETTLED_STATE: PN_DELIVERIES
}
# Expiry policy maps between PN_* codes and TerminusExpiryPolicy enums.
EXPIRY_POLICY_P2J = {
    PN_EXPIRE_WITH_LINK: TerminusExpiryPolicy.LINK_DETACH,
    PN_EXPIRE_WITH_SESSION: TerminusExpiryPolicy.SESSION_END,
    PN_EXPIRE_WITH_CONNECTION: TerminusExpiryPolicy.CONNECTION_CLOSE,
    PN_EXPIRE_NEVER: TerminusExpiryPolicy.NEVER
}
EXPIRY_POLICY_J2P = {
    TerminusExpiryPolicy.LINK_DETACH: PN_EXPIRE_WITH_LINK,
    TerminusExpiryPolicy.SESSION_END: PN_EXPIRE_WITH_SESSION,
    TerminusExpiryPolicy.CONNECTION_CLOSE: PN_EXPIRE_WITH_CONNECTION,
    TerminusExpiryPolicy.NEVER: PN_EXPIRE_NEVER
}
# Distribution mode maps between PN_* codes and AMQP symbols.
DISTRIBUTION_MODE_P2J = {
    PN_DIST_MODE_UNSPECIFIED: None,
    PN_DIST_MODE_COPY: Symbol.valueOf("copy"),
    PN_DIST_MODE_MOVE: Symbol.valueOf("move")
}
DISTRIBUTION_MODE_J2P = {
    None: PN_DIST_MODE_UNSPECIFIED,
    Symbol.valueOf("copy"): PN_DIST_MODE_COPY,
    Symbol.valueOf("move"): PN_DIST_MODE_MOVE
}
class pn_terminus:
    """Python-side staging area for a link terminus.

    Fields are held here until encode() builds the Java terminus object
    (Source/Target/Coordinator); decode() refreshes this holder from a
    Java terminus received from the peer.
    """
    def __init__(self, type):
        self.type = type
        self.address = None
        self.durability = PN_NONDURABLE
        self.expiry_policy = PN_EXPIRE_WITH_SESSION
        self.distribution_mode = PN_DIST_MODE_UNSPECIFIED
        self.timeout = 0
        self.dynamic = False
        self.properties = pn_data(0)
        self.capabilities = pn_data(0)
        self.outcomes = pn_data(0)
        self.filter = pn_data(0)
    def copy(self, src):
        """Copy every terminus field from src (pn_terminus_copy)."""
        self.type = src.type
        self.address = src.address
        self.durability = src.durability
        self.expiry_policy = src.expiry_policy
        # Bug fix: distribution_mode was previously omitted here, so a
        # copied source terminus silently dropped its copy/move mode.
        self.distribution_mode = src.distribution_mode
        self.timeout = src.timeout
        self.dynamic = src.dynamic
        self.properties = src.properties
        self.capabilities = src.capabilities
        self.outcomes = src.outcomes
        self.filter = src.filter
    def decode(self, impl):
        """Refresh this holder from a Java terminus (no-op when None)."""
        if impl is not None:
            self.type = TERMINUS_TYPES_J2P[impl.__class__]
            if self.type in (PN_SOURCE, PN_TARGET):
                self.address = impl.getAddress()
                self.durability = DURABILITY_J2P[impl.getDurable()]
                self.expiry_policy = EXPIRY_POLICY_J2P[impl.getExpiryPolicy()]
                self.timeout = impl.getTimeout().longValue()
                self.dynamic = impl.getDynamic()
                obj2dat(impl.getDynamicNodeProperties(), self.properties)
                array2dat(impl.getCapabilities(), PN_SYMBOL, self.capabilities)
            if self.type == PN_SOURCE:
                # Source-only attributes.
                self.distribution_mode = DISTRIBUTION_MODE_J2P[impl.getDistributionMode()]
                array2dat(impl.getOutcomes(), PN_SYMBOL, self.outcomes)
                obj2dat(impl.getFilter(), self.filter)
    def encode(self):
        """Build and return the Java terminus for the staged fields."""
        impl = TERMINUS_TYPES_P2J[self.type]()
        if self.type in (PN_SOURCE, PN_TARGET):
            impl.setAddress(self.address)
            impl.setDurable(DURABILITY_P2J[self.durability])
            impl.setExpiryPolicy(EXPIRY_POLICY_P2J[self.expiry_policy])
            impl.setTimeout(UnsignedInteger.valueOf(self.timeout))
            impl.setDynamic(self.dynamic)
            props = dat2obj(self.properties)
            caps = dat2obj(self.capabilities)
            if props: impl.setDynamicNodeProperties(props)
            if caps:
                impl.setCapabilities(*array(list(caps), Symbol))
        if self.type == PN_SOURCE:
            impl.setDistributionMode(DISTRIBUTION_MODE_P2J[self.distribution_mode])
            outcomes = dat2obj(self.outcomes)
            filter = dat2obj(self.filter)
            if outcomes: impl.setOutcomes(outcomes)
            if filter: impl.setFilter(filter)
        return impl
#-----------------------------------------------------------------------
# pn_terminus accessors.  Setters return 0 (the C API's success code).
#-----------------------------------------------------------------------
def pn_terminus_get_type(terminus):
    return terminus.type
def pn_terminus_set_type(terminus, type):
    terminus.type = type
    return 0
def pn_terminus_get_address(terminus):
    return terminus.address
def pn_terminus_set_address(terminus, address):
    terminus.address = address
    return 0
def pn_terminus_get_durability(terminus):
    return terminus.durability
def pn_terminus_get_expiry_policy(terminus):
    return terminus.expiry_policy
def pn_terminus_set_timeout(terminus, timeout):
    terminus.timeout = timeout
    return 0
def pn_terminus_get_timeout(terminus):
    return terminus.timeout
def pn_terminus_get_distribution_mode(terminus):
    return terminus.distribution_mode
def pn_terminus_set_distribution_mode(terminus, mode):
    terminus.distribution_mode = mode
    return 0
def pn_terminus_is_dynamic(terminus):
    return terminus.dynamic
def pn_terminus_set_dynamic(terminus, dynamic):
    terminus.dynamic = dynamic
    return 0
def pn_terminus_properties(terminus):
    return terminus.properties
def pn_terminus_capabilities(terminus):
    return terminus.capabilities
def pn_terminus_outcomes(terminus):
    return terminus.outcomes
def pn_terminus_filter(terminus):
    return terminus.filter
def pn_terminus_copy(terminus, src):
    # Field-by-field copy; see pn_terminus.copy().
    terminus.copy(src)
    return 0
class pn_link_wrapper(endpoint_wrapper):
    # Wrapper for a Java Sender/Receiver; stages the four terminus
    # holders that are encoded onto the impl when the link opens.
    def __init__(self, impl):
        endpoint_wrapper.__init__(self, impl)
        self.source = pn_terminus(PN_SOURCE)
        self.remote_source = pn_terminus(PN_UNSPECIFIED)
        self.target = pn_terminus(PN_TARGET)
        self.remote_target = pn_terminus(PN_UNSPECIFIED)
    def on_open(self):
        # Push the locally staged source/target onto the impl.
        self.impl.setSource(self.source.encode())
        self.impl.setTarget(self.target.encode())
def pn_link_attachments(link):
    return link.impl.attachments()
def pn_link_source(link):
    # Refresh the staged holder from the impl before returning it.
    link.source.decode(link.impl.getSource())
    return link.source
def pn_link_remote_source(link):
    link.remote_source.decode(link.impl.getRemoteSource())
    return link.remote_source
def pn_link_target(link):
    link.target.decode(link.impl.getTarget())
    return link.target
def pn_link_remote_target(link):
    link.remote_target.decode(link.impl.getRemoteTarget())
    return link.remote_target
def pn_link_condition(link):
    return link.condition
def pn_link_remote_condition(link):
    return remote_condition(link)
# Sender settle mode maps between PN_SND_* codes and Java enums.
SND_SETTLE_MODE_P2J = {
    PN_SND_UNSETTLED: SenderSettleMode.UNSETTLED,
    PN_SND_SETTLED: SenderSettleMode.SETTLED,
    PN_SND_MIXED: SenderSettleMode.MIXED,
    None: None
}
SND_SETTLE_MODE_J2P = {
    SenderSettleMode.UNSETTLED: PN_SND_UNSETTLED,
    SenderSettleMode.SETTLED: PN_SND_SETTLED,
    SenderSettleMode.MIXED: PN_SND_MIXED,
    None: None
}
def pn_link_set_snd_settle_mode(link, mode):
    link.impl.setSenderSettleMode(SND_SETTLE_MODE_P2J[mode])
def pn_link_snd_settle_mode(link):
    return SND_SETTLE_MODE_J2P[link.impl.getSenderSettleMode()]
def pn_link_remote_snd_settle_mode(link):
    return SND_SETTLE_MODE_J2P[link.impl.getRemoteSenderSettleMode()]
# Receiver settle mode maps between PN_RCV_* codes and Java enums.
RCV_SETTLE_MODE_P2J = {
    PN_RCV_FIRST: ReceiverSettleMode.FIRST,
    PN_RCV_SECOND: ReceiverSettleMode.SECOND,
    None: None
}
RCV_SETTLE_MODE_J2P = {
    ReceiverSettleMode.FIRST: PN_RCV_FIRST,
    ReceiverSettleMode.SECOND: PN_RCV_SECOND,
    None: None
}
def pn_link_set_rcv_settle_mode(link, mode):
    link.impl.setReceiverSettleMode(RCV_SETTLE_MODE_P2J[mode])
def pn_link_rcv_settle_mode(link):
    return RCV_SETTLE_MODE_J2P[link.impl.getReceiverSettleMode()]
def pn_link_remote_rcv_settle_mode(link):
    return RCV_SETTLE_MODE_J2P[link.impl.getRemoteReceiverSettleMode()]
def pn_link_is_sender(link):
    return isinstance(link.impl, Sender)
def pn_link_is_receiver(link):
    return isinstance(link.impl, Receiver)
def pn_link_head(conn, mask):
    # First link on the connection whose state matches the mask.
    local, remote = mask2set(mask)
    return wrap(conn.impl.linkHead(local, remote), pn_link_wrapper)
def pn_link_next(link, mask):
    local, remote = mask2set(mask)
    return wrap(link.impl.next(local, remote), pn_link_wrapper)
def pn_link_session(link):
    return wrap(link.impl.getSession(), pn_session_wrapper)
def pn_link_state(link):
    return endpoint_state(link.impl)
def pn_link_name(link):
    return link.impl.getName()
def pn_link_open(link):
    link.on_open()
    link.impl.open()
def pn_link_close(link):
    link.on_close()
    link.impl.close()
def pn_link_detach(link):
    link.on_close()
    link.impl.detach()
def pn_link_flow(link, n):
    link.impl.flow(n)
def pn_link_drain(link, n):
    link.impl.drain(n)
def pn_link_drained(link):
    return link.impl.drained()
def pn_link_draining(link):
    return link.impl.draining()
def pn_link_credit(link):
    return link.impl.getCredit()
def pn_link_queued(link):
    return link.impl.getQueued()
def pn_link_unsettled(link):
    return link.impl.getUnsettled()
def pn_link_send(link, bytes):
    # Copy the Python byte string into a Java byte[] and send it.
    return link.impl.send(array(bytes, 'b'), 0, len(bytes))
def pn_link_recv(link, limit):
    # Returns (n, bytes): n is the impl's count (negative on EOS/none),
    # bytes is the received data or None when n < 0.
    ary = zeros(limit, 'b')
    n = link.impl.recv(ary, 0, limit)
    if n >= 0:
        bytes = ary[:n].tostring()
    else:
        bytes = None
    return n, bytes
def pn_link_advance(link):
    return link.impl.advance()
def pn_link_current(link):
    return wrap(link.impl.current(), pn_delivery_wrapper)
def pn_link_free(link):
    link.impl.free()
def pn_work_head(conn):
    # Head of the connection's list of deliveries with pending work.
    return wrap(conn.impl.getWorkHead(), pn_delivery_wrapper)
def pn_work_next(dlv):
    return wrap(dlv.impl.getWorkNext(), pn_delivery_wrapper)
# Delivery state maps: Java state class -> PN_* descriptor code, and
# PN_* code -> Java factory (callable with no arguments).
DELIVERY_STATES = {
    Received: PN_RECEIVED,
    Accepted: PN_ACCEPTED,
    Rejected: PN_REJECTED,
    Released: PN_RELEASED,
    Modified: PN_MODIFIED,
    None.__class__: 0
}
DISPOSITIONS = {
    PN_RECEIVED: Received,
    PN_ACCEPTED: Accepted,
    PN_REJECTED: Rejected,
    PN_RELEASED: Released,
    PN_MODIFIED: Modified,
    0: lambda: None
}
class pn_disposition:
    # Python-side staging area for a delivery disposition; decode()
    # mirrors a Java delivery state, encode() rebuilds one from the
    # staged fields.  Only the fields relevant to self.type are
    # meaningful; the rest are reset to defaults by decode().
    def __init__(self):
        self.type = 0
        self.data = pn_data(0)
        self.failed = False
        self.undeliverable = False
        self.annotations = pn_data(0)
        self.condition = pn_condition()
        self.section_number = 0
        self.section_offset = 0
    def decode(self, impl):
        # Map the Java state class to its PN_* code, then pull the
        # state-specific fields; unrelated fields are cleared so stale
        # values from a previous decode cannot leak through.
        self.type = DELIVERY_STATES[impl.__class__]
        if self.type == PN_REJECTED:
            self.condition.decode(impl.getError())
        else:
            pn_condition_clear(self.condition)
        if self.type == PN_MODIFIED:
            self.failed = impl.getDeliveryFailed()
            self.undeliverable = impl.getUndeliverableHere()
            obj2dat(impl.getMessageAnnotations(), self.annotations)
        else:
            self.failed = False
            self.undeliverable = False
            pn_data_clear(self.annotations)
        if self.type == PN_RECEIVED:
            self.section_number = impl.getSectionNumber().longValue()
            self.section_offset = impl.getSectionOffset().longValue()
        else:
            self.section_number = 0
            self.section_offset = 0
        self.data.clear()
        if impl:
            # XXX
            #self.data.putObject(impl)
            pass
        self.data.rewind()
    def encode(self):
        # Build the Java delivery state for self.type; unknown types
        # raise Skipped, type 0 encodes as None (no disposition).
        if self.type not in DISPOSITIONS:
            raise Skipped()
        impl = DISPOSITIONS[self.type]()
        if impl is None:
            return impl
        if self.type == PN_REJECTED:
            impl.setError(self.condition.encode())
        if self.type == PN_MODIFIED:
            impl.setDeliveryFailed(self.failed)
            impl.setUndeliverableHere(self.undeliverable)
            ann = dat2obj(self.annotations)
            if ann: impl.setMessageAnnotations(ann)
        if self.type == PN_RECEIVED:
            if self.section_number:
                impl.setSectionNumber(UnsignedInteger.valueOf(self.section_number))
            if self.section_offset:
                impl.setSectionOffset(UnsignedLong.valueOf(self.section_offset))
        return impl
# --- pn_disposition_* accessors ---------------------------------------------
# Plain attribute getters/setters over the pn_disposition mirror above.

def pn_disposition_type(dsp):
    return dsp.type

def pn_disposition_is_failed(dsp):
    return dsp.failed

def pn_disposition_set_failed(dsp, failed):
    dsp.failed = failed

def pn_disposition_is_undeliverable(dsp):
    return dsp.undeliverable

def pn_disposition_set_undeliverable(dsp, undeliverable):
    dsp.undeliverable = undeliverable

def pn_disposition_data(dsp):
    return dsp.data

def pn_disposition_annotations(dsp):
    return dsp.annotations

def pn_disposition_condition(dsp):
    return dsp.condition

def pn_disposition_get_section_number(dsp):
    return dsp.section_number

def pn_disposition_set_section_number(dsp, number):
    dsp.section_number = number

def pn_disposition_get_section_offset(dsp):
    return dsp.section_offset

def pn_disposition_set_section_offset(dsp, offset):
    dsp.section_offset = offset
class pn_delivery_wrapper:
    """Pairs a proton-j Delivery with cached local/remote disposition
    mirrors (refreshed on access by pn_delivery_local/remote)."""

    def __init__(self, impl):
        self.impl = impl
        self.local = pn_disposition()
        self.remote = pn_disposition()
# --- Delivery accessors ------------------------------------------------------

def pn_delivery(link, tag):
    # Create a delivery on the link; the tag is copied into a Java byte[].
    return wrap(link.impl.delivery(array(tag, 'b')), pn_delivery_wrapper)

def pn_delivery_tag(dlv):
    return dlv.impl.getTag().tostring()

def pn_delivery_attachments(dlv):
    return dlv.impl.attachments()

def pn_delivery_partial(dlv):
    return dlv.impl.isPartial()

def pn_delivery_pending(dlv):
    return dlv.impl.pending()

def pn_delivery_writable(dlv):
    return dlv.impl.isWritable()

def pn_delivery_readable(dlv):
    return dlv.impl.isReadable()

def pn_delivery_updated(dlv):
    return dlv.impl.isUpdated()

def pn_delivery_settled(dlv):
    return dlv.impl.remotelySettled()

def pn_delivery_local(dlv):
    # Refresh the cached local mirror from the Java state before returning.
    dlv.local.decode(dlv.impl.getLocalState())
    return dlv.local

def pn_delivery_local_state(dlv):
    dlv.local.decode(dlv.impl.getLocalState())
    return dlv.local.type

def pn_delivery_remote(dlv):
    dlv.remote.decode(dlv.impl.getRemoteState())
    return dlv.remote

def pn_delivery_remote_state(dlv):
    dlv.remote.decode(dlv.impl.getRemoteState())
    return dlv.remote.type

def pn_delivery_update(dlv, state):
    # Set the local state code, then push the encoded disposition down.
    dlv.local.type = state
    dlv.impl.disposition(dlv.local.encode())

def pn_delivery_link(dlv):
    return wrap(dlv.impl.getLink(), pn_link_wrapper)

def pn_delivery_settle(dlv):
    dlv.impl.settle()
class pn_transport_wrapper:
    """Pairs a proton-j Transport with shim-side state: the server flag
    and a cached condition mirror (see pn_transport_condition)."""

    def __init__(self, impl):
        self.impl = impl
        self.server = False
        self.condition = pn_condition()
def pn_transport():
    # Construct a fresh Java transport and wrap it.
    return wrap(Proton.transport(), pn_transport_wrapper)

def pn_transport_get_pytracer(trans):
    # Python-level tracers are not supported by this binding.
    raise Skipped()

def pn_transport_attachments(trans):
    return trans.impl.attachments()
def pn_transport_set_server(trans):
    """Mark this transport wrapper as the server end of the connection."""
    # Dropped a stray C-style trailing semicolon from the original.
    trans.server = True
# Frame-size accessors: straight delegation to the Java transport.

def pn_transport_get_max_frame(trans):
    return trans.impl.getMaxFrameSize()

def pn_transport_set_max_frame(trans, value):
    trans.impl.setMaxFrameSize(value)

def pn_transport_get_remote_max_frame(trans):
    return trans.impl.getRemoteMaxFrameSize()
def pn_transport_set_idle_timeout(trans, value):
    """Set the local idle timeout on the underlying Java transport."""
    # Dropped a stray C-style trailing semicolon from the original.
    trans.impl.setIdleTimeout(value)
# Timeout, frame-count and channel accessors: straight delegation.

def pn_transport_get_idle_timeout(trans):
    return trans.impl.getIdleTimeout()

def pn_transport_get_remote_idle_timeout(trans):
    return trans.impl.getRemoteIdleTimeout()

def pn_transport_get_frames_input(trans):
    return trans.impl.getFramesInput()

def pn_transport_get_frames_output(trans):
    return trans.impl.getFramesOutput()

def pn_transport_set_channel_max(trans, n):
    trans.impl.setChannelMax(n)

def pn_transport_get_channel_max(trans):
    return trans.impl.getChannelMax()

def pn_transport_remote_channel_max(trans):
    return trans.impl.getRemoteChannelMax()
def pn_transport_tick(trans, now):
    """Advance the transport's timers to ``now``.

    Returns whatever the Java ``tick()`` reports (per the proton API,
    the deadline of the next pending timer event, or 0 if none).
    """
    # Dropped a stray C-style trailing semicolon from the original.
    return trans.impl.tick(now)
def pn_transport_bind(trans, conn):
    # Returns 0 to mimic the C API's success code.
    trans.impl.bind(conn.impl)
    return 0

def pn_transport_unbind(trans):
    trans.impl.unbind()
    return 0

def pn_transport_trace(trans, n):
    trans.impl.trace(n)

def pn_transport_pending(trans):
    # Number of output bytes ready to be peeked/popped.
    return trans.impl.pending()

def pn_transport_peek(trans, size):
    # Copy up to `size` pending output bytes without consuming them;
    # the Java buffer position is rewound after the read.
    size = min(trans.impl.pending(), size)
    ba = zeros(size, 'b')
    if size:
        bb = trans.impl.head()
        bb.get(ba)
        bb.position(0)
    return 0, ba.tostring()

def pn_transport_pop(trans, size):
    # Discard `size` bytes of already-peeked output.
    trans.impl.pop(size)

def pn_transport_capacity(trans):
    # Input capacity; negative values are error/closed codes.
    return trans.impl.capacity()
def pn_transport_push(trans, input):
    """Feed ``input`` bytes into the transport in capacity-sized chunks.

    Returns the number of bytes consumed, or a negative capacity/error
    code from the transport.

    NOTE(review): if capacity() ever returns 0 while input remains, this
    loop would spin forever (input[0:] never shrinks) — confirm the Java
    transport cannot report 0 here, or guard against it.
    """
    result = 0
    while input:
        cap = pn_transport_capacity(trans)
        if cap < 0:
            return cap
        elif len(input) > cap:
            trimmed = input[:cap]
        else:
            trimmed = input
        bb = trans.impl.tail()
        bb.put(array(trimmed, 'b'))
        trans.impl.process()
        input = input[cap:]
        result += len(trimmed)
    return result
def pn_transport_close_head(trans):
    # Close the output side; returns 0 to mimic the C success code.
    trans.impl.close_head()
    return 0

def pn_transport_close_tail(trans):
    # Close the input side.
    trans.impl.close_tail()
    return 0

def pn_transport_closed(trans):
    return trans.impl.isClosed()

def pn_transport_condition(trans):
    # Refresh the cached condition mirror from the Java transport.
    trans.condition.decode(trans.impl.getCondition())
    return trans.condition
from org.apache.qpid.proton.engine import Event
# Re-export the proton-j Event.Type enum members under their C-style
# PN_* names so event-type comparisons read like the C binding.
PN_REACTOR_INIT = Event.Type.REACTOR_INIT
PN_REACTOR_QUIESCED = Event.Type.REACTOR_QUIESCED
PN_REACTOR_FINAL = Event.Type.REACTOR_FINAL
PN_TIMER_TASK = Event.Type.TIMER_TASK
PN_CONNECTION_INIT = Event.Type.CONNECTION_INIT
PN_CONNECTION_BOUND = Event.Type.CONNECTION_BOUND
PN_CONNECTION_UNBOUND = Event.Type.CONNECTION_UNBOUND
PN_CONNECTION_LOCAL_OPEN = Event.Type.CONNECTION_LOCAL_OPEN
PN_CONNECTION_REMOTE_OPEN = Event.Type.CONNECTION_REMOTE_OPEN
PN_CONNECTION_LOCAL_CLOSE = Event.Type.CONNECTION_LOCAL_CLOSE
PN_CONNECTION_REMOTE_CLOSE = Event.Type.CONNECTION_REMOTE_CLOSE
PN_CONNECTION_FINAL = Event.Type.CONNECTION_FINAL
PN_SESSION_INIT = Event.Type.SESSION_INIT
PN_SESSION_LOCAL_OPEN = Event.Type.SESSION_LOCAL_OPEN
PN_SESSION_REMOTE_OPEN = Event.Type.SESSION_REMOTE_OPEN
PN_SESSION_LOCAL_CLOSE = Event.Type.SESSION_LOCAL_CLOSE
PN_SESSION_REMOTE_CLOSE = Event.Type.SESSION_REMOTE_CLOSE
PN_SESSION_FINAL = Event.Type.SESSION_FINAL
PN_LINK_INIT = Event.Type.LINK_INIT
PN_LINK_LOCAL_OPEN = Event.Type.LINK_LOCAL_OPEN
PN_LINK_REMOTE_OPEN = Event.Type.LINK_REMOTE_OPEN
PN_LINK_LOCAL_CLOSE = Event.Type.LINK_LOCAL_CLOSE
PN_LINK_REMOTE_CLOSE = Event.Type.LINK_REMOTE_CLOSE
PN_LINK_LOCAL_DETACH = Event.Type.LINK_LOCAL_DETACH
PN_LINK_REMOTE_DETACH = Event.Type.LINK_REMOTE_DETACH
PN_LINK_FLOW = Event.Type.LINK_FLOW
PN_LINK_FINAL = Event.Type.LINK_FINAL
PN_DELIVERY = Event.Type.DELIVERY
PN_TRANSPORT = Event.Type.TRANSPORT
PN_TRANSPORT_ERROR = Event.Type.TRANSPORT_ERROR
PN_TRANSPORT_HEAD_CLOSED = Event.Type.TRANSPORT_HEAD_CLOSED
PN_TRANSPORT_TAIL_CLOSED = Event.Type.TRANSPORT_TAIL_CLOSED
PN_TRANSPORT_CLOSED = Event.Type.TRANSPORT_CLOSED
PN_SELECTABLE_INIT = Event.Type.SELECTABLE_INIT
PN_SELECTABLE_UPDATED = Event.Type.SELECTABLE_UPDATED
PN_SELECTABLE_READABLE = Event.Type.SELECTABLE_READABLE
PN_SELECTABLE_WRITABLE = Event.Type.SELECTABLE_WRITABLE
PN_SELECTABLE_EXPIRED = Event.Type.SELECTABLE_EXPIRED
PN_SELECTABLE_ERROR = Event.Type.SELECTABLE_ERROR
PN_SELECTABLE_FINAL = Event.Type.SELECTABLE_FINAL
def pn_collector():
    # Event collectors are used unwrapped; the Java object is the handle.
    return Proton.collector()

def pn_connection_collect(conn, coll):
    conn.impl.collect(coll)
class pn_event:
    """Thin holder pairing a (copied) Java Event with the shim API; the
    pn_event_* accessors below read through ``impl``."""

    def __init__(self, impl):
        self.impl = impl
def pn_collector_peek(coll):
    # Return a wrapped copy of the head event without popping it, or
    # None when the collector is empty.  Copying keeps the returned
    # event valid after the collector advances.
    ev = coll.peek()
    if ev:
        return pn_event(ev.copy())
    else:
        return None

def pn_collector_pop(coll):
    coll.pop()

def pn_collector_free(coll):
    # Java collectors are garbage collected; nothing to free explicitly.
    pass
# --- Event accessors: unwrap the endpoint referenced by an event -------------

def pn_event_reactor(event):
    return event.impl.getReactor()

def pn_event_connection(event):
    return wrap(event.impl.getConnection(), pn_connection_wrapper)

def pn_event_session(event):
    return wrap(event.impl.getSession(), pn_session_wrapper)

def pn_event_link(event):
    return wrap(event.impl.getLink(), pn_link_wrapper)

def pn_event_delivery(event):
    return wrap(event.impl.getDelivery(), pn_delivery_wrapper)

def pn_event_transport(event):
    return wrap(event.impl.getTransport(), pn_transport_wrapper)
from org.apache.qpid.proton.engine.impl import ConnectionImpl, SessionImpl, \
SenderImpl, ReceiverImpl, DeliveryImpl, TransportImpl
from org.apache.qpid.proton.reactor.impl import TaskImpl, SelectableImpl
# Map proton-j implementation classes to the C-style type-name strings
# used by pn_event_class().
J2C = {
    ConnectionImpl: "pn_connection",
    SessionImpl: "pn_session",
    SenderImpl: "pn_link",
    ReceiverImpl: "pn_link",
    DeliveryImpl: "pn_delivery",
    TransportImpl: "pn_transport",
    TaskImpl: "pn_task",
    SelectableImpl: "pn_selectable"
}

# Map the type-name strings back to wrapper factories; tasks, selectables
# and unknown ("pn_void") contexts are passed through unwrapped.
wrappers = {
    "pn_connection": lambda x: wrap(x, pn_connection_wrapper),
    "pn_session": lambda x: wrap(x, pn_session_wrapper),
    "pn_link": lambda x: wrap(x, pn_link_wrapper),
    "pn_delivery": lambda x: wrap(x, pn_delivery_wrapper),
    "pn_transport": lambda x: wrap(x, pn_transport_wrapper),
    "pn_task": lambda x: x,
    "pn_selectable": lambda x: x,
    "pn_void": lambda x: x
}
def pn_event_class(event):
    # Map the event's Java context class to its C-style type name;
    # unknown contexts fall back to "pn_void".
    ctx = event.impl.getContext()
    return J2C.get(ctx.getClass(), "pn_void")

def pn_event_context(event):
    # Wrap the context with the wrapper matching its class.
    return wrappers[pn_event_class(event)](event.impl.getContext())

def pn_event_type(event):
    return event.impl.getType()

def pn_event_type_name(etype):
    return str(etype)

def pn_event_category(event):
    return event.impl.getCategory()

def pn_event_attachments(event):
    return event.impl.attachments()
| apache-2.0 |
billychasen/thedailyshow | aligner.py | 1 | 2655 | from PIL import Image, ImageDraw
from faces import Faces
from throttle import Throttle
class Aligner:
    """
    Takes two images and aligns the second image face to the first image face.
    - Assumes Jon is on the right
    """

    def __init__(self):
        # Each entry: {"face": <face dict from Faces>, "image": <PIL Image>}
        self.faces = []

    def add_face_no_throttle(self, image_path):
        # NOTE(review): this still calls the @Throttle-wrapped add_face, so
        # it may not actually bypass throttling — verify Throttle semantics.
        self.add_face(image_path)

    @Throttle(0.3)
    def add_face(self, image_path):
        """Detect the key face in image_path and queue it for alignment.

        Raises MissingFaceException when the image has no faces, or
        NoRectangleException when no face lies right of the midpoint.
        """
        faces = Faces()
        faces.from_file(image_path)
        image = Image.open(image_path)
        if not faces.data:
            raise MissingFaceException("image has no faces")
        face = self.find_key_face(faces, image)
        if not face:
            raise NoRectangleException("can't find valid rectangle for faces in image " + image_path)
        self.faces.append({"face": face, "image": image})

    def align(self, out_path):
        """Scale the second image so its face matches the first image's face
        width, paste it (face-aligned) onto a black canvas the size of the
        first image, and save to out_path.  Requires exactly two queued faces.
        """
        if len(self.faces) != 2:
            raise NotEnoughFacesException("need more faces")
        image1 = self.faces[0]["image"]
        image2 = self.faces[1]["image"]
        image1_rect = self.faces[0]["face"]["faceRectangle"]
        image2_rect = self.faces[1]["face"]["faceRectangle"]
        # Scale factor that makes face 2 the same width as face 1.
        scale_factor = image1_rect["width"] / float(image2_rect["width"])
        width, height = image2.size
        image2 = image2.resize((int(width * scale_factor), int(height * scale_factor)), Image.ANTIALIAS)
        new_image = Image.new('RGBA', image1.size, 'black')
        # Face 2's rectangle after scaling.
        new_rect = {"top": image2_rect["top"] * scale_factor, "left": image2_rect["left"] * scale_factor,
                    "width": image2_rect["width"] * scale_factor, "height": image2_rect["height"] * scale_factor}
        # Translation that moves the scaled face onto face 1's position.
        offset = (int(round(image1_rect["left"] - new_rect["left"])), int(round(image1_rect["top"] - new_rect["top"])))
        new_image.paste(image2, offset)
        new_image.save(out_path)

    def find_key_face(self, faces, image):
        # Return the first face whose left edge is RIGHT of the image
        # midpoint (the host sits on the right); None when no face matches.
        # (The original comment said "further left than midpoint", which
        # contradicted the `> width / 2` test below.)
        for face in faces.data:
            width, height = image.size
            if face["faceRectangle"]["left"] > width / 2:
                return face
        return None
class MissingFaceException(Exception):
    """Raised when an input image contains no detectable faces."""

    def __init__(self, value):
        # Keep the raw detail around for callers that inspect .value.
        self.value = value

    def __str__(self):
        # Render via %r to match repr() of the stored value.
        return "%r" % (self.value,)
class NoRectangleException(Exception):
    """Raised when no face rectangle qualifies as the key face."""

    def __init__(self, value):
        # Keep the raw detail around for callers that inspect .value.
        self.value = value

    def __str__(self):
        # Render via %r to match repr() of the stored value.
        return "%r" % (self.value,)
class NotEnoughFacesException(Exception):
    """Raised when align() is called with fewer than two queued faces."""

    def __init__(self, value):
        # Keep the raw detail around for callers that inspect .value.
        self.value = value

    def __str__(self):
        # Render via %r to match repr() of the stored value.
        return "%r" % (self.value,)
| mit |
feroda/django | django/contrib/contenttypes/fields.py | 73 | 22873 | from __future__ import unicode_literals
from collections import defaultdict
from django.contrib.contenttypes.models import ContentType
from django.core import checks
from django.core.exceptions import FieldDoesNotExist, ObjectDoesNotExist
from django.db import DEFAULT_DB_ALIAS, connection, models, router, transaction
from django.db.models import DO_NOTHING, signals
from django.db.models.base import ModelBase
from django.db.models.fields.related import (
ForeignObject, ForeignObjectRel, ForeignRelatedObjectsDescriptor,
)
from django.db.models.query_utils import PathInfo
from django.utils.encoding import python_2_unicode_compatible, smart_text
from django.utils.functional import cached_property
@python_2_unicode_compatible
class GenericForeignKey(object):
    """
    Provide a generic many-to-one relation through the ``content_type`` and
    ``object_id`` fields.

    This class also doubles as an accessor to the related object (similar to
    ReverseSingleRelatedObjectDescriptor) by adding itself as a model
    attribute.
    """

    # Field flags
    auto_created = False
    concrete = False
    editable = False
    hidden = False

    is_relation = True
    many_to_many = False
    many_to_one = True
    one_to_many = False
    one_to_one = False
    related_model = None
    remote_field = None

    allow_unsaved_instance_assignment = False

    def __init__(self, ct_field='content_type', fk_field='object_id', for_concrete_model=True):
        self.ct_field = ct_field
        self.fk_field = fk_field
        self.for_concrete_model = for_concrete_model
        self.editable = False
        self.rel = None
        self.column = None

    def contribute_to_class(self, cls, name, **kwargs):
        # Register as a virtual field on the model and install self as
        # the instance attribute descriptor.
        self.name = name
        self.model = cls
        self.cache_attr = "_%s_cache" % name
        cls._meta.add_field(self, virtual=True)

        # Only run pre-initialization field assignment on non-abstract models
        if not cls._meta.abstract:
            signals.pre_init.connect(self.instance_pre_init, sender=cls)

        setattr(cls, name, self)

    def __str__(self):
        model = self.model
        app = model._meta.app_label
        return '%s.%s.%s' % (app, model._meta.object_name, self.name)

    def check(self, **kwargs):
        # System-check entry point; aggregates the three field validations.
        errors = []
        errors.extend(self._check_field_name())
        errors.extend(self._check_object_id_field())
        errors.extend(self._check_content_type_field())
        return errors

    def _check_field_name(self):
        # Trailing underscores clash with Django's query-lookup syntax.
        if self.name.endswith("_"):
            return [
                checks.Error(
                    'Field names must not end with an underscore.',
                    hint=None,
                    obj=self,
                    id='fields.E001',
                )
            ]
        else:
            return []

    def _check_object_id_field(self):
        # The fk_field must exist on the model.
        try:
            self.model._meta.get_field(self.fk_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey object ID references the non-existent field '%s'." % self.fk_field,
                    hint=None,
                    obj=self,
                    id='contenttypes.E001',
                )
            ]
        else:
            return []

    def _check_content_type_field(self):
        """
        Check if field named `field_name` in model `model` exists and is a
        valid content_type field (is a ForeignKey to ContentType).
        """
        try:
            field = self.model._meta.get_field(self.ct_field)
        except FieldDoesNotExist:
            return [
                checks.Error(
                    "The GenericForeignKey content type references the non-existent field '%s.%s'." % (
                        self.model._meta.object_name, self.ct_field
                    ),
                    hint=None,
                    obj=self,
                    id='contenttypes.E002',
                )
            ]
        else:
            if not isinstance(field, models.ForeignKey):
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E003',
                    )
                ]
            elif field.remote_field.model != ContentType:
                return [
                    checks.Error(
                        "'%s.%s' is not a ForeignKey to 'contenttypes.ContentType'." % (
                            self.model._meta.object_name, self.ct_field
                        ),
                        hint=(
                            "GenericForeignKeys must use a ForeignKey to "
                            "'contenttypes.ContentType' as the 'content_type' field."
                        ),
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
            else:
                return []

    def instance_pre_init(self, signal, sender, args, kwargs, **_kwargs):
        """
        Handle initializing an object with the generic FK instead of
        content_type and object_id fields.
        """
        if self.name in kwargs:
            value = kwargs.pop(self.name)
            if value is not None:
                kwargs[self.ct_field] = self.get_content_type(obj=value)
                kwargs[self.fk_field] = value._get_pk_val()
            else:
                kwargs[self.ct_field] = None
                kwargs[self.fk_field] = None

    def get_content_type(self, obj=None, id=None, using=None):
        # Resolve a ContentType either from a model instance (using its
        # database) or from a raw content-type id on a given database.
        if obj is not None:
            return ContentType.objects.db_manager(obj._state.db).get_for_model(
                obj, for_concrete_model=self.for_concrete_model)
        elif id is not None:
            return ContentType.objects.db_manager(using).get_for_id(id)
        else:
            # This should never happen. I love comments like this, don't you?
            raise Exception("Impossible arguments to GFK.get_content_type!")

    def get_prefetch_queryset(self, instances, queryset=None):
        if queryset is not None:
            raise ValueError("Custom queryset can't be used for this lookup.")

        # For efficiency, group the instances by content type and then do one
        # query per model
        fk_dict = defaultdict(set)
        # We need one instance for each group in order to get the right db:
        instance_dict = {}
        ct_attname = self.model._meta.get_field(self.ct_field).get_attname()
        for instance in instances:
            # We avoid looking for values if either ct_id or fkey value is None
            ct_id = getattr(instance, ct_attname)
            if ct_id is not None:
                fk_val = getattr(instance, self.fk_field)
                if fk_val is not None:
                    fk_dict[ct_id].add(fk_val)
                    instance_dict[ct_id] = instance

        ret_val = []
        for ct_id, fkeys in fk_dict.items():
            instance = instance_dict[ct_id]
            ct = self.get_content_type(id=ct_id, using=instance._state.db)
            ret_val.extend(ct.get_all_objects_for_this_type(pk__in=fkeys))

        # For doing the join in Python, we have to match both the FK val and the
        # content type, so we use a callable that returns a (fk, class) pair.
        def gfk_key(obj):
            ct_id = getattr(obj, ct_attname)
            if ct_id is None:
                return None
            else:
                model = self.get_content_type(id=ct_id,
                                              using=obj._state.db).model_class()
                return (model._meta.pk.get_prep_value(getattr(obj, self.fk_field)),
                        model)

        return (ret_val,
                lambda obj: (obj._get_pk_val(), obj.__class__),
                gfk_key,
                True,
                self.cache_attr)

    def is_cached(self, instance):
        return hasattr(instance, self.cache_attr)

    def __get__(self, instance, instance_type=None):
        # Descriptor protocol: resolve (and cache) the related object.
        if instance is None:
            return self

        try:
            return getattr(instance, self.cache_attr)
        except AttributeError:
            rel_obj = None

            # Make sure to use ContentType.objects.get_for_id() to ensure that
            # lookups are cached (see ticket #5570). This takes more code than
            # the naive ``getattr(instance, self.ct_field)``, but has better
            # performance when dealing with GFKs in loops and such.
            f = self.model._meta.get_field(self.ct_field)
            ct_id = getattr(instance, f.get_attname(), None)
            if ct_id is not None:
                ct = self.get_content_type(id=ct_id, using=instance._state.db)
                try:
                    rel_obj = ct.get_object_for_this_type(pk=getattr(instance, self.fk_field))
                except ObjectDoesNotExist:
                    # A dangling generic FK resolves to None rather than raising.
                    pass
            setattr(instance, self.cache_attr, rel_obj)
            return rel_obj

    def __set__(self, instance, value):
        # Descriptor protocol: set both underlying fields plus the cache.
        ct = None
        fk = None
        if value is not None:
            ct = self.get_content_type(obj=value)
            fk = value._get_pk_val()
            if not self.allow_unsaved_instance_assignment and fk is None:
                raise ValueError(
                    'Cannot assign "%r": "%s" instance isn\'t saved in the database.' %
                    (value, value._meta.object_name)
                )

        setattr(instance, self.ct_field, ct)
        setattr(instance, self.fk_field, fk)
        setattr(instance, self.cache_attr, value)
class GenericRel(ForeignObjectRel):
    """
    Used by GenericRelation to store information about the relation.
    """

    def __init__(self, field, to, related_name=None, related_query_name=None, limit_choices_to=None):
        # Deletion is handled by the GFK machinery, hence DO_NOTHING; '+'
        # suppresses the reverse accessor when no query name is given.
        super(GenericRel, self).__init__(
            field, to,
            related_name=related_query_name or '+',
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
            on_delete=DO_NOTHING,
        )
class GenericRelation(ForeignObject):
    """
    Provide a reverse to a relation created by a GenericForeignKey.
    """

    # Field flags
    auto_created = False

    many_to_many = False
    many_to_one = False
    one_to_many = True
    one_to_one = False

    rel_class = GenericRel

    def __init__(self, to, object_id_field='object_id', content_type_field='content_type',
                 for_concrete_model=True, related_query_name=None, limit_choices_to=None, **kwargs):
        kwargs['rel'] = self.rel_class(
            self, to,
            related_query_name=related_query_name,
            limit_choices_to=limit_choices_to,
        )

        kwargs['blank'] = True
        kwargs['editable'] = False
        kwargs['serialize'] = False

        # This construct is somewhat of an abuse of ForeignObject. This field
        # represents a relation from pk to object_id field. But, this relation
        # isn't direct, the join is generated reverse along foreign key. So,
        # the from_field is object_id field, to_field is pk because of the
        # reverse join.
        super(GenericRelation, self).__init__(
            to, from_fields=[object_id_field], to_fields=[], **kwargs)

        self.object_id_field_name = object_id_field
        self.content_type_field_name = content_type_field
        self.for_concrete_model = for_concrete_model

    def check(self, **kwargs):
        errors = super(GenericRelation, self).check(**kwargs)
        errors.extend(self._check_generic_foreign_key_existence())
        return errors

    def _check_generic_foreign_key_existence(self):
        # The target model must carry a GenericForeignKey over the same
        # (content_type, object_id) field pair for the reverse to work.
        target = self.remote_field.model
        if isinstance(target, ModelBase):
            fields = target._meta.virtual_fields
            if any(isinstance(field, GenericForeignKey) and
                    field.ct_field == self.content_type_field_name and
                    field.fk_field == self.object_id_field_name
                    for field in fields):
                return []
            else:
                return [
                    checks.Error(
                        ("The GenericRelation defines a relation with the model "
                         "'%s.%s', but that model does not have a GenericForeignKey.") % (
                            target._meta.app_label, target._meta.object_name
                        ),
                        hint=None,
                        obj=self,
                        id='contenttypes.E004',
                    )
                ]
        else:
            return []

    def resolve_related_fields(self):
        # Join: target model's object_id column <-> this model's pk.
        self.to_fields = [self.model._meta.pk.name]
        return [(self.remote_field.model._meta.get_field(self.object_id_field_name), self.model._meta.pk)]

    def get_path_info(self):
        opts = self.remote_field.model._meta
        target = opts.pk
        return [PathInfo(self.model._meta, opts, (target,), self.remote_field, True, False)]

    def get_reverse_path_info(self):
        opts = self.model._meta
        from_opts = self.remote_field.model._meta
        return [PathInfo(from_opts, opts, (opts.pk,), self, not self.unique, False)]

    def get_choices_default(self):
        return super(GenericRelation, self).get_choices(include_blank=False)

    def value_to_string(self, obj):
        # Serialize as the list of related primary keys.
        qs = getattr(obj, self.name).all()
        return smart_text([instance._get_pk_val() for instance in qs])

    def contribute_to_class(self, cls, name, **kwargs):
        kwargs['virtual_only'] = True
        super(GenericRelation, self).contribute_to_class(cls, name, **kwargs)
        self.model = cls
        # Install the related-manager descriptor under the field's name.
        setattr(cls, self.name, ReverseGenericRelatedObjectsDescriptor(self.remote_field))

    def set_attributes_from_rel(self):
        pass

    def get_internal_type(self):
        return "ManyToManyField"

    def get_content_type(self):
        """
        Return the content type associated with this field's model.
        """
        return ContentType.objects.get_for_model(self.model,
                                                 for_concrete_model=self.for_concrete_model)

    def get_extra_restriction(self, where_class, alias, remote_alias):
        # Constrain joins to rows whose content_type matches this model.
        field = self.remote_field.model._meta.get_field(self.content_type_field_name)
        contenttype_pk = self.get_content_type().pk
        cond = where_class()
        lookup = field.get_lookup('exact')(field.get_col(remote_alias), contenttype_pk)
        cond.add(lookup, 'AND')
        return cond

    def bulk_related_objects(self, objs, using=DEFAULT_DB_ALIAS):
        """
        Return all objects related to ``objs`` via this ``GenericRelation``.
        """
        return self.remote_field.model._base_manager.db_manager(using).filter(**{
            "%s__pk" % self.content_type_field_name: ContentType.objects.db_manager(using).get_for_model(
                self.model, for_concrete_model=self.for_concrete_model).pk,
            "%s__in" % self.object_id_field_name: [obj.pk for obj in objs]
        })
class ReverseGenericRelatedObjectsDescriptor(ForeignRelatedObjectsDescriptor):
    """
    Accessor to the related objects manager on the one-to-many relation created
    by GenericRelation.

    In the example::

        class Post(Model):
            comments = GenericRelation(Comment)

    ``post.comments`` is a ReverseGenericRelatedObjectsDescriptor instance.
    """

    @cached_property
    def related_manager_cls(self):
        # Built lazily (and cached) because it subclasses the related
        # model's default manager class.
        return create_generic_related_manager(
            self.rel.model._default_manager.__class__,
            self.rel,
        )
def create_generic_related_manager(superclass, rel):
    """
    Factory function to create a manager that subclasses another manager
    (generally the default manager of a given model) and adds behaviors
    specific to generic relations.
    """

    class GenericRelatedObjectManager(superclass):
        def __init__(self, instance=None):
            super(GenericRelatedObjectManager, self).__init__()

            self.instance = instance

            self.model = rel.model

            content_type = ContentType.objects.db_manager(instance._state.db).get_for_model(
                instance, for_concrete_model=rel.field.for_concrete_model)
            self.content_type = content_type

            qn = connection.ops.quote_name
            join_cols = rel.field.get_joining_columns(reverse_join=True)[0]
            self.source_col_name = qn(join_cols[0])
            self.target_col_name = qn(join_cols[1])

            self.content_type_field_name = rel.field.content_type_field_name
            self.object_id_field_name = rel.field.object_id_field_name
            self.prefetch_cache_name = rel.field.attname
            self.pk_val = instance._get_pk_val()

            # Filters restricting every queryset to this instance's rows.
            self.core_filters = {
                '%s__pk' % self.content_type_field_name: content_type.id,
                self.object_id_field_name: self.pk_val,
            }

        def __call__(self, **kwargs):
            # We use **kwargs rather than a kwarg argument to enforce the
            # `manager='manager_name'` syntax.
            manager = getattr(self.model, kwargs.pop('manager'))
            manager_class = create_generic_related_manager(manager.__class__, rel)
            return manager_class(instance=self.instance)
        do_not_call_in_templates = True

        def __str__(self):
            return repr(self)

        def get_queryset(self):
            # Serve from the prefetch cache when populated; otherwise
            # build a fresh filtered queryset on the right database.
            try:
                return self.instance._prefetched_objects_cache[self.prefetch_cache_name]
            except (AttributeError, KeyError):
                db = self._db or router.db_for_read(self.model, instance=self.instance)
                return super(GenericRelatedObjectManager, self).get_queryset().using(db).filter(**self.core_filters)

        def get_prefetch_queryset(self, instances, queryset=None):
            if queryset is None:
                queryset = super(GenericRelatedObjectManager, self).get_queryset()

            queryset._add_hints(instance=instances[0])
            queryset = queryset.using(queryset._db or self._db)

            query = {
                '%s__pk' % self.content_type_field_name: self.content_type.id,
                '%s__in' % self.object_id_field_name: set(obj._get_pk_val() for obj in instances)
            }

            # We (possibly) need to convert object IDs to the type of the
            # instances' PK in order to match up instances:
            object_id_converter = instances[0]._meta.pk.to_python
            return (queryset.filter(**query),
                    lambda relobj: object_id_converter(getattr(relobj, self.object_id_field_name)),
                    lambda obj: obj._get_pk_val(),
                    False,
                    self.prefetch_cache_name)

        def add(self, *objs):
            # Point each object's (content_type, object_id) pair at the
            # owning instance and save, all inside one atomic block.
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                for obj in objs:
                    if not isinstance(obj, self.model):
                        raise TypeError("'%s' instance expected" % self.model._meta.object_name)
                    setattr(obj, self.content_type_field_name, self.content_type)
                    setattr(obj, self.object_id_field_name, self.pk_val)
                    obj.save()
        add.alters_data = True

        def remove(self, *objs, **kwargs):
            if not objs:
                return
            bulk = kwargs.pop('bulk', True)
            self._clear(self.filter(pk__in=[o.pk for o in objs]), bulk)
        remove.alters_data = True

        def clear(self, **kwargs):
            bulk = kwargs.pop('bulk', True)
            self._clear(self, bulk)
        clear.alters_data = True

        def _clear(self, queryset, bulk):
            # Delete the related rows either in one bulk statement or
            # one-by-one (so per-instance delete() logic/signals run).
            db = router.db_for_write(self.model, instance=self.instance)
            queryset = queryset.using(db)
            if bulk:
                # `QuerySet.delete()` creates its own atomic block which
                # contains the `pre_delete` and `post_delete` signal handlers.
                queryset.delete()
            else:
                with transaction.atomic(using=db, savepoint=False):
                    for obj in queryset:
                        obj.delete()
        _clear.alters_data = True

        def set(self, objs, **kwargs):
            # Force evaluation of `objs` in case it's a queryset whose value
            # could be affected by `manager.clear()`. Refs #19816.
            objs = tuple(objs)

            clear = kwargs.pop('clear', False)
            db = router.db_for_write(self.model, instance=self.instance)
            with transaction.atomic(using=db, savepoint=False):
                if clear:
                    self.clear()
                    self.add(*objs)
                else:
                    # Compute the minimal add/remove delta instead of
                    # clearing and re-adding everything.
                    old_objs = set(self.using(db).all())
                    new_objs = []
                    for obj in objs:
                        if obj in old_objs:
                            old_objs.remove(obj)
                        else:
                            new_objs.append(obj)

                    self.remove(*old_objs)
                    self.add(*new_objs)
        set.alters_data = True

        def create(self, **kwargs):
            # Pre-fill the generic FK fields so the new row points here.
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).create(**kwargs)
        create.alters_data = True

        def get_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).get_or_create(**kwargs)
        get_or_create.alters_data = True

        def update_or_create(self, **kwargs):
            kwargs[self.content_type_field_name] = self.content_type
            kwargs[self.object_id_field_name] = self.pk_val
            db = router.db_for_write(self.model, instance=self.instance)
            return super(GenericRelatedObjectManager, self).using(db).update_or_create(**kwargs)
        update_or_create.alters_data = True

    return GenericRelatedObjectManager
| bsd-3-clause |
kikusu/chainer | cupy/math/hyperbolic.py | 6 | 1025 | from cupy.math import ufunc
sinh = ufunc.create_math_ufunc(
'sinh', 1, 'cupy_sinh',
'''Elementwise hyperbolic sine function.
.. seealso:: :data:`numpy.sinh`
''')
cosh = ufunc.create_math_ufunc(
'cosh', 1, 'cupy_cosh',
'''Elementwise hyperbolic cosine function.
.. seealso:: :data:`numpy.cosh`
''')
tanh = ufunc.create_math_ufunc(
'tanh', 1, 'cupy_tanh',
'''Elementwise hyperbolic tangent function.
.. seealso:: :data:`numpy.tanh`
''')
arcsinh = ufunc.create_math_ufunc(
'asinh', 1, 'cupy_arcsinh',
'''Elementwise inverse of hyperbolic sine function.
.. seealso:: :data:`numpy.arcsinh`
''')
arccosh = ufunc.create_math_ufunc(
'acosh', 1, 'cupy_arccosh',
'''Elementwise inverse of hyperbolic cosine function.
.. seealso:: :data:`numpy.arccosh`
''')
arctanh = ufunc.create_math_ufunc(
'atanh', 1, 'cupy_arctanh',
'''Elementwise inverse of hyperbolic tangent function.
.. seealso:: :data:`numpy.arctanh`
''')
| mit |
Lujeni/ansible | lib/ansible/plugins/cache/yaml.py | 64 | 1905 | # (c) 2017, Brian Coca
# (c) 2017 Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
cache: yaml
short_description: YAML formatted files.
description:
- This cache uses YAML formatted, per host, files saved to the filesystem.
version_added: "2.3"
author: Brian Coca (@bcoca)
options:
_uri:
required: True
description:
- Path in which the cache plugin will save the files
env:
- name: ANSIBLE_CACHE_PLUGIN_CONNECTION
ini:
- key: fact_caching_connection
section: defaults
_prefix:
description: User defined prefix to use when creating the files
env:
- name: ANSIBLE_CACHE_PLUGIN_PREFIX
ini:
- key: fact_caching_prefix
section: defaults
_timeout:
default: 86400
description: Expiration timeout for the cache plugin data
env:
- name: ANSIBLE_CACHE_PLUGIN_TIMEOUT
ini:
- key: fact_caching_timeout
section: defaults
type: integer
'''
import codecs
import yaml
from ansible.parsing.yaml.loader import AnsibleLoader
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.plugins.cache import BaseFileCacheModule
class CacheModule(BaseFileCacheModule):
    """
    A caching module backed by yaml files.
    """

    def _load(self, filepath):
        # Parse with AnsibleLoader so Ansible's custom YAML types
        # (e.g. vaulted/unsafe strings) round-trip correctly.
        with codecs.open(filepath, 'r', encoding='utf-8') as f:
            return AnsibleLoader(f).get_single_data()

    def _dump(self, value, filepath):
        # AnsibleDumper serializes Ansible's wrapped types; block style
        # (default_flow_style=False) keeps the cache files readable.
        with codecs.open(filepath, 'w', encoding='utf-8') as f:
            yaml.dump(value, f, Dumper=AnsibleDumper, default_flow_style=False)
| gpl-3.0 |
joone/chromium-crosswalk | tools/perf/metrics/timeline_unittest.py | 7 | 8435 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import unittest
from telemetry.testing import test_page_test_results
from telemetry.timeline import model as model_module
from telemetry.web_perf import timeline_interaction_record as tir_module
from metrics import timeline
def _GetInteractionRecord(start, end):
  """Builds a TimelineInteractionRecord named 'test-record' over [start, end]."""
  record_label = "test-record"
  return tir_module.TimelineInteractionRecord(record_label, start, end)
class LoadTimesTimelineMetric(unittest.TestCase):
  """Unit tests for timeline.LoadTimesTimelineMetric.

  Each test builds a synthetic TimelineModel, runs the metric over it, and
  asserts on the scalar values the metric reports.
  """
  def GetResults(self, metric, model, renderer_thread, interaction_records):
    # Helper: run the metric and capture its output in a fake results object.
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResults(model, renderer_thread, interaction_records, results)
    return results
  def testSanitizing(self):
    # Event name 'x.y' should be reported with the '.' sanitized to '_'.
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    # [ X ]
    # [ Y ]
    renderer_main.BeginSlice('cat1', 'x.y', 10, 0)
    renderer_main.EndSlice(20, 20)
    model.FinalizeImport()
    metric = timeline.LoadTimesTimelineMetric()
    results = self.GetResults(
        metric, model=model, renderer_thread=renderer_main,
        interaction_records=[_GetInteractionRecord(0, float('inf'))])
    results.AssertHasPageSpecificScalarValue(
        'CrRendererMain|x_y', 'ms', 10)
    results.AssertHasPageSpecificScalarValue(
        'CrRendererMain|x_y_max', 'ms', 10)
    results.AssertHasPageSpecificScalarValue(
        'CrRendererMain|x_y_avg', 'ms', 10)
  def testTimelineBetweenRange(self):
    # Only slices inside the interaction record's range should be counted;
    # 'z.t' (30..35) falls outside [10, 20] and must not appear.
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    # [ X ]     [ Z ]
    # [ Y ]     [ T ]
    #   [ interaction record ]
    renderer_main.BeginSlice('cat1', 'x.y', 10, 0)
    renderer_main.EndSlice(20, 20)
    renderer_main.BeginSlice('cat1', 'z.t', 30, 0)
    renderer_main.EndSlice(35, 35)
    model.FinalizeImport()
    metric = timeline.LoadTimesTimelineMetric()
    results = self.GetResults(
        metric, model=model, renderer_thread=renderer_main,
        interaction_records=[_GetInteractionRecord(10, 20)])
    results.AssertHasPageSpecificScalarValue(
        'CrRendererMain|x_y', 'ms', 10)
    results.AssertHasPageSpecificScalarValue(
        'CrRendererMain|x_y_max', 'ms', 10)
    results.AssertHasPageSpecificScalarValue(
        'CrRendererMain|x_y_avg', 'ms', 10)
  def testCounterSanitizing(self):
    # Counter names get the category prefixed and '.' sanitized; values are
    # summed (1 + 2 = 3) and averaged (1.5).
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    x_counter = renderer_main.parent.GetOrCreateCounter('cat', 'x.y')
    x_counter.samples += [1, 2]
    x_counter.series_names += ['a']
    x_counter.timestamps += [0, 1]
    model.FinalizeImport()
    metric = timeline.LoadTimesTimelineMetric()
    results = self.GetResults(
        metric, model=model, renderer_thread=renderer_main,
        interaction_records=[_GetInteractionRecord(0, float('inf'))])
    results.AssertHasPageSpecificScalarValue(
        'cat_x_y', 'count', 3)
    results.AssertHasPageSpecificScalarValue(
        'cat_x_y_avg', 'count', 1.5)
class ThreadTimesTimelineMetricUnittest(unittest.TestCase):
  """Unit tests for timeline.ThreadTimesTimelineMetric.

  Builds synthetic renderer-main and compositor threads, marks frames via
  compositor slices named timeline.FrameTraceName, and checks the per-frame
  and per-second values the metric derives from CPU-time (self-time) of the
  slices inside the interaction record.
  """
  def GetResults(self, metric, model, renderer_thread, interaction_record):
    # Helper: run the metric and capture its output in a fake results object.
    results = test_page_test_results.TestPageTestResults(self)
    metric.AddResults(model, renderer_thread, interaction_record,
                      results)
    return results
  def testResults(self):
    # Even with an empty trace, every thread category must produce a value.
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    metric = timeline.ThreadTimesTimelineMetric()
    metric.details_to_report = timeline.ReportMainThreadOnly
    results = self.GetResults(metric, model, renderer_main.parent,
                              [_GetInteractionRecord(1, 2)])
    # Test that all result thread categories exist
    for name in timeline.TimelineThreadCategories.values():
      results.GetPageSpecificValueNamed(
          timeline.ThreadCpuTimeResultName(name, 'frame'))
  def testBasic(self):
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    # Create two frame swaps (Results times should be divided by two) for
    # an interaction that lasts 20 milliseconds.
    cc_main = model.GetOrCreateProcess(1).GetOrCreateThread(3)
    cc_main.name = 'Compositor'
    cc_main.BeginSlice('cc_cat', timeline.FrameTraceName, 10, 10)
    cc_main.EndSlice(11, 11)
    cc_main.BeginSlice('cc_cat', timeline.FrameTraceName, 12, 12)
    cc_main.EndSlice(13, 13)
    # [ X ]         [ Z ]
    #   [ Y ]       [ T ]
    renderer_main.BeginSlice('cat1', 'X', 10, 0)
    renderer_main.BeginSlice('cat2', 'Y', 15, 5)
    renderer_main.EndSlice(16, 5.5)
    renderer_main.EndSlice(30, 19.5)
    # NOTE(review): these two 'Z' slices are begun but never ended -- they
    # sit outside the [10, 30] interaction anyway, but confirm FinalizeImport
    # tolerates unclosed slices as intended.
    renderer_main.BeginSlice('cat1', 'Z', 31, 20)
    renderer_main.BeginSlice('cat1', 'Z', 33, 21)
    model.FinalizeImport()
    # Exclude 'Z' using an action-range.
    metric = timeline.ThreadTimesTimelineMetric()
    metric.details_to_report = timeline.ReportMainThreadOnly
    results = self.GetResults(metric, model, renderer_main.parent,
                              [_GetInteractionRecord(10, 30)])
    # Test for the results we expect.
    main_thread = "renderer_main"
    cc_thread = 'renderer_compositor'
    assert_results = [
        (timeline.ThreadMeanFrameTimeResultName(cc_thread), 'ms', 10.0),
        (timeline.ThreadTasksResultName(main_thread, 'frame'), 'tasks', 0.5),
        (timeline.ThreadTasksResultName(main_thread, 'second'), 'tasks', 50.0),
        (timeline.ThreadTasksResultName(cc_thread, 'frame'), 'tasks', 1.0),
        (timeline.ThreadTasksResultName(cc_thread, 'second'), 'tasks', 100.0),
        (timeline.ThreadCpuTimeResultName(main_thread, 'frame'), 'ms', 9.75),
        (timeline.ThreadCpuTimeResultName(main_thread, 'second'), '%', 97.5),
        (timeline.ThreadCpuTimeResultName(cc_thread, 'frame'), 'ms', 1.0),
        (timeline.ThreadCpuTimeResultName(cc_thread, 'second'), '%', 10.0),
        (timeline.ThreadDetailResultName(main_thread, 'frame', 'cat1'),
         'ms', 9.5),
        (timeline.ThreadDetailResultName(main_thread, 'second', 'cat1'),
         '%', 95.0),
        (timeline.ThreadDetailResultName(main_thread, 'frame', 'cat2'),
         'ms', 0.5),
        (timeline.ThreadDetailResultName(main_thread, 'second', 'cat2'),
         '%', 5.0),
        (timeline.ThreadDetailResultName(
            main_thread, 'frame', 'idle'), 'ms', 0),
        (timeline.ThreadDetailResultName(
            main_thread, 'second', 'idle'), '%', 0)
    ]
    for name, unit, value in assert_results:
      results.AssertHasPageSpecificScalarValue(name, unit, value)
  def testOverheadIsRemoved(self):
    # Tracing-overhead slices must be subtracted from CPU-time results:
    # X has 10ms CPU time of which 1ms is overhead -> 9ms per frame.
    model = model_module.TimelineModel()
    renderer_main = model.GetOrCreateProcess(1).GetOrCreateThread(2)
    renderer_main.name = 'CrRendererMain'
    # Create one frame swap.
    cc_main = model.GetOrCreateProcess(1).GetOrCreateThread(3)
    cc_main.name = 'Compositor'
    cc_main.BeginSlice('cc_cat', timeline.FrameTraceName, 10, 10)
    cc_main.EndSlice(11, 11)
    # [      X       ]
    #    [Overhead]
    overhead_category = timeline.OverheadTraceCategory
    overhead_name = timeline.OverheadTraceName
    renderer_main.BeginSlice('cat1', 'X', 10, 0)
    renderer_main.BeginSlice(overhead_category, overhead_name, 15, 5)
    renderer_main.EndSlice(16, 6)
    renderer_main.EndSlice(30, 10)
    model.FinalizeImport()
    # Include everything in an action-range.
    metric = timeline.ThreadTimesTimelineMetric()
    metric.details_to_report = timeline.ReportMainThreadOnly
    results = self.GetResults(metric, model, renderer_main.parent,
                              [_GetInteractionRecord(10, 30)])
    # Test a couple specific results.
    assert_results = [
        (timeline.ThreadCpuTimeResultName(
            'renderer_main', 'frame'), 'ms', 9.0),
        (timeline.ThreadCpuTimeResultName(
            'renderer_main', 'second'), '%', 45.0),
    ]
    for name, unit, value in assert_results:
      results.AssertHasPageSpecificScalarValue(name, unit, value)
| bsd-3-clause |
romain-dartigues/ansible | lib/ansible/plugins/callback/splunk.py | 54 | 8017 | # -*- coding: utf-8 -*-
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
DOCUMENTATION = '''
callback: splunk
type: aggregate
short_description: Sends task result events to Splunk HTTP Event Collector
author: "Stuart Hirst <support@convergingdata.com>"
description:
- This callback plugin will send task results as JSON formatted events to a Splunk HTTP collector.
- The companion Splunk Monitoring & Diagnostics App is available here "https://splunkbase.splunk.com/app/4023/"
- Credit to "Ryan Currah (@ryancurrah)" for original source upon which this is based.
version_added: "2.7"
requirements:
- Whitelisting this callback plugin
- 'Create a HTTP Event Collector in Splunk'
- 'Define the url and token in ansible.cfg'
options:
url:
description: URL to the Splunk HTTP collector source
env:
- name: SPLUNK_URL
ini:
- section: callback_splunk
key: url
authtoken:
description: Token to authenticate the connection to the Splunk HTTP collector
env:
- name: SPLUNK_AUTHTOKEN
ini:
- section: callback_splunk
key: authtoken
'''
EXAMPLES = '''
examples: >
To enable, add this to your ansible.cfg file in the defaults block
[defaults]
callback_whitelist = splunk
Set the environment variable
export SPLUNK_URL=http://mysplunkinstance.datapaas.io:8088/services/collector/event
export SPLUNK_AUTHTOKEN=f23blad6-5965-4537-bf69-5b5a545blabla88
Set the ansible.cfg variable in the callback_splunk block
[callback_splunk]
url = http://mysplunkinstance.datapaas.io:8088/services/collector/event
authtoken = f23blad6-5965-4537-bf69-5b5a545blabla88
'''
import json
import uuid
import socket
import getpass
from datetime import datetime
from os.path import basename
from ansible.module_utils.urls import open_url
from ansible.parsing.ajson import AnsibleJSONEncoder
from ansible.plugins.callback import CallbackBase
class SplunkHTTPCollectorSource(object):
    """Gathers per-run context and ships individual task results to a
    Splunk HTTP Event Collector (HEC) endpoint as JSON events."""
    def __init__(self):
        self.ansible_check_mode = False
        self.ansible_playbook = ""
        self.ansible_version = ""
        # One session id per plugin instance so events from a run correlate.
        self.session = str(uuid.uuid4())
        self.host = socket.gethostname()
        self.ip_address = socket.gethostbyname(socket.gethostname())
        self.user = getpass.getuser()
    def send_event(self, url, authtoken, state, result, runtime):
        """POST a single task result to Splunk.

        state is the outcome label (OK/FAILED/...), runtime the elapsed
        seconds measured by the caller.
        """
        task_args = result._task_fields['args']
        if task_args.get('_ansible_check_mode') is True:
            self.ansible_check_mode = True
        if task_args.get('_ansible_version'):
            self.ansible_version = task_args.get('_ansible_version')
        ansible_role = str(result._task._role) if result._task._role else None
        data = {
            'uuid': result._task._uuid,
            'session': self.session,
            'status': state,
            'timestamp': datetime.utcnow().strftime('%Y-%m-%d %H:%M:%S +0000'),
            'host': self.host,
            'ip_address': self.ip_address,
            'user': self.user,
            'runtime': runtime,
            'ansible_version': self.ansible_version,
            'ansible_check_mode': self.ansible_check_mode,
            'ansible_host': result._host.name,
            'ansible_playbook': self.ansible_playbook,
            'ansible_role': ansible_role,
            'ansible_task': result._task_fields,
            'ansible_result': result._result,
        }
        # This wraps the json payload in an outer json event needed by Splunk
        payload = '{"event":' + json.dumps(data, cls=AnsibleJSONEncoder, sort_keys=True) + "}"
        open_url(
            url,
            payload,
            headers={
                'Content-type': 'application/json',
                'Authorization': 'Splunk ' + authtoken
            },
            method='POST'
        )
class CallbackModule(CallbackBase):
    """Aggregate callback that forwards every task result to a Splunk HTTP
    Event Collector.

    Both the `url` and `authtoken` options must be configured (env var or
    ansible.cfg); if either is missing the plugin disables itself with a
    warning rather than failing the play.
    """
    CALLBACK_VERSION = 2.0
    CALLBACK_TYPE = 'aggregate'
    CALLBACK_NAME = 'splunk'
    CALLBACK_NEEDS_WHITELIST = True

    def __init__(self, display=None):
        super(CallbackModule, self).__init__(display=display)
        self.start_datetimes = {}  # Collect task start times, keyed by task uuid
        self.url = None
        self.authtoken = None
        self.splunk = SplunkHTTPCollectorSource()

    def _runtime(self, result):
        # Seconds elapsed since the matching task/handler start was recorded.
        return (
            datetime.utcnow() -
            self.start_datetimes[result._task._uuid]
        ).total_seconds()

    def _send_state(self, state, result):
        # Single shipping path shared by all per-task event handlers below.
        self.splunk.send_event(
            self.url,
            self.authtoken,
            state,
            result,
            self._runtime(result)
        )

    def set_options(self, task_keys=None, var_options=None, direct=None):
        """Resolve plugin options; disable the plugin if either is missing."""
        super(CallbackModule, self).set_options(task_keys=task_keys, var_options=var_options, direct=direct)
        self.url = self.get_option('url')
        if self.url is None:
            self.disabled = True
            self._display.warning('Splunk HTTP collector source URL was '
                                  'not provided. The Splunk HTTP collector '
                                  'source URL can be provided using the '
                                  '`SPLUNK_URL` environment variable or '
                                  'in the ansible.cfg file.')
        self.authtoken = self.get_option('authtoken')
        if self.authtoken is None:
            self.disabled = True
            # Bug fix: the original adjacent literals joined to
            # "an authenticationtoken." -- a space was missing at the break.
            self._display.warning('Splunk HTTP collector requires an '
                                  'authentication token. The Splunk HTTP '
                                  'collector authentication token can be '
                                  'provided using the `SPLUNK_AUTHTOKEN` '
                                  'environment variable or '
                                  'in the ansible.cfg file.')

    def v2_playbook_on_start(self, playbook):
        self.splunk.ansible_playbook = basename(playbook._file_name)

    def v2_playbook_on_task_start(self, task, is_conditional):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_playbook_on_handler_task_start(self, task):
        self.start_datetimes[task._uuid] = datetime.utcnow()

    def v2_runner_on_ok(self, result, **kwargs):
        self._send_state('OK', result)

    def v2_runner_on_skipped(self, result, **kwargs):
        self._send_state('SKIPPED', result)

    def v2_runner_on_failed(self, result, **kwargs):
        self._send_state('FAILED', result)

    def runner_on_async_failed(self, result, **kwargs):
        self._send_state('FAILED', result)

    def v2_runner_on_unreachable(self, result, **kwargs):
        self._send_state('UNREACHABLE', result)
| gpl-3.0 |
wangjun/scrapy | tests/test_downloadermiddleware_stats.py | 101 | 1596 | from unittest import TestCase
from scrapy.downloadermiddlewares.stats import DownloaderStats
from scrapy.http import Request, Response
from scrapy.spiders import Spider
from scrapy.utils.test import get_crawler
class MyException(Exception):
    """Marker exception used to exercise DownloaderStats.process_exception."""
    pass
class TestDownloaderStats(TestCase):
    """Tests that the DownloaderStats middleware increments the expected
    per-spider counters for requests, responses and exceptions."""
    def setUp(self):
        # Fresh crawler + middleware per test; open the stats collector so
        # increments are recorded against self.spider.
        self.crawler = get_crawler(Spider)
        self.spider = self.crawler._create_spider('scrapytest.org')
        self.mw = DownloaderStats(self.crawler.stats)
        self.crawler.stats.open_spider(self.spider)
        self.req = Request('http://scrapytest.org')
        self.res = Response('scrapytest.org', status=400)
    def assertStatsEqual(self, key, value):
        # Third argument is the failure message: dump all stats for debugging.
        self.assertEqual(
            self.crawler.stats.get_value(key, spider=self.spider),
            value,
            str(self.crawler.stats.get_stats(self.spider))
        )
    def test_process_request(self):
        self.mw.process_request(self.req, self.spider)
        self.assertStatsEqual('downloader/request_count', 1)
    def test_process_response(self):
        self.mw.process_response(self.req, self.res, self.spider)
        self.assertStatsEqual('downloader/response_count', 1)
    def test_process_exception(self):
        # Both the total count and the per-exception-type count must move.
        self.mw.process_exception(self.req, MyException(), self.spider)
        self.assertStatsEqual('downloader/exception_count', 1)
        self.assertStatsEqual(
            'downloader/exception_type_count/tests.test_downloadermiddleware_stats.MyException',
            1
        )
    def tearDown(self):
        self.crawler.stats.close_spider(self.spider, '')
| bsd-3-clause |
takis/odoo | addons/website_forum_doc/__openerp__.py | 322 | 1508 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2014-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
{
'name': 'Documentation',
'category': 'Website',
'summary': 'Forum, Documentation',
'version': '1.0',
'description': """
Documentation based on question and pertinent answers of Forum
""",
'author': 'OpenERP SA',
'depends': [
'website_forum'
],
'data': [
'data/doc_data.xml',
'security/ir.model.access.csv',
'views/doc.xml',
'views/website_doc.xml',
],
'demo': [
'data/doc_demo.xml',
],
'installable': True,
}
| agpl-3.0 |
neumerance/deploy | .venv/lib/python2.7/site-packages/django/contrib/gis/geoip/tests.py | 102 | 4766 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from django.conf import settings
from django.contrib.gis.geos import GEOSGeometry
from django.contrib.gis.geoip import GeoIP, GeoIPException
from django.utils import unittest
from django.utils import six
# Note: Requires use of both the GeoIP country and city datasets.
# The GEOIP_DATA path should be the only setting set (the directory
# should contain links or the actual database files 'GeoIP.dat' and
# 'GeoLiteCity.dat'.
class GeoIPTest(unittest.TestCase):
    """Tests for the GeoIP wrapper.

    NOTE(review): these tests require the MaxMind GeoIP.dat / GeoLiteCity.dat
    datasets at settings.GEOIP_PATH and resolve live hostnames
    (www.google.com, tmc.edu, ...) -- results depend on network access and on
    the dataset snapshot in use.
    """
    def test01_init(self):
        "Testing GeoIP initialization."
        g1 = GeoIP() # Everything inferred from GeoIP path
        path = settings.GEOIP_PATH
        g2 = GeoIP(path, 0) # Passing in data path explicitly.
        g3 = GeoIP.open(path, 0) # MaxMind Python API syntax.
        for g in (g1, g2, g3):
            self.assertEqual(True, bool(g._country))
            self.assertEqual(True, bool(g._city))
        # Only passing in the location of one database.
        city = os.path.join(path, 'GeoLiteCity.dat')
        cntry = os.path.join(path, 'GeoIP.dat')
        g4 = GeoIP(city, country='')
        self.assertEqual(None, g4._country)
        g5 = GeoIP(cntry, city='')
        self.assertEqual(None, g5._city)
        # Improper parameters: non-string cache always raises GeoIPException;
        # a bad positional path raises GeoIPException for strings, TypeError
        # otherwise.
        bad_params = (23, 'foo', 15.23)
        for bad in bad_params:
            self.assertRaises(GeoIPException, GeoIP, cache=bad)
            if isinstance(bad, six.string_types):
                e = GeoIPException
            else:
                e = TypeError
            self.assertRaises(e, GeoIP, bad, 0)
    def test02_bad_query(self):
        "Testing GeoIP query parameter checking."
        cntry_g = GeoIP(city='<foo>')
        # No city database available, these calls should fail.
        self.assertRaises(GeoIPException, cntry_g.city, 'google.com')
        self.assertRaises(GeoIPException, cntry_g.coords, 'yahoo.com')
        # Non-string query should raise TypeError
        self.assertRaises(TypeError, cntry_g.country_code, 17)
        self.assertRaises(TypeError, cntry_g.country_name, GeoIP)
    def test03_country(self):
        "Testing GeoIP country querying methods."
        g = GeoIP(city='<foo>')
        fqdn = 'www.google.com'
        addr = '12.215.42.19'
        for query in (fqdn, addr):
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
                             g.country(query))
    def test04_city(self):
        "Testing GeoIP city querying methods."
        g = GeoIP(country='<foo>')
        addr = '128.249.1.1'
        fqdn = 'tmc.edu'
        for query in (fqdn, addr):
            # Country queries should still work.
            for func in (g.country_code, g.country_code_by_addr, g.country_code_by_name):
                self.assertEqual('US', func(query))
            for func in (g.country_name, g.country_name_by_addr, g.country_name_by_name):
                self.assertEqual('United States', func(query))
            self.assertEqual({'country_code' : 'US', 'country_name' : 'United States'},
                             g.country(query))
            # City information dictionary.
            d = g.city(query)
            self.assertEqual('USA', d['country_code3'])
            self.assertEqual('Houston', d['city'])
            self.assertEqual('TX', d['region'])
            self.assertEqual(713, d['area_code'])
            geom = g.geos(query)
            self.assertTrue(isinstance(geom, GEOSGeometry))
            # lat_lon/lon_lat/coords/geos must all agree on the coordinates
            # (to 4 decimal places), just with different axis ordering.
            lon, lat = (-95.4010, 29.7079)
            lat_lon = g.lat_lon(query)
            lat_lon = (lat_lon[1], lat_lon[0])
            for tup in (geom.tuple, g.coords(query), g.lon_lat(query), lat_lon):
                self.assertAlmostEqual(lon, tup[0], 4)
                self.assertAlmostEqual(lat, tup[1], 4)
    def test05_unicode_response(self):
        "Testing that GeoIP strings are properly encoded, see #16553."
        g = GeoIP()
        d = g.city('62.224.93.23')
        self.assertEqual('Schümberg', d['city'])
    def test06_unicode_query(self):
        "Testing that GeoIP accepts unicode string queries, see #17059."
        g = GeoIP()
        d = g.country('whitehouse.gov')
        self.assertEqual('US', d['country_code'])
def suite():
    """Collect all GeoIP tests into a single TestSuite."""
    geoip_suite = unittest.TestSuite()
    geoip_suite.addTest(unittest.makeSuite(GeoIPTest))
    return geoip_suite
def run(verbosity=1):
    """Execute the GeoIP suite with a text runner at the given verbosity."""
    runner = unittest.TextTestRunner(verbosity=verbosity)
    runner.run(suite())
| apache-2.0 |
marcoantoniooliveira/labweb | oscar/lib/python2.7/site-packages/pip/_vendor/html5lib/inputstream.py | 168 | 30602 | from __future__ import absolute_import, division, unicode_literals
from pip._vendor.six import text_type
import codecs
import re
from .constants import EOF, spaceCharacters, asciiLetters, asciiUppercase
from .constants import encodings, ReparseException
from . import utils
from io import StringIO
try:
from io import BytesIO
except ImportError:
BytesIO = StringIO
try:
from io import BufferedIOBase
except ImportError:
class BufferedIOBase(object):
pass
# Non-unicode versions of constants for use in the pre-parser
spaceCharactersBytes = frozenset([item.encode("ascii") for item in spaceCharacters])
asciiLettersBytes = frozenset([item.encode("ascii") for item in asciiLetters])
asciiUppercaseBytes = frozenset([item.encode("ascii") for item in asciiUppercase])
spacesAngleBrackets = spaceCharactersBytes | frozenset([b">", b"<"])
# Codepoints that are reported as parse errors when they appear in the input:
# most C0/C1 control characters, the surrogate range U+D800-U+DFFF, and the
# Unicode noncharacters (U+FDD0-U+FDEF plus the last two codepoints of each
# plane).
invalid_unicode_re = re.compile("[\u0001-\u0008\u000B\u000E-\u001F\u007F-\u009F\uD800-\uDFFF\uFDD0-\uFDEF\uFFFE\uFFFF\U0001FFFE\U0001FFFF\U0002FFFE\U0002FFFF\U0003FFFE\U0003FFFF\U0004FFFE\U0004FFFF\U0005FFFE\U0005FFFF\U0006FFFE\U0006FFFF\U0007FFFE\U0007FFFF\U0008FFFE\U0008FFFF\U0009FFFE\U0009FFFF\U000AFFFE\U000AFFFF\U000BFFFE\U000BFFFF\U000CFFFE\U000CFFFF\U000DFFFE\U000DFFFF\U000EFFFE\U000EFFFF\U000FFFFE\U000FFFFF\U0010FFFE\U0010FFFF]")
# Noncharacters outside the BMP, used by the narrow-build (UCS-2) error path
# which must decode surrogate pairs before it can recognise them.
non_bmp_invalid_codepoints = set([0x1FFFE, 0x1FFFF, 0x2FFFE, 0x2FFFF, 0x3FFFE,
                                  0x3FFFF, 0x4FFFE, 0x4FFFF, 0x5FFFE, 0x5FFFF,
                                  0x6FFFE, 0x6FFFF, 0x7FFFE, 0x7FFFF, 0x8FFFE,
                                  0x8FFFF, 0x9FFFE, 0x9FFFF, 0xAFFFE, 0xAFFFF,
                                  0xBFFFE, 0xBFFFF, 0xCFFFE, 0xCFFFF, 0xDFFFE,
                                  0xDFFFF, 0xEFFFE, 0xEFFFF, 0xFFFFE, 0xFFFFF,
                                  0x10FFFE, 0x10FFFF])
# ASCII whitespace and punctuation, as used by the encoding pre-scan.
ascii_punctuation_re = re.compile("[\u0009-\u000D\u0020-\u002F\u003A-\u0040\u005B-\u0060\u007B-\u007E]")
# Cache for charsUntil()
charsUntilRegEx = {}
class BufferedStream:
    """Buffering for streams that do not have buffering of their own.

    The buffer is implemented as a list of chunks on the assumption that
    joining many strings will be slow since it is O(n**2).  ``position`` is
    a ``[chunk_index, offset_in_chunk]`` pair; chunk index -1 means nothing
    has been read yet.
    """
    def __init__(self, stream):
        self.stream = stream
        self.buffer = []
        self.position = [-1, 0]  # chunk number, offset
    def tell(self):
        """Return the absolute position from the start of the stream."""
        pos = 0
        for chunk in self.buffer[:self.position[0]]:
            pos += len(chunk)
        pos += self.position[1]
        return pos
    def seek(self, pos):
        """Reposition within already-buffered data.

        ``pos`` must fall inside the bytes buffered so far; seeking beyond
        them is unsupported.
        """
        assert pos < self._bufferedBytes()
        offset = pos
        i = 0
        # Walk chunk by chunk, consuming each full chunk's length until the
        # remaining offset lands inside chunk i.
        # Bug fix: the original subtracted the *target* position (``pos``)
        # on every iteration instead of the skipped chunk's length, which
        # produced a wrong [chunk, offset] pair whenever the target lay past
        # the first buffered chunk.
        while len(self.buffer[i]) < offset:
            offset -= len(self.buffer[i])
            i += 1
        self.position = [i, offset]
    def read(self, bytes):
        """Read up to ``bytes`` characters, serving buffered data first."""
        if not self.buffer:
            return self._readStream(bytes)
        elif (self.position[0] == len(self.buffer) and
              self.position[1] == len(self.buffer[-1])):
            # Cursor is at the very end of the buffer: go to the raw stream.
            return self._readStream(bytes)
        else:
            return self._readFromBuffer(bytes)
    def _bufferedBytes(self):
        # Total length of everything buffered so far.
        return sum([len(item) for item in self.buffer])
    def _readStream(self, bytes):
        # Pull a fresh chunk from the wrapped stream and remember it.
        data = self.stream.read(bytes)
        self.buffer.append(data)
        self.position[0] += 1
        self.position[1] = len(data)
        return data
    def _readFromBuffer(self, bytes):
        # Serve as much as possible from buffered chunks, then fall through
        # to the raw stream for any remainder.
        remainingBytes = bytes
        rv = []
        bufferIndex = self.position[0]
        bufferOffset = self.position[1]
        while bufferIndex < len(self.buffer) and remainingBytes != 0:
            assert remainingBytes > 0
            bufferedData = self.buffer[bufferIndex]
            if remainingBytes <= len(bufferedData) - bufferOffset:
                bytesToRead = remainingBytes
                self.position = [bufferIndex, bufferOffset + bytesToRead]
            else:
                bytesToRead = len(bufferedData) - bufferOffset
                self.position = [bufferIndex, len(bufferedData)]
                bufferIndex += 1
            rv.append(bufferedData[bufferOffset:bufferOffset + bytesToRead])
            remainingBytes -= bytesToRead
            bufferOffset = 0
        if remainingBytes:
            rv.append(self._readStream(remainingBytes))
        return "".join(rv)
def HTMLInputStream(source, encoding=None, parseMeta=True, chardet=True):
    """Return the right input-stream wrapper for *source*.

    File-like objects are probed with a zero-length read to see whether they
    yield text or bytes.  Unicode sources go to HTMLUnicodeInputStream (and
    may not specify an encoding); byte sources go to HTMLBinaryInputStream.
    """
    probe = source.read(0) if hasattr(source, "read") else source
    if not isinstance(probe, text_type):
        return HTMLBinaryInputStream(source, encoding, parseMeta, chardet)
    if encoding is not None:
        raise TypeError("Cannot explicitly set an encoding with a unicode string")
    return HTMLUnicodeInputStream(source)
class HTMLUnicodeInputStream:
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    # Characters are served to the tokenizer in chunks of this size.
    _defaultChunkSize = 10240
    def __init__(self, source):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding.  If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        """
        # len("\U0010FFFF") is 1 on wide (UCS-4) Python builds and 2 on
        # narrow (UTF-16) builds; pick the matching invalid-character
        # reporting strategy and lone-surrogate replacement regex.
        if len("\U0010FFFF") == 1:
            self.reportCharacterErrors = self.characterErrorsUCS4
            self.replaceCharactersRegexp = re.compile("[\uD800-\uDFFF]")
        else:
            self.reportCharacterErrors = self.characterErrorsUCS2
            self.replaceCharactersRegexp = re.compile("([\uD800-\uDBFF](?![\uDC00-\uDFFF])|(?<![\uD800-\uDBFF])[\uDC00-\uDFFF])")
        # List of where new lines occur
        self.newLines = [0]
        # Unicode input: encoding is by definition known with certainty.
        self.charEncoding = ("utf-8", "certain")
        self.dataStream = self.openStream(source)
        self.reset()
    def reset(self):
        # Discard all chunk state and restart position tracking.
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        self.errors = []
        # number of (complete) lines in previous chunks
        self.prevNumLines = 0
        # number of columns in the last line of the previous chunk
        self.prevNumCols = 0
        # Deal with CR LF and surrogates split over chunk boundaries
        self._bufferedCharacter = None
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = StringIO(source)
        return stream
    def _position(self, offset):
        # Translate an offset within the current chunk into an absolute
        # (line, column) pair using the totals carried over from prior chunks.
        chunk = self.chunk
        nLines = chunk.count('\n', 0, offset)
        positionLine = self.prevNumLines + nLines
        lastLinePos = chunk.rfind('\n', 0, offset)
        if lastLinePos == -1:
            positionColumn = self.prevNumCols + offset
        else:
            positionColumn = offset - (lastLinePos + 1)
        return (positionLine, positionColumn)
    def position(self):
        """Returns (line, col) of the current position in the stream."""
        line, col = self._position(self.chunkOffset)
        # Lines are reported 1-based, columns 0-based.
        return (line + 1, col)
    def char(self):
        """ Read one character from the stream or queue if available. Return
        EOF when EOF is reached.
        """
        # Read a new chunk from the input stream if necessary
        if self.chunkOffset >= self.chunkSize:
            if not self.readChunk():
                return EOF
        chunkOffset = self.chunkOffset
        char = self.chunk[chunkOffset]
        self.chunkOffset = chunkOffset + 1
        return char
    def readChunk(self, chunkSize=None):
        """Pull the next chunk, normalising newlines and invalid characters.

        Returns False at end of stream, True otherwise.
        """
        if chunkSize is None:
            chunkSize = self._defaultChunkSize
        self.prevNumLines, self.prevNumCols = self._position(self.chunkSize)
        self.chunk = ""
        self.chunkSize = 0
        self.chunkOffset = 0
        data = self.dataStream.read(chunkSize)
        # Deal with CR LF and surrogates broken across chunks
        if self._bufferedCharacter:
            data = self._bufferedCharacter + data
            self._bufferedCharacter = None
        elif not data:
            # We have no more data, bye-bye stream
            return False
        if len(data) > 1:
            # If the chunk ends in a CR or a high surrogate, hold that last
            # character back until the next chunk so the pair can be seen
            # whole.
            lastv = ord(data[-1])
            if lastv == 0x0D or 0xD800 <= lastv <= 0xDBFF:
                self._bufferedCharacter = data[-1]
                data = data[:-1]
        self.reportCharacterErrors(data)
        # Replace invalid characters
        # Note U+0000 is dealt with in the tokenizer
        data = self.replaceCharactersRegexp.sub("\ufffd", data)
        data = data.replace("\r\n", "\n")
        data = data.replace("\r", "\n")
        self.chunk = data
        self.chunkSize = len(data)
        return True
    def characterErrorsUCS4(self, data):
        # Wide build: one regex match per invalid codepoint.
        for i in range(len(invalid_unicode_re.findall(data))):
            self.errors.append("invalid-codepoint")
    def characterErrorsUCS2(self, data):
        # Someone picked the wrong compile option
        # You lose
        # NOTE(review): once `skip` becomes True it is never reset (the
        # `continue` bypasses the only branch that clears it), so every
        # match after the first invalid non-BMP pair is skipped; also a
        # *valid* surrogate pair appends no error but leaves skip False.
        # Looks divergent from upstream html5lib -- confirm before relying
        # on the error counts from narrow builds.
        skip = False
        for match in invalid_unicode_re.finditer(data):
            if skip:
                continue
            codepoint = ord(match.group())
            pos = match.start()
            # Pretty sure there should be endianness issues here
            if utils.isSurrogatePair(data[pos:pos + 2]):
                # We have a surrogate pair!
                char_val = utils.surrogatePairToCodepoint(data[pos:pos + 2])
                if char_val in non_bmp_invalid_codepoints:
                    self.errors.append("invalid-codepoint")
                    skip = True
            elif (codepoint >= 0xD800 and codepoint <= 0xDFFF and
                  pos == len(data) - 1):
                # Lone high/low surrogate at the very end of the chunk.
                self.errors.append("invalid-codepoint")
            else:
                skip = False
                self.errors.append("invalid-codepoint")
    def charsUntil(self, characters, opposite=False):
        """ Returns a string of characters from the stream up to but not
        including any character in 'characters' or EOF. 'characters' must be
        a container that supports the 'in' method and iteration over its
        characters.
        """
        # Use a cache of regexps to find the required characters
        try:
            chars = charsUntilRegEx[(characters, opposite)]
        except KeyError:
            # Only ASCII characters are supported as stop-sets.
            if __debug__:
                for c in characters:
                    assert(ord(c) < 128)
            regex = "".join(["\\x%02x" % ord(c) for c in characters])
            if not opposite:
                regex = "^%s" % regex
            chars = charsUntilRegEx[(characters, opposite)] = re.compile("[%s]+" % regex)
        rv = []
        while True:
            # Find the longest matching prefix
            m = chars.match(self.chunk, self.chunkOffset)
            if m is None:
                # If nothing matched, and it wasn't because we ran out of chunk,
                # then stop
                if self.chunkOffset != self.chunkSize:
                    break
            else:
                end = m.end()
                # If not the whole chunk matched, return everything
                # up to the part that didn't match
                if end != self.chunkSize:
                    rv.append(self.chunk[self.chunkOffset:end])
                    self.chunkOffset = end
                    break
            # If the whole remainder of the chunk matched,
            # use it all and read the next chunk
            rv.append(self.chunk[self.chunkOffset:])
            if not self.readChunk():
                # Reached EOF
                break
        r = "".join(rv)
        return r
    def unget(self, char):
        # Only one character is allowed to be ungotten at once - it must
        # be consumed again before any further call to unget
        if char is not None:
            if self.chunkOffset == 0:
                # unget is called quite rarely, so it's a good idea to do
                # more work here if it saves a bit of work in the frequently
                # called char and charsUntil.
                # So, just prepend the ungotten character onto the current
                # chunk:
                self.chunk = char + self.chunk
                self.chunkSize += 1
            else:
                self.chunkOffset -= 1
                assert self.chunk[self.chunkOffset] == char
class HTMLBinaryInputStream(HTMLUnicodeInputStream):
    """Provides a unicode stream of characters to the HTMLTokenizer.
    This class takes care of character encoding and removing or replacing
    incorrect byte-sequences and also provides column and line tracking.
    """
    def __init__(self, source, encoding=None, parseMeta=True, chardet=True):
        """Initialises the HTMLInputStream.
        HTMLInputStream(source, [encoding]) -> Normalized stream from source
        for use by html5lib.
        source can be either a file-object, local filename or a string.
        The optional encoding parameter must be a string that indicates
        the encoding. If specified, that encoding will be used,
        regardless of any BOM or later declaration (such as in a meta
        element)
        parseMeta - Look for a <meta> element containing encoding information
        chardet - Allow falling back to the charade/chardet libraries for
            statistical detection when no other encoding source is found
        """
        # Raw Stream - for unicode objects this will encode to utf-8 and set
        # self.charEncoding as appropriate
        self.rawStream = self.openStream(source)
        HTMLUnicodeInputStream.__init__(self, self.rawStream)
        # (codec name, confidence) pair; confidence is "certain" only when
        # the encoding was supplied explicitly or found via a BOM.
        self.charEncoding = (codecName(encoding), "certain")
        # Encoding Information
        # Number of bytes to use when looking for a meta element with
        # encoding information
        self.numBytesMeta = 512
        # Number of bytes to use when using detecting encoding using chardet
        self.numBytesChardet = 100
        # Encoding to use if no other information can be found
        self.defaultEncoding = "windows-1252"
        # Detect encoding iff no explicit "transport level" encoding is supplied
        if (self.charEncoding[0] is None):
            self.charEncoding = self.detectEncoding(parseMeta, chardet)
        # Call superclass
        self.reset()
    def reset(self):
        # Rebuild the decoding reader with the currently selected encoding;
        # undecodable byte sequences are replaced rather than raised.
        self.dataStream = codecs.getreader(self.charEncoding[0])(self.rawStream,
                                                                 'replace')
        HTMLUnicodeInputStream.reset(self)
    def openStream(self, source):
        """Produces a file object from source.
        source can be either a file object, local filename or a string.
        """
        # Already a file object
        if hasattr(source, 'read'):
            stream = source
        else:
            stream = BytesIO(source)
        try:
            stream.seek(stream.tell())
        except Exception:
            # Non-seekable streams are wrapped so the detection code below
            # can rewind. (Was a bare "except:", which would also have
            # swallowed SystemExit and KeyboardInterrupt.)
            stream = BufferedStream(stream)
        return stream
    def detectEncoding(self, parseMeta=True, chardet=True):
        """Returns an (encoding name, confidence) pair, trying in order:
        BOM, <meta> prescan, charade/chardet guess, then the default."""
        # First look for a BOM
        # This will also read past the BOM if present
        encoding = self.detectBOM()
        confidence = "certain"
        # If there is no BOM need to look for meta elements with encoding
        # information
        if encoding is None and parseMeta:
            encoding = self.detectEncodingMeta()
            confidence = "tentative"
        # Guess with chardet, if avaliable
        if encoding is None and chardet:
            confidence = "tentative"
            try:
                try:
                    from charade.universaldetector import UniversalDetector
                except ImportError:
                    from chardet.universaldetector import UniversalDetector
                buffers = []
                detector = UniversalDetector()
                while not detector.done:
                    buffer = self.rawStream.read(self.numBytesChardet)
                    assert isinstance(buffer, bytes)
                    if not buffer:
                        break
                    buffers.append(buffer)
                    detector.feed(buffer)
                detector.close()
                encoding = detector.result['encoding']
                self.rawStream.seek(0)
            except ImportError:
                # Neither library is installed; fall through to the default.
                pass
        # If all else fails use the default encoding
        if encoding is None:
            confidence = "tentative"
            encoding = self.defaultEncoding
        # Substitute for equivalent encodings:
        encodingSub = {"iso-8859-1": "windows-1252"}
        if encoding.lower() in encodingSub:
            encoding = encodingSub[encoding.lower()]
        return encoding, confidence
    def changeEncoding(self, newEncoding):
        """Switches to newEncoding mid-parse, rewinding the stream and
        raising ReparseException so the parser restarts from the top."""
        assert self.charEncoding[1] != "certain"
        newEncoding = codecName(newEncoding)
        if newEncoding in ("utf-16", "utf-16-be", "utf-16-le"):
            # In-document 16-bit declarations are read as utf-8 —
            # presumably per the HTML encoding-change rules; confirm.
            newEncoding = "utf-8"
        if newEncoding is None:
            return
        elif newEncoding == self.charEncoding[0]:
            self.charEncoding = (self.charEncoding[0], "certain")
        else:
            # Capture the old name for the message, and update charEncoding
            # *before* reset(): reset() builds the decoding reader from
            # self.charEncoding, so the previous order recreated the reader
            # with the stale codec (and the message printed the new
            # encoding in both slots).
            oldEncoding = self.charEncoding[0]
            self.rawStream.seek(0)
            self.charEncoding = (newEncoding, "certain")
            self.reset()
            raise ReparseException("Encoding changed from %s to %s" %
                                   (oldEncoding, newEncoding))
    def detectBOM(self):
        """Attempts to detect at BOM at the start of the stream. If
        an encoding can be determined from the BOM return the name of the
        encoding otherwise return None"""
        bomDict = {
            codecs.BOM_UTF8: 'utf-8',
            codecs.BOM_UTF16_LE: 'utf-16-le', codecs.BOM_UTF16_BE: 'utf-16-be',
            codecs.BOM_UTF32_LE: 'utf-32-le', codecs.BOM_UTF32_BE: 'utf-32-be'
        }
        # Go to beginning of file and read in 4 bytes
        string = self.rawStream.read(4)
        assert isinstance(string, bytes)
        # Try detecting the BOM using bytes from the string
        encoding = bomDict.get(string[:3])  # UTF-8
        seek = 3
        if not encoding:
            # Need to detect UTF-32 before UTF-16
            encoding = bomDict.get(string)  # UTF-32
            seek = 4
            if not encoding:
                encoding = bomDict.get(string[:2])  # UTF-16
                seek = 2
        # Set the read position past the BOM if one was found, otherwise
        # set it to the start of the stream
        self.rawStream.seek(encoding and seek or 0)
        return encoding
    def detectEncodingMeta(self):
        """Report the encoding declared by the meta element
        """
        buffer = self.rawStream.read(self.numBytesMeta)
        assert isinstance(buffer, bytes)
        parser = EncodingParser(buffer)
        self.rawStream.seek(0)
        encoding = parser.getEncoding()
        if encoding in ("utf-16", "utf-16-be", "utf-16-le"):
            encoding = "utf-8"
        return encoding
class EncodingBytes(bytes):
    """String-like object with an associated position and various extra methods
    If the position is ever greater than the string length then an exception is
    raised"""
    def __new__(self, value):
        # Bytes are stored lower-cased so all subsequent matching is
        # effectively case-insensitive.
        assert isinstance(value, bytes)
        return bytes.__new__(self, value.lower())
    def __init__(self, value):
        # Position starts one byte *before* the data; the first __next__
        # call advances it onto index 0.
        self._position = -1
    def __iter__(self):
        return self
    def __next__(self):
        # Advance the cursor and return the byte at the new position as a
        # length-1 bytes object (slicing keeps Py2/Py3 behaviour identical).
        p = self._position = self._position + 1
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        return self[p:p + 1]
    def next(self):
        # Py2 compat
        return self.__next__()
    def previous(self):
        # Return the byte at the current position, then step the cursor back.
        p = self._position
        if p >= len(self):
            raise StopIteration
        elif p < 0:
            raise TypeError
        self._position = p = p - 1
        return self[p:p + 1]
    def setPosition(self, position):
        # Once the cursor has run off the end, any further positioning keeps
        # raising StopIteration so callers cannot resume a finished scan.
        if self._position >= len(self):
            raise StopIteration
        self._position = position
    def getPosition(self):
        if self._position >= len(self):
            raise StopIteration
        if self._position >= 0:
            return self._position
        else:
            # Initial state (-1): no byte has been consumed yet.
            return None
    position = property(getPosition, setPosition)
    def getCurrentByte(self):
        return self[self.position:self.position + 1]
    currentByte = property(getCurrentByte)
    def skip(self, chars=spaceCharactersBytes):
        """Skip past a list of characters"""
        # Returns the first byte NOT in 'chars' (leaving the position on
        # it), or None when the end of the data is reached.
        p = self.position  # use property for the error-checking
        while p < len(self):
            c = self[p:p + 1]
            if c not in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def skipUntil(self, chars):
        # Mirror image of skip(): stop on the first byte that IS in 'chars'.
        p = self.position
        while p < len(self):
            c = self[p:p + 1]
            if c in chars:
                self._position = p
                return c
            p += 1
        self._position = p
        return None
    def matchBytes(self, bytes):
        """Look for a sequence of bytes at the start of a string. If the bytes
        are found return True and advance the position to the byte after the
        match. Otherwise return False and leave the position alone"""
        p = self.position
        data = self[p:p + len(bytes)]
        rv = data.startswith(bytes)
        if rv:
            self.position += len(bytes)
        return rv
    def jumpTo(self, bytes):
        """Look for the next sequence of bytes matching a given sequence. If
        a match is found advance the position to the last byte of the match"""
        newPosition = self[self.position:].find(bytes)
        if newPosition > -1:
            # XXX: This is ugly, but I can't see a nicer way to fix this.
            if self._position == -1:
                self._position = 0
            self._position += (newPosition + len(bytes) - 1)
            return True
        else:
            raise StopIteration
class EncodingParser(object):
    """Mini parser for detecting character encoding from meta elements"""
    def __init__(self, data):
        """string - the data to work on for encoding detection"""
        self.data = EncodingBytes(data)
        self.encoding = None  # set by handleMeta() when a charset is found
    def getEncoding(self):
        # Dispatch table; more specific byte prefixes are listed first so
        # that e.g. b"<!--" is tried before b"<!".
        methodDispatch = (
            (b"<!--", self.handleComment),
            (b"<meta", self.handleMeta),
            (b"</", self.handlePossibleEndTag),
            (b"<!", self.handleOther),
            (b"<?", self.handleOther),
            (b"<", self.handlePossibleStartTag))
        for byte in self.data:
            keepParsing = True
            for key, method in methodDispatch:
                if self.data.matchBytes(key):
                    try:
                        keepParsing = method()
                        break
                    except StopIteration:
                        # Ran off the end of the buffer; stop scanning.
                        keepParsing = False
                        break
            if not keepParsing:
                break
        return self.encoding
    def handleComment(self):
        """Skip over comments"""
        return self.data.jumpTo(b"-->")
    def handleMeta(self):
        # Examines http-equiv / charset / content attributes of a <meta>
        # element, remembering a content= charset until a matching
        # http-equiv pragma is (or was) seen.
        if self.data.currentByte not in spaceCharactersBytes:
            # if we have <meta not followed by a space so just keep going
            return True
        # We have a valid meta element we want to search for attributes
        hasPragma = False
        pendingEncoding = None
        while True:
            # Try to find the next attribute after the current position
            attr = self.getAttribute()
            if attr is None:
                return True
            else:
                if attr[0] == b"http-equiv":
                    hasPragma = attr[1] == b"content-type"
                    if hasPragma and pendingEncoding is not None:
                        # content=... was seen before http-equiv; commit it.
                        self.encoding = pendingEncoding
                        return False
                elif attr[0] == b"charset":
                    tentativeEncoding = attr[1]
                    codec = codecName(tentativeEncoding)
                    if codec is not None:
                        self.encoding = codec
                        return False
                elif attr[0] == b"content":
                    contentParser = ContentAttrParser(EncodingBytes(attr[1]))
                    tentativeEncoding = contentParser.parse()
                    if tentativeEncoding is not None:
                        codec = codecName(tentativeEncoding)
                        if codec is not None:
                            if hasPragma:
                                self.encoding = codec
                                return False
                            else:
                                # Hold on to it in case http-equiv follows.
                                pendingEncoding = codec
    def handlePossibleStartTag(self):
        return self.handlePossibleTag(False)
    def handlePossibleEndTag(self):
        # Consume the "/" that followed "<" before examining the tag name.
        next(self.data)
        return self.handlePossibleTag(True)
    def handlePossibleTag(self, endTag):
        data = self.data
        if data.currentByte not in asciiLettersBytes:
            # If the next byte is not an ascii letter either ignore this
            # fragment (possible start tag case) or treat it according to
            # handleOther
            if endTag:
                data.previous()
                self.handleOther()
            return True
        c = data.skipUntil(spacesAngleBrackets)
        if c == b"<":
            # return to the first step in the overall "two step" algorithm
            # reprocessing the < byte
            data.previous()
        else:
            # Read all attributes
            attr = self.getAttribute()
            while attr is not None:
                attr = self.getAttribute()
        return True
    def handleOther(self):
        return self.data.jumpTo(b">")
    def getAttribute(self):
        """Return a name,value pair for the next attribute in the stream,
        if one is found, or None"""
        # Step numbers below follow the attribute-parsing sub-algorithm
        # this code was written against (see the spec for details).
        data = self.data
        # Step 1 (skip chars)
        c = data.skip(spaceCharactersBytes | frozenset([b"/"]))
        assert c is None or len(c) == 1
        # Step 2
        if c in (b">", None):
            return None
        # Step 3
        attrName = []
        attrValue = []
        # Step 4 attribute name
        while True:
            if c == b"=" and attrName:
                break
            elif c in spaceCharactersBytes:
                # Step 6!
                c = data.skip()
                break
            elif c in (b"/", b">"):
                return b"".join(attrName), b""
            elif c in asciiUppercaseBytes:
                attrName.append(c.lower())
            elif c is None:
                return None
            else:
                attrName.append(c)
            # Step 5
            c = next(data)
        # Step 7
        if c != b"=":
            data.previous()
            return b"".join(attrName), b""
        # Step 8
        next(data)
        # Step 9
        c = data.skip()
        # Step 10
        if c in (b"'", b'"'):
            # 10.1
            quoteChar = c
            while True:
                # 10.2
                c = next(data)
                # 10.3
                if c == quoteChar:
                    next(data)
                    return b"".join(attrName), b"".join(attrValue)
                # 10.4
                elif c in asciiUppercaseBytes:
                    attrValue.append(c.lower())
                # 10.5
                else:
                    attrValue.append(c)
        elif c == b">":
            return b"".join(attrName), b""
        elif c in asciiUppercaseBytes:
            attrValue.append(c.lower())
        elif c is None:
            return None
        else:
            attrValue.append(c)
        # Step 11
        while True:
            c = next(data)
            if c in spacesAngleBrackets:
                return b"".join(attrName), b"".join(attrValue)
            elif c in asciiUppercaseBytes:
                attrValue.append(c.lower())
            elif c is None:
                return None
            else:
                attrValue.append(c)
class ContentAttrParser(object):
    """Extracts the charset from the value of a meta "content" attribute,
    e.g. b"text/html; charset=utf-8" -> b"utf-8"."""
    def __init__(self, data):
        # data must support the EncodingBytes position/skip/jumpTo API;
        # callers pass an EncodingBytes (a bytes subclass), which satisfies
        # this assertion.
        assert isinstance(data, bytes)
        self.data = data
    def parse(self):
        """Return the charset value as bytes, or None if none is present."""
        try:
            # Check if the attr name is charset
            # otherwise return
            self.data.jumpTo(b"charset")
            self.data.position += 1
            self.data.skip()
            if not self.data.currentByte == b"=":
                # If there is no = sign keep looking for attrs
                return None
            self.data.position += 1
            self.data.skip()
            # Look for an encoding between matching quote marks
            if self.data.currentByte in (b'"', b"'"):
                quoteMark = self.data.currentByte
                self.data.position += 1
                oldPosition = self.data.position
                if self.data.jumpTo(quoteMark):
                    return self.data[oldPosition:self.data.position]
                else:
                    # Unterminated quote: no usable value.
                    return None
            else:
                # Unquoted value
                oldPosition = self.data.position
                try:
                    self.data.skipUntil(spaceCharactersBytes)
                    return self.data[oldPosition:self.data.position]
                except StopIteration:
                    # Return the whole remaining value
                    return self.data[oldPosition:]
        except StopIteration:
            # Any scan that ran off the end means no charset was found.
            return None
def codecName(encoding):
    """Return the python codec name corresponding to an encoding or None if the
    string doesn't correspond to a valid encoding."""
    if isinstance(encoding, bytes):
        try:
            encoding = encoding.decode("ascii")
        except UnicodeDecodeError:
            # Non-ASCII bytes cannot name a valid encoding.
            return None
    if not encoding:
        return None
    # Strip ASCII punctuation and lower-case before the lookup table.
    canonicalName = ascii_punctuation_re.sub("", encoding).lower()
    return encodings.get(canonicalName, None)
| bsd-3-clause |
google/crmint | backends/core/workers.py | 1 | 56339 | # Copyright 2019 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module with CRMintApp worker classes."""
from datetime import datetime
from datetime import timedelta
from fnmatch import fnmatch
from functools import wraps
import json
import os
from random import random
import time
import urllib
from urllib2 import HTTPError
import uuid
import yaml
from apiclient.discovery import build
from apiclient.errors import HttpError
from apiclient.http import MediaIoBaseUpload
import cloudstorage as gcs
from google.cloud import bigquery
from google.cloud.exceptions import ClientError
from googleads import adwords
from oauth2client.service_account import ServiceAccountCredentials
import requests
import zeep.cache
_KEY_FILE = os.path.join(os.path.dirname(__file__), '..', 'data',
'service-account.json')
AVAILABLE = (
'AutoMLPredictor',
'BQMLTrainer',
'BQQueryLauncher',
'BQToAppConversionAPI',
'BQToCM',
'BQToMeasurementProtocol',
'BQToStorageExporter',
'Commenter',
'GAAudiencesUpdater',
'GADataImporter',
'GAToBQImporter',
'MLPredictor',
'MLTrainer',
'MLVersionDeployer',
'StorageChecker',
'StorageCleaner',
'StorageToBQImporter',
)
# Defines how many times to retry a function wrapped in Worker.retry()
# on failure, 3 times by default.
DEFAULT_MAX_RETRIES = os.environ.get('MAX_RETRIES', 3)
# pylint: disable=too-few-public-methods
class WorkerException(Exception):
  """Raised for worker failures that the task handler is expected to catch."""
class Worker(object):
  """Abstract worker class.

  Subclasses implement _execute() and declare their configuration via PARAMS.
  execute() is the entry point called by the task handler: it logs start and
  finish, converts Google Cloud ClientError into WorkerException, and returns
  the list of follow-up workers queued via _enqueue().
  """

  # A list describing worker parameters. Each element in the list is a tuple
  # of five elements: 0) parameter's name, 1) parameter's type, 2) True if
  # parameter is required, False otherwise, 3) default value to use when
  # parameter value is missing, and 4) label to show near parameter's field in
  # a web UI. See examples below in worker classes.
  PARAMS = []

  GLOBAL_SETTINGS = []

  # Maximum number of worker execution attempts.
  MAX_ATTEMPTS = 1

  def __init__(self, params, pipeline_id, job_id):
    self._pipeline_id = pipeline_id
    self._job_id = job_id
    self._params = params
    # Backfill missing parameters with the defaults declared in PARAMS.
    for p in self.PARAMS:
      try:
        self._params[p[0]]
      except KeyError:
        self._params[p[0]] = p[3]
    self._workers_to_enqueue = []

  def _log(self, level, message, *substs):
    # Imported lazily — presumably to avoid a circular import between the
    # core package and this module; TODO(review): confirm.
    from core import cloud_logging
    self.retry(cloud_logging.logger.log_struct)({
        'labels': {
            'pipeline_id': self._pipeline_id,
            'job_id': self._job_id,
            'worker_class': self.__class__.__name__,
        },
        'log_level': level,
        'message': message % substs,
    })

  def log_info(self, message, *substs):
    self._log('INFO', message, *substs)

  def log_warn(self, message, *substs):
    self._log('WARNING', message, *substs)

  def log_error(self, message, *substs):
    self._log('ERROR', message, *substs)

  def execute(self):
    """Runs the worker and returns the list of workers to enqueue."""
    self.log_info('Started with params: %s',
                  json.dumps(self._params, sort_keys=True, indent=2,
                             separators=(', ', ': ')))
    try:
      self._execute()
    except ClientError as e:
      raise WorkerException(e)
    self.log_info('Finished successfully')
    return self._workers_to_enqueue

  def _execute(self):
    """Abstract method that does actual worker's job."""
    pass

  def _enqueue(self, worker_class, worker_params, delay=0):
    self._workers_to_enqueue.append((worker_class, worker_params, delay))

  def retry(self, func, max_retries=DEFAULT_MAX_RETRIES):
    """Decorator implementing retries with exponentially increasing delays.

    Client-side HTTP errors (4xx) are re-raised immediately; every other
    failure counts as an attempt and is retried after an exponential
    backoff, with one final attempt after the loop.
    """
    # Coerce defensively: MAX_RETRIES may come from the environment as a
    # string, and "int < str" never terminates the loop in Python 2.
    retries = int(max_retries)

    @wraps(func)
    def func_with_retries(*args, **kwargs):
      """Retriable version of function being decorated."""
      tries = 0
      while tries < retries:
        try:
          return func(*args, **kwargs)
        except HttpError as e:
          # If it is a client side error, then there's no reason to retry.
          if e.resp.status > 399 and e.resp.status < 500:
            raise e
          # Server-side errors previously fell through without counting as
          # an attempt, retrying forever with no delay.
          tries += 1
          time.sleep(5 * 2 ** (tries + random()))
        except HTTPError as e:
          # If it is a client side error, then there's no reason to retry.
          if e.code > 399 and e.code < 500:
            raise e
          tries += 1
          time.sleep(5 * 2 ** (tries + random()))
        except Exception:  # pylint: disable=broad-except
          tries += 1
          time.sleep(5 * 2 ** (tries + random()))
      # Final attempt; any failure here propagates to the caller.
      return func(*args, **kwargs)
    return func_with_retries
class Commenter(Worker):
  """Dummy worker that fails when checkbox is unchecked."""

  PARAMS = [
      ('comment', 'text', False, '', 'Comment'),
      ('success', 'boolean', True, False, 'Finish successfully'),
  ]

  def _execute(self):
    # Succeed silently when the checkbox is ticked; otherwise surface the
    # comment in the failure message.
    if self._params['success']:
      return
    label = self.PARAMS[1][4]
    raise WorkerException(
        '"{}" is unchecked: {}'.format(label, self._params['comment']))
class BQWorker(Worker):
  """Abstract BigQuery worker."""

  def _get_client(self):
    """Returns a BigQuery client authenticated with the service account."""
    bigquery.Client.SCOPE = (
        'https://www.googleapis.com/auth/bigquery',
        'https://www.googleapis.com/auth/cloud-platform',
        'https://www.googleapis.com/auth/drive')
    client = bigquery.Client.from_service_account_json(_KEY_FILE)
    if self._params['bq_project_id'].strip():
      client.project = self._params['bq_project_id']
    return client

  def _bq_setup(self):
    """Initialises client, dataset, table and a unique BQ job name."""
    self._client = self._get_client()
    self._dataset = self._client.dataset(self._params['bq_dataset_id'])
    self._table = self._dataset.table(self._params['bq_table_id'])
    self._job_name = '%i_%i_%s_%s' % (self._pipeline_id, self._job_id,
                                      self.__class__.__name__, uuid.uuid4())

  def _begin_and_wait(self, *jobs):
    """Starts the given BQ jobs and polls until they all complete.

    The polling delay backs off from 5s to 30s. After 5 minutes the wait is
    handed over to a BQWaiter worker so this request doesn't block forever.

    Raises:
      WorkerException: if any job reports an error result.
    """
    for job in jobs:
      job.begin()
    delay = 5
    wait_time = 0
    all_jobs_done = False
    while not all_jobs_done:
      wait_time += delay
      if wait_time > 300:  # If 5 minutes passed, then spawn BQWaiter.
        worker_params = {
            'job_names': [job.name for job in jobs],
            'bq_project_id': self._params['bq_project_id']
        }
        self._enqueue('BQWaiter', worker_params, 60)
        return
      time.sleep(delay)
      if delay < 30:
        # Pick the backoff step per elapsed minute. Floor division keeps the
        # index an int (Py2's "/" relied on integer division), and min()
        # guards against stepping past the schedule.
        delay = [5, 10, 15, 20, 30][min(wait_time // 60, 4)]
      all_jobs_done = True
      for job in jobs:
        job.reload()
        if job.error_result is not None:
          raise WorkerException(job.error_result['message'])
        if job.state != 'DONE':
          all_jobs_done = False
          break
class BQWaiter(BQWorker):
  """Worker that checks BQ job status and respawns itself if job is running."""

  def _execute(self):
    client = self._get_client()
    job_names = self._params['job_names']
    for job_name in job_names:
      # pylint: disable=protected-access
      job = bigquery.job._AsyncJob(job_name, client)
      # pylint: enable=protected-access
      job.reload()
      if job.error_result is not None:
        raise WorkerException(job.error_result['message'])
      if job.state != 'DONE':
        # At least one job is still running — check again in a minute.
        self._enqueue('BQWaiter', {
            'job_names': job_names,
            'bq_project_id': self._params['bq_project_id']
        }, 60)
        return
class BQQueryLauncher(BQWorker):
  """Worker to run SQL queries in BigQuery."""

  PARAMS = [
      ('query', 'sql', True, '', 'Query'),
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
      ('overwrite', 'boolean', True, False, 'Overwrite table'),
  ]

  def _execute(self):
    self._bq_setup()
    # Run the standard-SQL query and write its result into the target table.
    job = self._client.run_async_query(self._job_name, self._params['query'])
    job.destination = self._table
    job.use_legacy_sql = False
    job.write_disposition = (
        'WRITE_TRUNCATE' if self._params['overwrite'] else 'WRITE_APPEND')
    self._begin_and_wait(job)
class StorageWorker(Worker):
  """Abstract worker class for Cloud Storage workers."""

  def _get_matching_stats(self, patterned_uris):
    """Lists non-directory bucket objects matching any of the URI patterns."""
    # Group the fnmatch patterns by bucket so each bucket is listed once.
    patterns = {}
    for patterned_uri in patterned_uris:
      parts = patterned_uri.split('/')
      bucket = '/'.join(parts[1:3])
      pattern = '/'.join(parts[1:])
      bucket_patterns = patterns.setdefault(bucket, [])
      if pattern not in bucket_patterns:
        bucket_patterns.append(pattern)
    stats = []
    for bucket in patterns:
      for stat in gcs.listbucket(bucket):
        if stat.is_dir:
          continue
        for pattern in patterns[bucket]:
          if fnmatch(stat.filename, pattern):
            stats.append(stat)
            break
    return stats
class StorageCleaner(StorageWorker):
  """Worker to delete stale files in Cloud Storage."""

  PARAMS = [
      ('file_uris', 'string_list', True, '',
       ('List of file URIs and URI patterns (e.g. gs://bucket/data.csv or '
        'gs://bucket/data_*.csv)')),
      ('expiration_days', 'number', True, 30,
       'Days to keep files since last modification'),
  ]

  def _execute(self):
    # Anything last modified before the cutoff gets removed.
    cutoff = datetime.now() - timedelta(self._params['expiration_days'])
    cutoff_timestamp = time.mktime(cutoff.timetuple())
    for stat in self._get_matching_stats(self._params['file_uris']):
      if stat.st_ctime < cutoff_timestamp:
        gcs.delete(stat.filename)
        self.log_info('gs:/%s file deleted.', stat.filename)
class StorageChecker(StorageWorker):
  """Worker to check if files matching the patterns exist in Cloud Storage."""

  PARAMS = [
      ('file_uris', 'string_list', True, '',
       ('List of file URIs and URI patterns (e.g. gs://bucket/data.csv or '
        'gs://bucket/data_*.csv)')),
      ('min_size', 'number', False, '',
       'Least total size of matching files in bytes required for success'),
  ]

  def _execute(self):
    try:
      min_size = int(self._params['min_size'])
    except (TypeError, ValueError):
      # min_size is optional and defaults to '' — int('') raises ValueError
      # (the old code only caught TypeError and crashed on the default).
      min_size = 0
    stats = self._get_matching_stats(self._params['file_uris'])
    if not stats:
      raise WorkerException('Files matching the patterns were not found')
    # sum() is a builtin on both Py2 and Py3 (unlike bare reduce()).
    total_size = sum(stat.st_size for stat in stats)
    if total_size < min_size:
      raise WorkerException('Files matching the patterns are too small')
class StorageToBQImporter(StorageWorker, BQWorker):
  """Worker to import a CSV file into a BigQuery table."""

  PARAMS = [
      # Tuple order is (name, type, required, default, label); the original
      # source_uris entry had the required/default slots swapped.
      ('source_uris', 'string_list', True, '',
       'Source CSV or JSON files URIs (e.g. gs://bucket/data.csv)'),
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
      ('overwrite', 'boolean', True, False, 'Overwrite table'),
      ('dont_create', 'boolean', True, False,
       'Don\'t create table if doesn\'t exist'),
      ('autodetect', 'boolean', True, False,
       'Autodetect schema and other parameters'),
      ('rows_to_skip', 'number', False, 0, 'Header rows to skip'),
      ('errors_to_allow', 'number', False, 0, 'Number of errors allowed'),
      ('import_json', 'boolean', False, False, 'Source is in JSON format'),
  ]

  def _get_source_uris(self):
    """Expands the configured URI patterns into matching gs:// URIs."""
    stats = self._get_matching_stats(self._params['source_uris'])
    return ['gs:/%s' % s.filename for s in stats]

  def _execute(self):
    self._bq_setup()
    source_uris = self._get_source_uris()
    job = self._client.load_table_from_storage(
        self._job_name, self._table, *source_uris)
    if self._params['import_json']:
      job.source_format = 'NEWLINE_DELIMITED_JSON'
    else:
      # CSV-specific knobs.
      try:
        job.skip_leading_rows = self._params['rows_to_skip']
      except KeyError:
        job.skip_leading_rows = 0
      job.autodetect = self._params['autodetect']
      if job.autodetect:
        # Ugly patch to make autodetection work. See https://goo.gl/shWLKf
        # pylint: disable=protected-access
        def _build_resource_with_autodetect():
          resource = bigquery.job.LoadTableFromStorageJob._build_resource(job)
          resource['configuration']['load']['autodetect'] = True
          return resource
        job._build_resource = _build_resource_with_autodetect
        # pylint: enable=protected-access
      else:
        job.allow_jagged_rows = True
        job.allow_quoted_newlines = True
        job.ignore_unknown_values = True
        try:
          job.max_bad_records = self._params['errors_to_allow']
        except KeyError:
          job.max_bad_records = 0
    if self._params['overwrite']:
      job.write_disposition = 'WRITE_TRUNCATE'
    else:
      job.write_disposition = 'WRITE_APPEND'
    if self._params['dont_create']:
      job.create_disposition = 'CREATE_NEVER'
    else:
      job.create_disposition = 'CREATE_IF_NEEDED'
    self._begin_and_wait(job)
class BQToStorageExporter(BQWorker):
  """Worker to export a BigQuery table to a CSV file."""

  PARAMS = [
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
      ('destination_uri', 'string', True, '',
       'Destination CSV or JSON file URI (e.g. gs://bucket/data.csv)'),
      ('print_header', 'boolean', True, False, 'Include a header row'),
      ('export_json', 'boolean', False, False, 'Export in JSON format'),
  ]

  def _execute(self):
    self._bq_setup()
    destination = self._params['destination_uri']
    job = self._client.extract_table_to_storage(
        self._job_name, self._table, destination)
    job.print_header = self._params['print_header']
    # Default extract format is CSV; switch only when JSON was requested.
    if self._params['export_json']:
      job.destination_format = 'NEWLINE_DELIMITED_JSON'
    self._begin_and_wait(job)
class GAWorker(Worker):
  """Abstract class with GA-specific methods."""

  def _ga_setup(self, v='v4'):
    """Builds a Google Analytics API client for the given API version."""
    credentials = ServiceAccountCredentials.from_json_keyfile_name(_KEY_FILE)
    if v == 'v4':
      service = 'analyticsreporting'
    else:
      service = 'analytics'
    self._ga_client = build(service, v, credentials=credentials)

  def _parse_accountid_from_propertyid(self):
    # 'UA-12345-3' -> '12345' (the middle segment is the account ID).
    return self._params['property_id'].split('-')[1]
class GAToBQImporter(BQWorker, GAWorker):
  """Worker to load data into BQ from GA using Core Reporting API."""

  PARAMS = [
      ('view_ids', 'string_list', True, '', 'View IDs (e.g. 12345)'),
      ('start_date', 'string', True, '', 'Start date (e.g. 2015-12-31)'),
      ('end_date', 'string', True, '', 'End date (e.g. 2016-12-31)'),
      ('day_by_day', 'boolean', True, False, 'Fetch data day by day'),
      ('metrics', 'string_list', True, '', 'Metrics (e.g. ga:users)'),
      ('dimensions', 'string_list', False, '', 'Dimensions (e.g. ga:source)'),
      ('filters', 'string', False, '',
       'Filters (e.g. ga:deviceCategory==mobile)'),
      ('include_empty_rows', 'boolean', True, False, 'Include empty rows'),
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
  ]

  def _compose_report(self):
    """Builds the Reporting API v4 request template reused for all views."""
    dimensions = [{'name': d} for d in self._params['dimensions']]
    metrics = [{'expression': m} for m in self._params['metrics']]
    self._request = {
        'viewId': None,
        'dateRanges': None,
        'dimensions': dimensions,
        'metrics': metrics,
        'filtersExpression': self._params['filters'],
        'hideTotals': True,
        'hideValueRanges': True,
        'includeEmptyRows': self._params['include_empty_rows'],
        'samplingLevel': 'LARGE',
        'pageSize': 10000,
    }

  def _get_report(self, view_id, start_date, end_date):
    """Fetches one report (with paging) and buffers its rows for BQ."""
    # TODO(dulacp): refactor this method, too complex branching logic
    log_str = 'View ID %s from %s till %s' % (view_id, start_date, end_date)
    self.log_info('Fetch for %s started', log_str)
    rows_fetched = 0
    # NB: the Reporting API expects the camelCase 'viewId' key; the old
    # code assigned to 'view_id', leaving the template's viewId as None.
    self._request['viewId'] = view_id
    self._request['dateRanges'] = [{
        'startDate': start_date,
        'endDate': end_date,
    }]
    body = {'reportRequests': [self._request]}
    while True:
      request = self._ga_client.reports().batchGet(body=body)
      response = self.retry(request.execute)()
      report = response['reports'][0]
      # BQ column names use '_' where GA names use ':' (e.g. ga_users).
      dimensions = [d.replace(':', '_') for d in
                    report['columnHeader']['dimensions']]
      metrics = [m['name'].replace(':', '_') for m in
                 report['columnHeader']['metricHeader']['metricHeaderEntries']]
      ga_row = {
          'view_id': view_id,
          'start_date': start_date,
          'end_date': end_date,
      }
      try:
        for row in report['data']['rows']:
          for dimension, value in zip(dimensions, row['dimensions']):
            ga_row[dimension] = value
          for metric, value in zip(metrics, row['metrics'][0]['values']):
            ga_row[metric] = value
          # Order the values according to the destination table's schema;
          # fields missing from the report become NULLs.
          bq_row = []
          for field in self._table.schema:
            try:
              bq_row.append(ga_row[field.name])
            except KeyError:
              bq_row.append(None)
          self._bq_rows.append(tuple(bq_row))
          self._flush()
        rows_fetched += len(report['data']['rows'])
        try:
          self._request['pageToken'] = report['nextPageToken']
        except KeyError:
          # No more pages: drop any stale token and stop paging.
          try:
            del self._request['pageToken']
          except KeyError:
            pass
          break
      except KeyError:
        # 'rows' missing from the response means an empty report.
        break
    if rows_fetched:
      self.log_info('%i rows of data fetched for %s', rows_fetched, log_str)
    else:
      self.log_warn('No rows of data fetched for %s', log_str)

  def _flush(self, forced=False):
    """Streams buffered rows into BQ in chunks of 10000."""
    if self._bq_rows:
      if forced or len(self._bq_rows) > 9999:
        for i in xrange(0, len(self._bq_rows), 10000):
          self._table.insert_data(self._bq_rows[i:i + 10000])
        self._bq_rows = []

  def _execute(self):
    self._bq_setup()
    self._table.reload()
    self._ga_setup()
    self._compose_report()
    self._bq_rows = []
    if self._params['day_by_day']:
      start_date = datetime.strptime(
          self._params['start_date'], '%Y-%m-%d').date()
      end_date = datetime.strptime(
          self._params['end_date'], '%Y-%m-%d').date()
      date_str = start_date.strftime('%Y-%m-%d')
      for view_id in self._params['view_ids']:
        self._get_report(view_id, date_str, date_str)
      self._flush(forced=True)
      # Re-enqueue this worker for the next day until the range is covered.
      if start_date != end_date:
        start_date += timedelta(1)
        params = self._params.copy()
        params['start_date'] = start_date.strftime('%Y-%m-%d')
        self._enqueue(self.__class__.__name__, params)
    else:
      for view_id in self._params['view_ids']:
        self._get_report(
            view_id, self._params['start_date'], self._params['end_date'])
      self._flush(forced=True)
class GADataImporter(GAWorker):
  """Imports CSV data from Cloud Storage to GA using Data Import."""

  PARAMS = [
      ('csv_uri', 'string', True, '',
       'CSV data file URI (e.g. gs://bucket/data.csv)'),
      ('property_id', 'string', True, '',
       'GA Property Tracking ID (e.g. UA-12345-3)'),
      ('dataset_id', 'string', True, '',
       'GA Dataset ID (e.g. sLj2CuBTDFy6CedBJw)'),
      ('max_uploads', 'number', False, '',
       'Maximum uploads to keep in GA Dataset (leave empty to keep all)'),
      ('delete_before', 'boolean', True, False,
       'Delete older uploads before upload'),
      ('account_id', 'string', False, '', 'GA Account ID'),
  ]

  # Read/upload chunk size for the resumable media upload.
  _BUFFER_SIZE = 256 * 1024

  def _upload(self):
    """Uploads the GCS file to the GA dataset using resumable chunks."""
    with gcs.open(self._file_name, read_buffer_size=self._BUFFER_SIZE) as f:
      media = MediaIoBaseUpload(f, mimetype='application/octet-stream',
                                chunksize=self._BUFFER_SIZE, resumable=True)
      request = self._ga_client.management().uploads().uploadData(
          accountId=self._account_id,
          webPropertyId=self._params['property_id'],
          customDataSourceId=self._params['dataset_id'],
          media_body=media)
      response = None
      tries = 0
      milestone = 0
      while response is None and tries < 5:
        try:
          status, response = request.next_chunk()
        except HttpError as e:  # 'as' syntax works on Py2.6+ and Py3.
          # Retry transient server-side failures with exponential backoff.
          if e.resp.status in [404, 500, 502, 503, 504]:
            tries += 1
            delay = 5 * 2 ** (tries + random())
            self.log_warn('%s, Retrying in %.1f seconds...', e, delay)
            time.sleep(delay)
          else:
            raise WorkerException(e)
        else:
          tries = 0
          if status:
            # Log progress at most every 20%.
            progress = int(status.progress() * 100)
            if progress >= milestone:
              self.log_info('Uploaded %d%%.', int(status.progress() * 100))
              milestone += 20
      self.log_info('Upload Complete.')

  def _delete_older(self, uploads_to_keep):
    """Deletes all but the newest uploads_to_keep uploads (all when 0)."""
    request = self._ga_client.management().uploads().list(
        accountId=self._account_id, webPropertyId=self._params['property_id'],
        customDataSourceId=self._params['dataset_id'])
    response = self.retry(request.execute)()
    uploads = sorted(response.get('items', []), key=lambda u: u['uploadTime'])
    if uploads_to_keep:
      ids_to_delete = [u['id'] for u in uploads[:-uploads_to_keep]]
    else:
      ids_to_delete = [u['id'] for u in uploads]
    if ids_to_delete:
      request = self._ga_client.management().uploads().deleteUploadData(
          accountId=self._account_id,
          webPropertyId=self._params['property_id'],
          customDataSourceId=self._params['dataset_id'],
          body={
              'customDataImportUids': ids_to_delete})
      self.retry(request.execute)()
      self.log_info('%i older upload(s) deleted.', len(ids_to_delete))

  def _execute(self):
    self._ga_setup('v3')
    if self._params['account_id']:
      self._account_id = self._params['account_id']
    else:
      self._account_id = self._parse_accountid_from_propertyid()
    self._file_name = self._params['csv_uri'].replace('gs:/', '')
    # max_uploads is optional and defaults to '' — coerce before comparing.
    # In Python 2, '' > 0 is True, so the old code reached "'' - 1" and
    # crashed with a TypeError whenever the field was left empty.
    try:
      max_uploads = int(self._params['max_uploads'])
    except (TypeError, ValueError):
      max_uploads = 0
    if max_uploads > 0 and self._params['delete_before']:
      self._delete_older(max_uploads - 1)
    self._upload()
    if max_uploads > 0 and not self._params['delete_before']:
      self._delete_older(max_uploads)
class GAAudiencesUpdater(BQWorker, GAWorker):
  """Worker to update GA audiences using values from a BQ table.

  See: https://developers.google.com/analytics/devguides/config/mgmt/v3/mgmtReference/management/remarketingAudience#resource
  for more details on the required GA Audience JSON template format.
  """

  # (name, type, required, default, human-readable label)
  PARAMS = [
      ('property_id', 'string', True, '',
       'GA Property Tracking ID (e.g. UA-12345-3)'),
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
      ('template', 'text', True, '', 'GA audience JSON template'),
      ('account_id', 'string', False, '', 'GA Account ID'),
  ]

  def _infer_audiences(self):
    """Renders the audience JSON template against every BQ table row.

    Populates self._inferred_audiences: a dict mapping audience name to
    the audience object built from one row.
    """
    self._inferred_audiences = {}
    fields = [f.name for f in self._table.schema]
    for row in self._table.fetch_data():
      try:
        # The template is a %-format string keyed by BQ column names.
        template_rendered = self._params['template'] % dict(zip(fields, row))
        audience = json.loads(template_rendered)
      except ValueError as e:
        # Catches invalid JSON output (and bad %-substitution values).
        # NOTE(review): a template key missing from the row raises KeyError,
        # which is NOT caught here — confirm whether that is intended.
        raise WorkerException(e)
      self._inferred_audiences[audience['name']] = audience

  def _get_audiences(self):
    """Fetches existing remarketing audiences from GA, page by page.

    Populates self._current_audiences with only the audiences whose names
    also appear in self._inferred_audiences.
    """
    audiences = []
    start_index = 1
    max_results = 100
    # Placeholder so the loop runs once; replaced by the first API response.
    total_results = 100
    while start_index <= total_results:
      request = self._ga_client.management().remarketingAudience().list(
          accountId=self._account_id,
          webPropertyId=self._params['property_id'],
          start_index=start_index,
          max_results=max_results)
      response = self.retry(request.execute)()
      total_results = response['totalResults']
      start_index += max_results
      audiences += response['items']
    self._current_audiences = {}
    names = self._inferred_audiences.keys()
    for audience in audiences:
      if audience['name'] in names:
        self._current_audiences[audience['name']] = audience

  def _equal(self, patch, audience):
    """Checks whether applying a patch would not change an audience.

    Args:
      patch: An object that is going to be used as a patch to update the
        audience.
      audience: An object representing audience to be patched.

    Returns:
      True if applying the patch won't change the audience, False otherwise.
    """
    # Iterative structural subset check: every key/index of `patch` must
    # exist in `audience` with an equal value, recursing into nested dicts
    # and same-length lists via the work list below (appending while
    # iterating a list is well-defined in Python: appended pairs are
    # visited later).
    dicts = [(patch, audience)]
    for d1, d2 in dicts:
      # Lists are compared index-by-index (py2 xrange).
      keys = d1 if isinstance(d1, dict) else xrange(len(d1))
      for k in keys:
        try:
          d2[k]
        except (IndexError, KeyError):
          return False
        if isinstance(d1[k], dict):
          if isinstance(d2[k], dict):
            dicts.append((d1[k], d2[k]))
          else:
            return False
        elif isinstance(d1[k], list):
          if isinstance(d2[k], list) and len(d1[k]) == len(d2[k]):
            dicts.append((d1[k], d2[k]))
          else:
            return False
        elif d1[k] != d2[k]:
          return False
    return True

  def _get_diff(self):
    """Composes lists of audiences to be created and updated in GA."""
    self._audiences_to_insert = []
    self._audiences_to_patch = {}
    for name in self._inferred_audiences:
      inferred_audience = self._inferred_audiences[name]
      if name in self._current_audiences:
        current_audience = self._current_audiences[name]
        if not self._equal(inferred_audience, current_audience):
          # Existing audience differs: patch in place, keyed by its GA id.
          self._audiences_to_patch[current_audience['id']] = inferred_audience
      else:
        self._audiences_to_insert.append(inferred_audience)

  def _update_ga_audiences(self):
    """Updates and/or creates audiences in GA."""
    for audience in self._audiences_to_insert:
      request = self._ga_client.management().remarketingAudience().insert(
          accountId=self._account_id,
          webPropertyId=self._params['property_id'],
          body=audience)
      self.retry(request.execute)()
    for audience_id in self._audiences_to_patch:
      audience = self._audiences_to_patch[audience_id]
      request = self._ga_client.management().remarketingAudience().patch(
          accountId=self._account_id,
          webPropertyId=self._params['property_id'],
          remarketingAudienceId=audience_id,
          body=audience)
      self.retry(request.execute)()

  def _execute(self):
    """Entry point: syncs GA remarketing audiences with the BQ table."""
    if self._params['account_id']:
      self._account_id = self._params['account_id']
    else:
      # Fall back to parsing the account id out of the property id.
      self._account_id = self._parse_accountid_from_propertyid()
    self._bq_setup()
    self._table.reload()
    self._ga_setup('v3')
    self._infer_audiences()
    self._get_audiences()
    self._get_diff()
    self._update_ga_audiences()
class MLWorker(Worker):
  """Abstract ML Engine worker."""

  def _get_ml_client(self):
    """Builds an ML Engine API client authorized with the service key."""
    creds = ServiceAccountCredentials.from_json_keyfile_name(_KEY_FILE)
    self._ml_client = build('ml', 'v1', credentials=creds)

  def _get_ml_job_id(self):
    """Generates a unique ML job id and stores it on the instance."""
    unique_suffix = str(uuid.uuid4()).replace('-', '_')
    self._ml_job_id = '%s_%i_%i_%s' % (
        self.__class__.__name__, self._pipeline_id, self._job_id,
        unique_suffix)
class MLWaiter(MLWorker):
  """Worker that checks ML job status and respawns itself if job is running."""

  FINAL_STATUSES = ('STATE_UNSPECIFIED', 'SUCCEEDED', 'FAILED', 'CANCELLED')

  def _execute(self):
    """Polls the job once; re-enqueues itself in 60s if not finished."""
    self._get_ml_client()
    get_request = self._ml_client.projects().jobs().get(
        name=self._params['job_name'])
    job = self.retry(get_request.execute)()
    still_running = job.get('state') not in self.FINAL_STATUSES
    if still_running:
      self._enqueue('MLWaiter', {'job_name': self._params['job_name']}, 60)
class MLOperationWaiter(MLWorker):
  """Worker that checks an ML operation's status and respawns itself
  until the operation is done.
  """

  def _execute(self):
    """Polls the operation once; re-enqueues itself in 60s if not done."""
    self._get_ml_client()
    request = self._ml_client.projects().operations().get(
        name=self._params['operation_name'])
    operation = self.retry(request.execute)()
    # The Operations API omits the 'done' field while the operation is
    # still in flight, so the previous `operation['done'] != True` raised
    # KeyError exactly when the worker should respawn. Use .get() with a
    # truthiness test instead.
    if not operation.get('done'):
      self._enqueue('MLOperationWaiter',
                    {'operation_name': self._params['operation_name']}, 60)
class MLPredictor(MLWorker):
  """Worker to create ML batch prediction jobs."""

  PARAMS = [
      ('project', 'string', True, '', 'ML project ID'),
      ('model', 'string', True, '', 'ML model name'),
      ('version', 'string', True, '', 'ML model version'),
      ('input_uris', 'string_list', True, '',
       'URIs of input JSON files (e.g. gs://bucket/data.json)'),
      ('output_uri', 'string', True, '',
       'URI of folder to put predictions into (e.g. gs://bucket/folder)'),
  ]

  def _execute(self):
    """Submits a batch prediction job and spawns a waiter to track it."""
    project_id = 'projects/%s' % self._params['project']
    version_name = '%s/models/%s/versions/%s' % (
        project_id, self._params['model'], self._params['version'])
    self._get_ml_job_id()
    prediction_input = {
        'dataFormat': 'JSON',
        'inputPaths': self._params['input_uris'],
        'outputPath': self._params['output_uri'],
        'region': 'europe-west1',
        'versionName': version_name,
    }
    request_body = {
        'jobId': self._ml_job_id,
        'predictionInput': prediction_input,
    }
    self._get_ml_client()
    create_request = self._ml_client.projects().jobs().create(
        parent=project_id, body=request_body)
    self.retry(create_request.execute)()
    # The waiter polls the job until it reaches a final state.
    job_name = '%s/jobs/%s' % (project_id, self._ml_job_id)
    self._enqueue('MLWaiter', {'job_name': job_name}, 60)
class MLTrainer(MLWorker):
  """Worker to train a ML model."""

  PARAMS = [
      ('project', 'string', True, '', 'ML project ID'),
      ('jobDir', 'string', True, '',
       'URI of folder to put output generated by AI platform '
       '(e.g. gs://bucket/folder)'),
      ('packageUris', 'string', True, '',
       'URI of python package e.g. gs://bucket/folder/filename.tar.gz'),
      ('scaleTier', 'string', True, '',
       'Scale Tier e.g. BASIC, STANDARD_1'),
      ('runtimeVersion', 'string', True, '',
       'Runtime version e.g. 1.10'),
      ('pythonModule', 'string', True, '',
       'Name of python module e.g. trainer.task'),
      ('args', 'string_list', True, '',
       'Enter the arguments to be passed to the python package. '
       'Key in one line, value in the next.')
  ]

  def _execute(self):
    """Submits an AI Platform training job and spawns a waiter to track it."""
    self._get_ml_job_id()
    body = {
        'jobId': self._ml_job_id,
        'trainingInput': {
            'args': [a.strip() for a in self._params['args']],
            'packageUris': self._params['packageUris'],
            'region': 'europe-west1',
            'jobDir': self._params['jobDir'],
            'runtimeVersion': self._params['runtimeVersion'],
            # Was "'%s' % param" — redundant formatting of an
            # already-string parameter; pass the value directly.
            'pythonModule': self._params['pythonModule'],
        }
    }
    # NOTE(review): 'scaleTier' is declared in PARAMS but never sent in
    # trainingInput — confirm whether it should be included in the body.
    project_id = 'projects/%s' % self._params['project']
    self._get_ml_client()
    request = self._ml_client.projects().jobs().create(
        parent=project_id, body=body)
    self.retry(request.execute)()
    job_name = '%s/jobs/%s' % (project_id, self._ml_job_id)
    self._enqueue('MLWaiter', {'job_name': job_name}, 60)
class MLVersionDeployer(MLWorker, StorageWorker):
  """Worker to deploy ML Model Version."""

  PARAMS = [
      ('project', 'string', True, '', 'ML project ID'),
      ('jobDir', 'string', True, '',
       'URI of GCS folder with a trained model'
       '(e.g. gs://bucket/folder)'),
      ('modelName', 'string', True, '',
       'Name of the ML model in Google Cloud AI Platform'),
      ('versionName', 'string', True, '',
       'Name of the version (letters, numbers, underscores only; '
       'must start with a letter)'),
      ('runtimeVersion', 'string', True, '',
       'Runtime version e.g. 1.10'),
      ('pythonVersion', 'string', True, '', 'Version of python, e.g. 3.5'),
      ('framework', 'string', True, '', 'Framework, eg. TENSORFLOW')
  ]

  def _execute(self):
    """Finds the newest saved model under jobDir and deploys it as a version.

    Raises:
      WorkerException: if no saved_model.pb is found under jobDir.
    """
    self._get_ml_job_id()
    # Find directory where newest saved model is located.
    bucket = self._params['jobDir']
    newest_file = None
    # listbucket() expects the path without the 'gs:/' scheme prefix.
    for stat in gcs.listbucket(bucket[4:]):
      if stat.filename.find('saved_model.pb') != -1:
        if newest_file is None or stat.st_ctime > newest_file.st_ctime:
          newest_file = stat
    if newest_file is None:
      # Previously this fell through and crashed with an AttributeError on
      # None below; fail with an explicit, actionable error instead.
      raise WorkerException(
          'No saved_model.pb found in %s' % bucket)
    model_dir = newest_file.filename[0:newest_file.filename.rfind('/')]
    body = {
        "name": self._params['versionName'],
        "description": "Test from python",
        "deploymentUri": "gs:/" + model_dir,
        "pythonVersion": self._params['pythonVersion'],
        "runtimeVersion": self._params['runtimeVersion'],
        "framework": self._params['framework']
    }
    project_id = 'projects/%s' % self._params['project']
    self._get_ml_client()
    request = self._ml_client.projects().models().versions().create(
        parent=project_id + "/models/" + self._params['modelName'], body=body)
    response = self.retry(request.execute)()
    # Version creation is a long-running operation; poll it asynchronously.
    self._enqueue('MLOperationWaiter', {'operation_name': response['name']}, 60)
class MeasurementProtocolException(WorkerException):
  """Measurement Protocol execution exception."""
  # Raised when a Measurement Protocol HTTP request fails.
  pass
class BQToMeasurementProtocol(BQWorker):
  """Worker to push data through Measurement Protocol."""

  PARAMS = [
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
      ('mp_batch_size', 'number', True, 20, ('Measurement Protocol batch size '
                                             '(https://goo.gl/7VeWuB)')),
      ('debug', 'boolean', True, False, 'Debug mode'),
  ]

  # BigQuery batch size for querying results. Default to 1000.
  BQ_BATCH_SIZE = int(1e3)

  # Maximum number of jobs to enqueued before spawning a new scheduler.
  MAX_ENQUEUED_JOBS = 50

  def _execute(self):
    """Schedules one BQToMeasurementProtocolProcessor job per BQ page.

    Iterating the page iterator advances its page token; each page token is
    handed to a processor job. After MAX_ENQUEUED_JOBS pages, a clone of
    this worker is enqueued with the current token and scheduling stops, to
    bound this task's own run time.
    """
    self._bq_setup()
    self._table.reload()
    page_token = self._params.get('bq_page_token', None)
    batch_size = self.BQ_BATCH_SIZE
    query_iterator = self.retry(self._table.fetch_data)(
        max_results=batch_size,
        page_token=page_token)
    enqueued_jobs_count = 0
    for query_page in query_iterator.pages:  # pylint: disable=unused-variable
      # Enqueue job for this page
      worker_params = self._params.copy()
      worker_params['bq_page_token'] = page_token
      worker_params['bq_batch_size'] = self.BQ_BATCH_SIZE
      self._enqueue('BQToMeasurementProtocolProcessor', worker_params, 0)
      enqueued_jobs_count += 1

      # Updates the page token reference for the next iteration.
      page_token = query_iterator.next_page_token

      # Spawns a new job to schedule the remaining pages.
      if (enqueued_jobs_count >= self.MAX_ENQUEUED_JOBS
          and page_token is not None):
        worker_params = self._params.copy()
        worker_params['bq_page_token'] = page_token
        self._enqueue(self.__class__.__name__, worker_params, 0)
        return
class BQToMeasurementProtocolProcessor(BQWorker):
  """Worker pushing to Measurement Protocol the first page only of a query."""

  def _flatten(self, data):
    """Flattens nested lists/dicts of `data` into top-level scalar keys.

    None values are dropped; list items become key1..keyN and dict items
    become keySubkey. Repeats until no nested containers remain.
    """
    flat = False
    while not flat:
      flat = True
      # Python 2: .keys() snapshots the keys into a list, so mutating
      # `data` inside the loop is safe here.
      for k in data.keys():
        if data[k] is None:
          del data[k]
        elif isinstance(data[k], list):
          # Lists are 1-indexed in the flattened keys (key1, key2, ...).
          for i, v in enumerate(data[k]):
            data['%s%i' % (k, i + 1)] = v
          del data[k]
          flat = False
        elif isinstance(data[k], dict):
          for l in data[k]:
            data['%s%s' % (k, l)] = data[k][l]
          del data[k]
          flat = False

  def _get_payload_from_data(self, data):
    """Builds one Measurement Protocol payload dict from a flattened row."""
    self._flatten(data)
    payload = {'v': 1}  # Use version 1
    payload.update(data)
    return payload

  def _prepare_payloads_for_batch_request(self, payloads):
    """Merges payloads to send them in a batch request.

    Args:
      payloads: list of payload, each payload being a dictionary.

    Returns:
      Concatenated url-encoded payloads, one payload per line. For example:
        param1=value10&param2=value20
        param1=value11&param2=value21
    """
    assert isinstance(payloads, (list, tuple))
    # Sort keys so the encoded form is deterministic (py2 `unicode`).
    payloads_utf8 = [sorted([(k, unicode(p[k]).encode('utf-8')) for k in p],
                            key=lambda t: t[0]) for p in payloads]
    return '\n'.join([urllib.urlencode(p) for p in payloads_utf8])

  def _send_batch_hits(self, batch_payload, user_agent='CRMint / 0.1'):
    """Sends a batch request to the Measurement Protocol endpoint.

    NB: Use the HitBuilder service to validate a Measurement Protocol
    hit format with the Measurement Protocol Validation Server.
    https://ga-dev-tools.appspot.com/hit-builder/

    Args:
      batch_payload: list of payloads, each payload being a list of key/values
        tuples to pass to the Measurement Protocol batch endpoint.
      user_agent: string representing the client User Agent.

    Raises:
      MeasurementProtocolException: if the HTTP request fails.
    """
    headers = {'user-agent': user_agent}
    if self._debug:
      # Debug mode: validate each hit individually against the debug
      # endpoint and log problems instead of sending real hits.
      for payload in batch_payload.split('\n'):
        response = requests.post(
            'https://www.google-analytics.com/debug/collect',
            headers=headers,
            data=payload)
        result = json.loads(response.text)
        if (not result['hitParsingResult'] or
            not result['hitParsingResult'][0]['valid']):
          message = ('Invalid payload ("&" characters replaced with new lines):'
                     '\n\n%s\n\nValidation response:\n\n%s')
          readable_payload = payload.replace('&', '\n')
          self.log_warn(message, readable_payload, response.text)
    else:
      response = requests.post('https://www.google-analytics.com/batch',
                               headers=headers,
                               data=batch_payload)
      if response.status_code != requests.codes.ok:
        raise MeasurementProtocolException(
            'Failed to send event hit with status code (%s) and parameters: %s'
            % (response.status_code, batch_payload))

  def _send_payload_list(self, payload_list):
    """Sends one batch of payloads, logging (not raising) on failure."""
    batch_payload = self._prepare_payloads_for_batch_request(payload_list)
    try:
      self.retry(self._send_batch_hits, max_retries=1)(batch_payload)
    except MeasurementProtocolException as e:
      # Escape %, since log_error treats its first argument as a format
      # string (py2 e.message).
      escaped_message = e.message.replace('%', '%%')
      self.log_error(escaped_message)

  def _process_query_results(self, query_data, query_schema):
    """Sends event hits from query data."""
    fields = [f.name for f in query_schema]
    payload_list = []
    for row in query_data:
      data = dict(zip(fields, row))
      payload = self._get_payload_from_data(data)
      payload_list.append(payload)
      if len(payload_list) >= self._params['mp_batch_size']:
        self._send_payload_list(payload_list)
        payload_list = []
    if payload_list:
      # Sends remaining payloads.
      self._send_payload_list(payload_list)

  def _execute(self):
    """Fetches the single BQ page named in params and sends its rows."""
    self._bq_setup()
    self._table.reload()
    self._debug = self._params['debug']
    page_token = self._params['bq_page_token'] or None
    batch_size = self._params['bq_batch_size']
    query_iterator = self.retry(self._table.fetch_data)(
        max_results=batch_size,
        page_token=page_token)
    query_first_page = next(query_iterator.pages)
    self._process_query_results(query_first_page, query_iterator.schema)
class BQMLTrainer(BQWorker):
  """Worker to run BQML SQL queries in BigQuery."""

  PARAMS = [
      ('query', 'sql', True, '', 'Query'),
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
  ]

  def _execute(self):
    """Runs the configured query asynchronously and waits for completion."""
    bq_client = self._get_client()
    unique_name = '%i_%i_%s_%s' % (
        self._pipeline_id, self._job_id, self.__class__.__name__,
        uuid.uuid4())
    query_job = bq_client.run_async_query(unique_name, self._params['query'])
    # BQML statements require standard SQL.
    query_job.use_legacy_sql = False
    self._begin_and_wait(query_job)
class AWWorker(Worker):
  """Abstract AdWords API worker."""

  # Upper bound on items sent per AdWords API call.
  _MAX_ITEMS_PER_CALL = 10000

  # Settings that must be provided globally for any AdWords worker.
  GLOBAL_SETTINGS = ['google_ads_refresh_token', 'client_id', 'client_secret',
                     'developer_token']

  def _aw_setup(self):
    """Create AdWords API client.

    Raises:
      WorkerException: if any AdWords global parameter is missing or empty.
    """
    # Throw exception if one or more AdWords global params are missing.
    for name in self.GLOBAL_SETTINGS:
      # `name not in ...` replaces the non-idiomatic `not name in ...`.
      if name not in self._params or not self._params[name]:
        raise WorkerException(
            "One or more AdWords API global parameters are missing.")
    client_params_dict = {
        'adwords': {
            'client_customer_id': self._params['client_customer_id'].strip(),
            'developer_token': self._params['developer_token'].strip(),
            'client_id': self._params['client_id'].strip(),
            'client_secret': self._params['client_secret'].strip(),
            'refresh_token': self._params['google_ads_refresh_token'].strip(),
        }
    }
    # The AdWords client is configured from a YAML string.
    client_params_yaml = yaml.safe_dump(client_params_dict, encoding='utf-8',
                                        allow_unicode=True)
    self._aw_client = adwords.AdWordsClient.LoadFromString(client_params_yaml)
    # NOTE(review): presumably caches SOAP/WSDL artifacts in memory between
    # calls — confirm against the zeep documentation.
    self._aw_client.cache = zeep.cache.InMemoryCache()
class BQToCM(AWWorker, BQWorker):
  """Customer Match worker."""

  PARAMS = [
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
      ('client_customer_id', 'string', True, '', 'Google Ads Customer ID'),
      ('list_name', 'string', True, '', 'Audience List Name'),
      ('upload_key_type', 'string', True, 'CONTACT_INFO',
       'Matching key type: CONTACT_INFO, CRM_ID, or MOBILE_ADVERTISING_ID'),
      ('app_id', 'string', False, '', 'Mobile application ID'),
      ('membership_life_span', 'number', True, 10000,
       'Membership Life Span, days'),
      ('remove_data', 'boolean', True, False,
       'Remove data from existing Audience List'),
  ]

  # BigQuery batch size for querying results. Default to 10000.
  BQ_BATCH_SIZE = int(10000)

  def _get_user_list(self, user_list_service):
    """Get or create the Customer Match list.

    Args:
      user_list_service: AdwordsUserListService service object.

    Returns:
      The numeric id of the existing or newly created user list.
    """
    # Check if the list already exists.
    selector = {
        'fields': ['Name', 'Id'],
        'predicates': [{
            'field': 'Name',
            'operator': 'EQUALS',
            'values': self._params['list_name'],
        }],
    }
    result = user_list_service.get(selector)
    if result['entries']:
      user_list = result['entries'][0]
      self.log_info('User list "%s" with ID = %d was found.',
                    user_list['name'], user_list['id'])
      return user_list['id']
    # The list doesn't exist, have to create one.
    user_list = {
        'xsi_type': 'CrmBasedUserList',
        'name': self._params['list_name'],
        'description': 'This is a list of users created by CRMint',
        'membershipLifeSpan': self._params['membership_life_span'],
        'uploadKeyType': self._params['upload_key_type'],
    }
    if self._params['upload_key_type'] == 'MOBILE_ADVERTISING_ID':
      # Mobile-id lists additionally require the owning application id.
      user_list['appId'] = self._params['app_id']
    # Create an operation to add the user list.
    operations = [{'operator': 'ADD', 'operand': user_list}]
    result = user_list_service.mutate(operations)
    user_list = result['value'][0]
    self.log_info('The user list "%s" with ID = %d has been created.',
                  user_list['name'], user_list['id'])
    return user_list['id']

  def _process_page(self, page_data):
    """Upload data fetched from BigQuery table to the Customer Match list."""

    def remove_nones(obj):
      """Remove all None and empty dict values from a dict recursively."""
      if not isinstance(obj, dict):
        return obj
      clean_obj = {}
      for k in obj:
        v = remove_nones(obj[k])
        if v is not None:
          clean_obj[k] = v
      return clean_obj if clean_obj else None

    # NOTE(review): assumes the first column of each row holds one
    # dict-shaped member record — confirm against the table schema.
    members = [remove_nones(row[0]) for row in page_data]
    user_list_service = self._aw_client.GetService('AdwordsUserListService',
                                                   'v201809')
    user_list_id = self._get_user_list(user_list_service)
    # Flow control to keep calls within usage limits.
    self.log_info('Starting upload.')
    for i in range(0, len(members), self._MAX_ITEMS_PER_CALL):
      members_to_upload = members[i:i + self._MAX_ITEMS_PER_CALL]
      mutate_members_operation = {
          'operand': {
              'userListId': user_list_id,
              'membersList': members_to_upload,
          },
      }
      if self._params["remove_data"]:
        mutate_members_operation['operator'] = 'REMOVE'
        operation_string = 'removed from'
      else:
        mutate_members_operation['operator'] = 'ADD'
        operation_string = 'added to'
      response = user_list_service.mutateMembers([mutate_members_operation])
      if 'userLists' in response:
        user_list = response['userLists'][0]
        self.log_info(
            '%d members were %s user list "%s" with ID = %d.',
            len(members_to_upload), operation_string, user_list['name'],
            user_list['id'])

  def _execute(self):
    """Processes one BQ page per run, re-enqueuing itself for the next."""
    self._aw_setup()
    self._bq_setup()
    self._table.reload()
    page_token = self._params.get('bq_page_token', None)
    page_iterator = self.retry(self._table.fetch_data)(
        max_results=self.BQ_BATCH_SIZE,
        page_token=page_token)
    page = next(page_iterator.pages)
    self._process_page(page)
    # Update the page token reference for the next iteration.
    page_token = page_iterator.next_page_token
    if page_token:
      self._params['bq_page_token'] = page_token
      self._enqueue(self.__class__.__name__, self._params, 0)
class BQToAppConversionAPI(BQWorker):
  """Worker that sends app conversions to App Conversion Tracking API."""

  PARAMS = [
      ('bq_project_id', 'string', False, '', 'BQ Project ID'),
      ('bq_dataset_id', 'string', True, '', 'BQ Dataset ID'),
      ('bq_table_id', 'string', True, '', 'BQ Table ID'),
  ]

  # Settings that must be configured globally (General Settings).
  GLOBAL_SETTINGS = ['app_conversion_api_developer_token']

  CONTENT_TYPE = 'application/json; charset=utf-8'
  # BQ columns forwarded as HTTP headers (underscores become dashes).
  HEADER_PARAMS = ['User_Agent', 'X_Forwarded_For']
  # BQ column holding the optional JSON request body.
  BODY_PARAM = 'app_event_data'
  # BQ columns required as query-string parameters.
  REQUIRED_PARAMS = ['rdid', 'id_type', 'lat', 'app_version', 'os_version',
                     'sdk_version', 'timestamp', 'link_id', 'app_event_type']
  OPTIONAL_PARAMS = ['value', 'app_event_name', 'currency_code', 'gclid']
  API_URL = 'https://www.googleadservices.com/pagead/conversion/app/1.0'

  # BigQuery batch size for querying results. Default to 10000.
  BQ_BATCH_SIZE = int(10000)

  def _send_api_requests(self, headers, params, body=None):
    """Sends app conversion and cross-network attribution requests."""
    response = requests.post(self.API_URL, headers=headers, params=params,
                             json=body)
    if response.status_code != requests.codes.ok:
      self.log_warn(
          'Failed to send app conversion request, status code %s.\n'
          ' Headers: %s\n Parameters: %s\n Body: %s'
          % (response.status_code, headers, params, body)
      )
      return
    result = json.loads(response.text)
    if not result['attributed']:
      self.log_warn(
          'App conversion was not attributed to Google Ads.\n'
          ' Headers: %s\n Parameters: %s\n Body: %s\n Errors: %s'
          % (headers, params, body, result['errors'])
      )
      return
    self.log_info(
        'App conversion was attributed to Google Ads.\n'
        ' Headers: %s\n Parameters: %s\n Body: %s\n Errors: %s'
        % (headers, params, body, result['errors'])
    )
    # Report the attributed ad event back via the cross-network endpoint.
    params['ad_event_id'] = result['ad_events'][0]['ad_event_id']
    params['attributed'] = 1
    response = requests.post('%s/cross_network' % self.API_URL,
                             headers=headers, params=params, json=body)
    if response.status_code != requests.codes.ok:
      self.log_warn(
          'Failed to send cross-network attribution request, status code %s.\n'
          ' Headers: %s\n Parameters: %s\n Body: %s'
          % (response.status_code, headers, params, body)
      )

  def _process_page(self, page, fields):
    """Send each row of a BQ table page as a single app conversion."""
    for values in page:
      row = dict(zip(fields, values))
      headers = {'Content-Type': self.CONTENT_TYPE}
      for param in self.HEADER_PARAMS:
        if row[param] is not None:
          headers[param.replace('_', '-')] = row[param]
        else:
          self.log_warn(
              'Missing value for the required header "%s" in table "%s.%s"' % (
                  param.replace('_', '-'), self._params['bq_dataset_id'],
                  self._params['bq_table_id']))
      params = {'dev_token': self._params['app_conversion_api_developer_token']}
      for param in self.REQUIRED_PARAMS:
        if row[param] is not None:
          params[param] = row[param]
        else:
          self.log_warn(
              'Missing value for the required param "%s" in table "%s.%s"' % (
                  param, self._params['bq_dataset_id'],
                  self._params['bq_table_id']))
      for param in self.OPTIONAL_PARAMS:
        if param in row and row[param] is not None:
          params[param] = row[param]
      if self.BODY_PARAM in row and row[self.BODY_PARAM] is not None:
        body = {self.BODY_PARAM: row[self.BODY_PARAM]}
      else:
        body = None
        # NOTE(review): Content-Length is forced to '0' only for body-less
        # requests — confirm the API requires this explicit header.
        headers['Content-Length'] = '0'
      self._send_api_requests(headers, params, body)

  def _execute(self):
    """Fetch a BQ table page, process it, schedule self for the next page."""
    if not self._params.get('app_conversion_api_developer_token'):
      raise WorkerException('App Conversion API developer token is not '
                            'specified in General Settings.')
    self._bq_setup()
    self._table.reload()
    page_token = self._params.get('bq_page_token', None)
    page_iterator = self.retry(self._table.fetch_data)(
        max_results=self.BQ_BATCH_SIZE,
        page_token=page_token)
    fields = [field.name for field in page_iterator.schema]
    # Fail fast if the table cannot satisfy the API's required fields.
    for param in self.REQUIRED_PARAMS + self.HEADER_PARAMS:
      if param not in fields:
        raise WorkerException(
            'Required field "%s" not found in table "%s.%s"' % (
                param, self._params['bq_dataset_id'],
                self._params['bq_table_id']))
    page = next(page_iterator.pages)
    self._process_page(page, fields)
    # Update the page token reference for the next iteration.
    page_token = page_iterator.next_page_token
    if page_token:
      self._params['bq_page_token'] = page_token
      self._enqueue(self.__class__.__name__, self._params, 0)
class AutoMLWorker(Worker):
  """Abstract AutoML worker."""

  @staticmethod
  def _get_automl_client():
    """Constructs a Resource for interacting with the AutoML API."""
    # You might be wondering why we're using the discovery-based Google API
    # client library as opposed to the more modern Google Cloud client library.
    # The reason is that the modern client libraries (e.g. google-cloud-automl)
    # are not supported on App Engine's Python 2 runtime.
    # See: https://github.com/googleapis/google-cloud-python
    service_credentials = ServiceAccountCredentials.from_json_keyfile_name(
        _KEY_FILE)
    return build('automl', 'v1beta1', credentials=service_credentials)

  @staticmethod
  def _get_full_model_name(project, location, model):
    """Constructs the fully-qualified name for the given AutoML model."""
    return "projects/%s/locations/%s/models/%s" % (project, location, model)
class AutoMLPredictor(AutoMLWorker):
  """Worker to run AutoML batch prediction jobs."""

  PARAMS = [
      ('model_project_id', 'string', True, '', 'AutoML Project ID'),
      ('model_location', 'string', True, '', 'AutoML Model Location'),
      ('model_id', 'string', True, '', 'AutoML Model ID'),
      ('input_bq_uri', 'string', False, '',
       'Input - BigQuery Table URI (e.g. bq://projectId/dataset/table)'),
      ('input_gcs_uri', 'string', False, '',
       'Input - Cloud Storage CSV URI (e.g. gs://bucket/directory/file.csv)'),
      ('output_bq_project_uri', 'string', False, '',
       'Output - BigQuery Project URI (e.g. bq://projectId)'),
      ('output_gcs_uri_prefix', 'string', False, '',
       'Output - Cloud Storage output directory (e.g. gs://bucket/directory)'),
  ]

  def _execute(self):
    """Launches a batch prediction job and spawns a waiter to track it."""
    # Construct the fully-qualified model name and config for the prediction.
    model_name = self._get_full_model_name(self._params['model_project_id'],
                                           self._params['model_location'],
                                           self._params['model_id'])
    request_body = {
        'inputConfig': self._generate_input_config(),
        'outputConfig': self._generate_output_config()
    }
    # Launch the prediction and retrieve its operation name so we can track it.
    client = self._get_automl_client()
    models_resource = client.projects().locations().models()
    response = models_resource.batchPredict(
        name=model_name, body=request_body).execute()
    self.log_info('Launched batch prediction job: %s -> %s', request_body,
                  response)
    # Since the batch prediction might take more than the 10 minutes the job
    # service has to serve a response to the Push Queue, we can't wait on it
    # here. We thus spawn a worker that waits until the operation is completed.
    self._enqueue('AutoMLWaiter', {'operation_name': response.get('name')}, 60)

  def _generate_input_config(self):
    """Constructs the input configuration for the batch prediction request."""
    bq_uri = self._params['input_bq_uri']
    gcs_uri = self._params['input_gcs_uri']
    if bq_uri and not gcs_uri:
      return {'bigquery_source': {'input_uri': bq_uri}}
    if gcs_uri and not bq_uri:
      return {'gcs_source': {'input_uris': [gcs_uri]}}
    # Both or neither provided: ambiguous configuration.
    raise WorkerException('Provide either a BigQuery or GCS source.')

  def _generate_output_config(self):
    """Constructs the output configuration for the batch prediction request."""
    bq_project_uri = self._params['output_bq_project_uri']
    gcs_uri_prefix = self._params['output_gcs_uri_prefix']
    if bq_project_uri and not gcs_uri_prefix:
      return {'bigquery_destination': {'output_uri': bq_project_uri}}
    if gcs_uri_prefix and not bq_project_uri:
      return {'gcs_destination': {'output_uri_prefix': gcs_uri_prefix}}
    # Both or neither provided: ambiguous configuration.
    raise WorkerException('Provide either a BigQuery or GCS destination.')
class AutoMLWaiter(AutoMLWorker):
  """Worker that keeps respawning until an AutoML operation is completed."""

  def _execute(self):
    """Polls the operation once; re-enqueues itself in 60s if unfinished."""
    client = self._get_automl_client()
    op_name = self._params['operation_name']
    response = client.projects().locations().operations().get(
        name=op_name).execute()
    if not response.get('done'):
      self.log_info('AutoML operation still running: %s', response)
      self._enqueue('AutoMLWaiter', self._params, 60)
      return
    if response.get('error'):
      raise WorkerException('AutoML operation failed: %s' % response)
    self.log_info('AutoML operation completed successfully: %s', response)
| apache-2.0 |
zyrikby/androguard | elsim/elsim/similarity/similarity.py | 35 | 14893 | # This file is part of Elsim
#
# Copyright (C) 2012, Anthony Desnos <desnos at t0t0.fr>
# All rights reserved.
#
# Elsim is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Elsim is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with Elsim. If not, see <http://www.gnu.org/licenses/>.
import zlib, bz2
import math, json, re
def simhash(x):
    """Return the simhash fingerprint of `x` via the external simhash module."""
    # Deferred import: the third-party module is only needed when this
    # helper is actually called. The local name deliberately shadows this
    # function with the module for the duration of the call.
    import simhash
    return simhash.simhash(x)
def entropy(data):
    """Return the Shannon entropy, in bits per symbol, of the string `data`.

    An empty string has entropy 0.0. Counting each symbol in a single pass
    makes this O(n + k) instead of the previous O(256 * n) scan over every
    possible byte value, and it also handles symbols outside the 0-255
    range (the old chr(x)-based loop silently ignored them).
    """
    total = len(data)
    if total == 0:
        return 0.0
    # Frequency of each distinct symbol.
    counts = {}
    for symbol in data:
        counts[symbol] = counts.get(symbol, 0) + 1
    result = 0.0
    for count in counts.values():
        p_x = float(count) / total
        result -= p_x * math.log(p_x, 2)
    return result
try:
    from ctypes import cdll, c_float, c_double, c_int, c_uint, c_void_p, Structure, addressof, cast, c_size_t

    # Python-side mirror of the struct consumed by libsimilarity:
    #struct libsimilarity {
    #   void *orig;
    #   unsigned int size_orig;
    #   void *cmp;
    #   unsigned size_cmp;
    #   unsigned int *corig;
    #   unsigned int *ccmp;
    #
    #   float res;
    #};
    class LIBSIMILARITY_T(Structure):
        _fields_ = [("orig", c_void_p),
                    ("size_orig", c_size_t),
                    ("cmp", c_void_p),
                    ("size_cmp", c_size_t),
                    ("corig", c_size_t),
                    ("ccmp", c_size_t),
                    ("res", c_float),
                   ]

    def new_zero_native():
        # Fresh zero-valued cache slot; c_size_t so the C side can write
        # through its address (see SIMILARITYNative._sim).
        return c_size_t( 0 )

    NATIVE_LIB = True
except:
    # NOTE(review): bare except — any import failure silently disables
    # native mode; consider narrowing to ImportError.
    NATIVE_LIB = False
def new_zero_python():
    # Pure-Python counterpart of new_zero_native: a fresh zero cache slot.
    return 0
# Compressor identifiers; values match the constants used by libsimilarity.
ZLIB_COMPRESS = 0
BZ2_COMPRESS = 1
SMAZ_COMPRESS = 2
LZMA_COMPRESS = 3
XZ_COMPRESS = 4
SNAPPY_COMPRESS = 5
VCBLOCKSORT_COMPRESS = 6

# Name -> id map for the user-selectable compressors.
# NOTE(review): SMAZ and VCBLOCKSORT are deliberately absent here —
# presumably not selectable by name; confirm against callers.
H_COMPRESSOR = { "BZ2" : BZ2_COMPRESS,
                 "ZLIB" : ZLIB_COMPRESS,
                 "LZMA" : LZMA_COMPRESS,
                 "XZ" : XZ_COMPRESS,
                 "SNAPPY" : SNAPPY_COMPRESS,
               }

# Reverse map: compressor id -> human-readable name.
HR_COMPRESSOR = {
                  BZ2_COMPRESS : "BZ2",
                  ZLIB_COMPRESS : "ZLIB",
                  LZMA_COMPRESS : "LZMA",
                  XZ_COMPRESS : "XZ",
                  SNAPPY_COMPRESS : "SNAPPY",
                }
class SIMILARITYBase(object):
    """Shared caching layer for the similarity implementations.

    Keeps, per compressor type: a cache of per-string values (__caches),
    a cache of pairwise results (__rcaches), plus a compressor-independent
    element cache (__ecaches). All caches are keyed by the Adler-32
    checksum of the input string(s).
    """

    def __init__(self, native_lib=False):
        # Currently selected compressor.
        self.ctype = ZLIB_COMPRESS
        # Per-compressor cache: adler32(s) -> per-string value.
        self.__caches = {
           ZLIB_COMPRESS : {},
           BZ2_COMPRESS : {},
           SMAZ_COMPRESS : {},
           LZMA_COMPRESS : {},
           XZ_COMPRESS : {},
           SNAPPY_COMPRESS : {},
           VCBLOCKSORT_COMPRESS : {},
        }
        # Per-compressor cache: adler32(s1+s2) -> (value, result) pair.
        self.__rcaches = {
           ZLIB_COMPRESS : {},
           BZ2_COMPRESS : {},
           SMAZ_COMPRESS : {},
           LZMA_COMPRESS : {},
           XZ_COMPRESS : {},
           SNAPPY_COMPRESS : {},
           VCBLOCKSORT_COMPRESS : {},
        }
        self.__ecaches = {}
        # Compression level passed to the backends.
        self.level = 9
        # Native mode needs ctypes-compatible zeros, python mode plain ints.
        if native_lib == True:
            self.new_zero = new_zero_native
        else:
            self.new_zero = new_zero_python

    def set_level(self, level):
        self.level = level

    def get_in_caches(self, s):
        # Returns a fresh zero slot on cache miss (EAFP).
        try:
            return self.__caches[ self.ctype ][ zlib.adler32( s ) ]
        except KeyError:
            return self.new_zero()

    def get_in_rcaches(self, s1, s2):
        # The pair cache is symmetric: try both concatenation orders.
        try:
            return self.__rcaches[ self.ctype ][ zlib.adler32( s1 + s2 ) ]
        except KeyError:
            try:
                return self.__rcaches[ self.ctype ][ zlib.adler32( s2 + s1 ) ]
            except KeyError:
                # Sentinel for "not cached".
                return -1, -1

    def add_in_caches(self, s, v):
        h = zlib.adler32( s )
        if h not in self.__caches[ self.ctype ]:
            self.__caches[ self.ctype ][ h ] = v

    def add_in_rcaches(self, s, v, r):
        h = zlib.adler32( s )
        if h not in self.__rcaches[ self.ctype ]:
            self.__rcaches[ self.ctype ][ h ] = (v, r)

    def clear_caches(self):
        # NOTE(review): only __caches is cleared — __rcaches and __ecaches
        # keep their entries; confirm whether that is intentional.
        for i in self.__caches:
            self.__caches[i] = {}

    def add_in_ecaches(self, s, v, r):
        h = zlib.adler32( s )
        if h not in self.__ecaches:
            self.__ecaches[ h ] = (v, r)

    def get_in_ecaches(self, s1):
        try:
            return self.__ecaches[ zlib.adler32( s1 ) ]
        except KeyError:
            return -1, -1

    def __nb_caches(self, caches):
        # Total number of entries across all compressor types.
        nb = 0
        for i in caches:
            nb += len(caches[i])
        return nb

    def set_compress_type(self, t):
        self.ctype = t

    def show(self):
        # Debug dump of cache sizes (Python 2 print statements).
        print "ECACHES", len(self.__ecaches)
        print "RCACHES", self.__nb_caches( self.__rcaches )
        print "CACHES", self.__nb_caches( self.__caches )
class SIMILARITYNative(SIMILARITYBase):
    """ctypes front-end for libsimilarity.so.

    Every measure is delegated to the shared library; results and
    compressed sizes are memoised by SIMILARITYBase.
    """
    def __init__(self, path="./libsimilarity/libsimilarity.so"):
        super(SIMILARITYNative, self).__init__(True)
        self._u = cdll.LoadLibrary( path )
        # Declare the C return types so ctypes does not default them to int.
        self._u.compress.restype = c_uint
        self._u.ncd.restype = c_int
        self._u.ncs.restype = c_int
        self._u.cmid.restype = c_int
        self._u.entropy.restype = c_double
        self._u.levenshtein.restype = c_uint
        self._u.kolmogorov.restype = c_uint
        self._u.bennett.restype = c_double
        self._u.RDTSC.restype = c_double
        # Reusable argument struct passed by address to the pairwise
        # measures.  NOTE(review): LIBSIMILARITY_T is declared elsewhere in
        # this file.
        self.__libsim_t = LIBSIMILARITY_T()
        self.set_compress_type( ZLIB_COMPRESS )
    def raz(self):
        """Release the native handles."""
        del self._u
        del self.__libsim_t
    def compress(self, s1):
        """Compressed size of s1 as computed by the native library."""
        res = self._u.compress( self.level, cast( s1, c_void_p ), len( s1 ) )
        return res
    def _sim(self, s1, s2, func):
        # Pair-cache lookup first; -1 signals a miss.
        end, ret = self.get_in_rcaches( s1, s2 )
        if end != -1:
            return end, ret
        # Fill the shared struct with the two buffers plus any cached
        # compressed sizes, then invoke the native measure.
        self.__libsim_t.orig = cast( s1, c_void_p )
        self.__libsim_t.size_orig = len(s1)
        self.__libsim_t.cmp = cast( s2, c_void_p )
        self.__libsim_t.size_cmp = len(s2)
        corig = self.get_in_caches(s1)
        ccmp = self.get_in_caches(s2)
        self.__libsim_t.corig = addressof( corig )
        self.__libsim_t.ccmp = addressof( ccmp )
        ret = func( self.level, addressof( self.__libsim_t ) )
        # The native call may have filled in the compressed sizes in place;
        # cache them (add_in_caches never overwrites existing entries).
        self.add_in_caches(s1, corig)
        self.add_in_caches(s2, ccmp)
        self.add_in_rcaches(s1+s2, self.__libsim_t.res, ret)
        return self.__libsim_t.res, ret
    def ncd(self, s1, s2):
        """Normalized Compression Distance -> (value, return code)."""
        return self._sim( s1, s2, self._u.ncd )
    def ncs(self, s1, s2):
        """Normalized Compression Similarity -> (value, return code)."""
        return self._sim( s1, s2, self._u.ncs )
    def cmid(self, s1, s2):
        """CMID measure -> (value, return code)."""
        return self._sim( s1, s2, self._u.cmid )
    def kolmogorov(self, s1):
        """Kolmogorov-complexity estimate of s1 -> (value, 0)."""
        ret = self._u.kolmogorov( self.level, cast( s1, c_void_p ), len( s1 ) )
        return ret, 0
    def bennett(self, s1):
        """Bennett logical-depth estimate of s1 -> (value, 0)."""
        ret = self._u.bennett( self.level, cast( s1, c_void_p ), len( s1 ) )
        return ret, 0
    def entropy(self, s1):
        """Entropy of s1 -> (value, 0), cached by string hash."""
        end, ret = self.get_in_ecaches( s1 )
        if end != -1:
            return end, ret
        res = self._u.entropy( cast( s1, c_void_p ), len( s1 ) )
        self.add_in_ecaches( s1, res, 0 )
        return res, 0
    def RDTSC(self):
        """Read the CPU timestamp counter via the native helper."""
        return self._u.RDTSC()
    def levenshtein(self, s1, s2):
        """Levenshtein distance -> (value, 0), computed natively (uncached)."""
        res = self._u.levenshtein( cast( s1, c_void_p ), len( s1 ), cast( s2, c_void_p ), len( s2 ) )
        return res, 0
    def set_compress_type(self, t):
        # Keep the python-side cache selector and the native library in sync.
        self.ctype = t
        self._u.set_compress_type(t)
class SIMILARITYPython(SIMILARITYBase):
def __init__(self):
super(SIMILARITYPython, self).__init__()
def set_compress_type(self, t):
self.ctype = t
if self.ctype != ZLIB_COMPRESS and self.ctype != BZ2_COMPRESS:
print "warning: compressor %s is not supported (use zlib default compressor)" % HR_COMPRESSOR[ t ]
self.ctype = ZLIB_COMPRESS
def compress(self, s1):
return len(self._compress(s1))
def _compress(self, s1):
if self.ctype == ZLIB_COMPRESS:
return zlib.compress( s1, self.level )
elif self.ctype == BZ2_COMPRESS:
return bz2.compress( s1, self.level )
def _sim(self, s1, s2, func):
end, ret = self.get_in_rcaches( s1, s2 )
if end != -1:
return end, ret
corig = self.get_in_caches(s1)
ccmp = self.get_in_caches(s2)
res, corig, ccmp, ret = func( s1, s2, corig, ccmp )
self.add_in_caches(s1, corig)
self.add_in_caches(s2, ccmp)
self.add_in_rcaches(s1+s2, res, ret)
return res, ret
def _ncd(self, s1, s2, s1size=0, s2size=0):
if s1size == 0:
s1size = self.compress(s1)
if s2size == 0:
s2size = self.compress(s2)
s3size = self.compress(s1+s2)
smax = max(s1size, s2size)
smin = min(s1size, s2size)
res = (abs(s3size - smin)) / float(smax)
if res > 1.0:
res = 1.0
return res, s1size, s2size, 0
def ncd(self, s1, s2):
return self._sim( s1, s2, self._ncd )
def ncs(self, s1, s2):
return self._sim( s1, s2, self._u.ncs )
def entropy(self, s1):
end, ret = self.get_in_ecaches( s1 )
if end != -1:
return end, ret
res = entropy( s1 )
self.add_in_ecaches( s1, res, 0 )
return res, 0
def levenshtein(self, a, b):
"Calculates the Levenshtein distance between a and b."
n, m = len(a), len(b)
if n > m:
# Make sure n <= m, to use O(min(n,m)) space
a,b = b,a
n,m = m,n
current = range(n+1)
for i in range(1,m+1):
previous, current = current, [i]+[0]*n
for j in range(1,n+1):
add, delete = previous[j]+1, current[j-1]+1
change = previous[j-1]
if a[j-1] != b[i-1]:
change = change + 1
current[j] = min(add, delete, change)
return current[n]
class SIMILARITY(object):
    """Facade that picks the native implementation when it can be loaded
    and falls back to the pure-python one otherwise.  Every public method
    is a plain delegation to the selected backend.
    """
    def __init__(self, path="./libsimilarity/libsimilarity.so", native_lib=True):
        if native_lib == True and NATIVE_LIB == True:
            try:
                self.s = SIMILARITYNative( path )
            except Exception:
                # Narrowed from a bare ``except:`` so KeyboardInterrupt /
                # SystemExit are no longer swallowed; any failure to load
                # the shared library still falls back to pure python.
                self.s = SIMILARITYPython()
        else:
            self.s = SIMILARITYPython()
    def raz(self):
        return self.s.raz()
    def set_level(self, level):
        return self.s.set_level(level)
    def compress(self, s1):
        return self.s.compress(s1)
    def ncd(self, s1, s2):
        return self.s.ncd(s1, s2)
    def ncs(self, s1, s2):
        return self.s.ncs(s1, s2)
    def cmid(self, s1, s2):
        return self.s.cmid(s1, s2)
    def kolmogorov(self, s1):
        return self.s.kolmogorov(s1)
    def bennett(self, s1):
        return self.s.bennett(s1)
    def entropy(self, s1):
        return self.s.entropy(s1)
    def RDTSC(self):
        return self.s.RDTSC()
    def levenshtein(self, s1, s2):
        return self.s.levenshtein(s1, s2)
    def set_compress_type(self, t):
        return self.s.set_compress_type(t)
    def show(self):
        self.s.show()
class DBFormat(object):
    """JSON-backed database of named element sets.

    On-disk layout (as built by add_name/add_element):
        { name: { "NAME": <regex string>,
                  sname: { "SIZE": <total size int>,
                           sclass: { <elem>: <size>, ... } } } }

    In memory, H mirrors D with element keys converted to ``long`` and held
    in sets for fast intersection, and N maps each name to its compiled
    NAME regex.
    """
    def __init__(self, filename):
        self.filename = filename
        self.D = {}
        fd = None
        try:
            with open(self.filename, "r+") as fd:
                self.D = json.load( fd )
        except IOError:
            # Missing/unreadable database file: start with an empty one.
            print "Impossible to open filename: " + filename
            self.D = {}
        self.H = {}
        self.N = {}
        # Build the fast lookup structures from the raw JSON dict.
        for i in self.D:
            self.H[i] = {}
            for j in self.D[i]:
                if j == "NAME":
                    self.N[ i ] = re.compile( self.D[i][j] )
                    continue
                self.H[i][j] = {}
                for k in self.D[i][j]:
                    if isinstance(self.D[i][j][k], dict):
                        self.H[i][j][k] = set()
                        for e in self.D[i][j][k].keys():
                            # JSON keys are strings; elements are numeric ids.
                            self.H[i][j][k].add( long(e) )
    def add_name(self, name, value):
        """Attach the NAME regex pattern ``value`` to ``name``."""
        if name not in self.D:
            self.D[ name ] = {}
        self.D[ name ]["NAME"] = value
    def add_element(self, name, sname, sclass, size, elem):
        """Record ``elem`` (with its size) under name/sname/sclass,
        creating any missing intermediate dicts and keeping the per-sname
        SIZE total up to date."""
        try:
            if elem not in self.D[ name ][ sname ][ sclass ]:
                self.D[ name ][ sname ][ sclass ][ elem ] = size
                self.D[ name ][ sname ][ "SIZE" ] += size
        except KeyError:
            # Backfill whichever level of the nested dict was missing,
            # then insert the element and account for its size.
            if name not in self.D:
                self.D[ name ] = {}
                self.D[ name ][ sname ] = {}
                self.D[ name ][ sname ][ "SIZE" ] = 0
                self.D[ name ][ sname ][ sclass ] = {}
            elif sname not in self.D[ name ]:
                self.D[ name ][ sname ] = {}
                self.D[ name ][ sname ][ "SIZE" ] = 0
                self.D[ name ][ sname ][ sclass ] = {}
            elif sclass not in self.D[ name ][ sname ]:
                self.D[ name ][ sname ][ sclass ] = {}
            self.D[ name ][ sname ][ "SIZE" ] += size
            self.D[ name ][ sname ][ sclass ][ elem ] = size
    def is_present(self, elem):
        """Return (True, name) if ``elem`` is a direct key of any entry,
        (False, None) otherwise."""
        for i in self.D:
            if elem in self.D[i]:
                return True, i
        return False, None
    def elems_are_presents(self, elems):
        """Intersect ``elems`` with every name/sname/sclass element set.

        Returns (ret, info) where ret[i][j][k] is
        [matched-set, total-count, matched-percentage, matched-size]
        (only for non-zero matched size) and info[i][j]["SIZE"] is the
        stored per-sname total.
        """
        ret = {}
        info = {}
        for i in self.H:
            ret[i] = {}
            info[i] = {}
            for j in self.H[i]:
                ret[i][j] = {}
                info[i][j] = {}
                for k in self.H[i][j]:
                    val = [self.H[i][j][k].intersection(elems), len(self.H[i][j][k]), 0, 0]
                    size = 0
                    for z in val[0]:
                        size += self.D[i][j][k][str(z)]
                    val[2] = (float(len(val[0]))/(val[1])) * 100
                    val[3] = size
                    if val[3] != 0:
                        ret[i][j][k] = val
                info[i][j][ "SIZE" ] = self.D[i][j]["SIZE"]
        return ret, info
    def classes_are_presents(self, classes):
        """Return the set of names whose NAME regex matches any of the
        given class strings."""
        m = set()
        for j in classes:
            for i in self.N:
                if self.N[i].search(j) != None:
                    m.add( i )
        return m
    def show(self):
        # Debug helper: dump entry counts (Python 2 print statements).
        for i in self.D:
            print i, ":"
            for j in self.D[i]:
                print "\t", j, len(self.D[i][j])
                for k in self.D[i][j]:
                    print "\t\t", k, len(self.D[i][j][k])
    def save(self):
        """Serialise the database back to its JSON file."""
        with open(self.filename, "w") as fd:
            json.dump(self.D, fd)
| apache-2.0 |
lihui7115/ChromiumGStreamerBackend | chrome/tools/inconsistent-eol.py | 185 | 4330 | #!/usr/bin/env python
# Copyright (c) 2011 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Find and fix files with inconsistent line endings.
This script requires 'dos2unix.exe' and 'unix2dos.exe' from Cygwin; they
must be in the user's PATH.
Arg: Either one or more files to examine, or (with --file-list) one or more
files that themselves contain lists of files. The argument(s) passed to
this script, as well as the paths in the file if any, may be relative or
absolute Windows-style paths (with either type of slash). The list might
be generated with 'find -type f' or extracted from a gcl change listing,
for example.
"""
import errno
import logging
import optparse
import subprocess
import sys
# Whether to produce excessive debugging output for each file in the list.
DEBUGGING = False
class Error(Exception):
    """Exception type raised for errors local to this script."""
def CountChars(text, str):
    """Count the number of non-overlapping instances of *str* in *text*.

    Note: the second parameter shadows the ``str`` builtin; the name is
    kept unchanged for interface compatibility with existing callers.
    """
    # str.count is equivalent to len(text.split(str)) - 1 but avoids
    # materialising a throwaway list of the split pieces.
    count = text.count(str)
    logging.debug(count)
    return count
def PrevailingEOLName(crlf, cr, lf):
    """Name the most common line ending among the three counts.

    Args:
      crlf: number of CRLF sequences in the file.
      cr:   number of lone CR characters (CRLFs excluded).
      lf:   number of lone LF characters (CRLFs excluded).

    Returns:
      'cr', 'crlf' or 'lf' -- whichever count is largest.  Ties favour
      'cr' first, then 'crlf'.
    """
    winner = max(crlf, cr, lf)
    for name, count in (('cr', cr), ('crlf', crlf)):
        if count == winner:
            return name
    return 'lf'
def FixEndings(file, crlf, cr, lf):
    """Change the file's line endings to CRLF or LF, whichever is more common.

    Raises:
      Error: if the conversion tool exits with a non-zero status.
    """
    most = max(crlf, cr, lf)
    if most == crlf:
        tool = 'unix2dos.exe'
    else:
        tool = 'dos2unix.exe'
    # Pass an argument list with the default shell=False: the previous
    # 'tool %s' % file string with shell=True broke on -- and could be
    # exploited via -- filenames containing spaces or shell metacharacters.
    result = subprocess.call([tool, file])
    if result:
        raise Error('Error running %s %s' % (tool, file))
def ProcessFiles(filelist):
"""Fix line endings in each file in the filelist list."""
for filename in filelist:
filename = filename.strip()
logging.debug(filename)
try:
# Open in binary mode to preserve existing line endings.
text = open(filename, 'rb').read()
except IOError, e:
if e.errno != errno.ENOENT:
raise
logging.warning('File %s not found.' % filename)
continue
crlf = CountChars(text, '\r\n')
cr = CountChars(text, '\r') - crlf
lf = CountChars(text, '\n') - crlf
if options.force_lf:
if crlf > 0 or cr > 0:
print '%s: forcing to LF' % filename
# Fudge the counts to force switching to LF.
FixEndings(filename, 0, 0, 1)
else:
if ((crlf > 0 and cr > 0) or
(crlf > 0 and lf > 0) or
( lf > 0 and cr > 0)):
print '%s: mostly %s' % (filename, PrevailingEOLName(crlf, cr, lf))
FixEndings(filename, crlf, cr, lf)
def process(options, args):
"""Process the files."""
if not args or len(args) < 1:
raise Error('No files given.')
if options.file_list:
for arg in args:
filelist = open(arg, 'r').readlines()
ProcessFiles(filelist)
else:
filelist = args
ProcessFiles(filelist)
return 0
def main():
    """Configure logging, parse command-line flags and run the fixer."""
    level = logging.DEBUG if DEBUGGING else logging.INFO
    logging.basicConfig(level=level,
                        format='%(asctime)s %(levelname)-7s: %(message)s',
                        datefmt='%H:%M:%S')

    parser = optparse.OptionParser()
    parser.add_option("", "--file-list", action="store_true",
                      default=False,
                      help="Treat the arguments as files containing "
                           "lists of files to examine, rather than as "
                           "the files to be checked.")
    parser.add_option("", "--force-lf", action="store_true",
                      default=False,
                      help="Force any files with CRLF to LF instead.")
    options, args = parser.parse_args()

    return process(options, args)
# Script entry point.
if __name__ == '__main__':
    sys.exit(main())
| bsd-3-clause |
signed/intellij-community | python/lib/Lib/site-packages/django/contrib/gis/tests/geo3d/models.py | 404 | 1835 | from django.contrib.gis.db import models
class City3D(models.Model):
    """A named city located by a 3-D point geometry."""
    name = models.CharField(max_length=30)
    point = models.PointField(dim=3)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Interstate2D(models.Model):
    """A named interstate stored as a 2-D line string (srid=4269)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Interstate3D(models.Model):
    """A named interstate stored as a 3-D line string (srid=4269)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=4269)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class InterstateProj2D(models.Model):
    """A named interstate as a 2-D line string in a projected system (srid=32140)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class InterstateProj3D(models.Model):
    """A named interstate as a 3-D line string in a projected system (srid=32140)."""
    name = models.CharField(max_length=30)
    line = models.LineStringField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon2D(models.Model):
    """A named 2-D polygon (srid=32140)."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Polygon3D(models.Model):
    """A named 3-D polygon (srid=32140)."""
    name = models.CharField(max_length=30)
    poly = models.PolygonField(dim=3, srid=32140)
    objects = models.GeoManager()

    def __unicode__(self):
        return self.name
class Point2D(models.Model):
    """A bare 2-D point geometry with no other fields."""
    point = models.PointField()
    objects = models.GeoManager()
class Point3D(models.Model):
    """A bare 3-D point geometry with no other fields."""
    point = models.PointField(dim=3)
    objects = models.GeoManager()
class MultiPoint3D(models.Model):
    """A bare 3-D multi-point geometry with no other fields."""
    mpoint = models.MultiPointField(dim=3)
    objects = models.GeoManager()
| apache-2.0 |
TrossSoftwareAndTech/webvt | lib/node-v7.2.0/deps/v8_inspector/third_party/jinja2/jinja2/loaders.py | 333 | 17380 | # -*- coding: utf-8 -*-
"""
jinja2.loaders
~~~~~~~~~~~~~~
Jinja loader classes.
:copyright: (c) 2010 by the Jinja Team.
:license: BSD, see LICENSE for more details.
"""
import os
import sys
import weakref
from types import ModuleType
from os import path
from hashlib import sha1
from jinja2.exceptions import TemplateNotFound
from jinja2.utils import open_if_exists, internalcode
from jinja2._compat import string_types, iteritems
def split_template_path(template):
    """Split a '/'-separated template name into path segments.

    Raises `TemplateNotFound` for any segment that contains an OS path
    separator or equals ``..``, so template names can never escape the
    search path.  Empty and ``.`` segments are silently dropped.
    """
    segments = []
    for segment in template.split('/'):
        unsafe = (path.sep in segment
                  or (path.altsep and path.altsep in segment)
                  or segment == path.pardir)
        if unsafe:
            raise TemplateNotFound(template)
        if segment and segment != '.':
            segments.append(segment)
    return segments
class BaseLoader(object):
    """Baseclass for all loaders.  Subclass this and override `get_source` to
    implement a custom loading mechanism.  The environment provides a
    `get_template` method that calls the loader's `load` method to get the
    :class:`Template` object.

    A very basic example for a loader that looks up templates on the file
    system could look like this::

        from jinja2 import BaseLoader, TemplateNotFound
        from os.path import join, exists, getmtime

        class MyLoader(BaseLoader):

            def __init__(self, path):
                self.path = path

            def get_source(self, environment, template):
                path = join(self.path, template)
                if not exists(path):
                    raise TemplateNotFound(template)
                mtime = getmtime(path)
                with file(path) as f:
                    source = f.read().decode('utf-8')
                return source, path, lambda: mtime == getmtime(path)
    """

    #: if set to `False` it indicates that the loader cannot provide access
    #: to the source of templates.
    #:
    #: .. versionadded:: 2.4
    has_source_access = True

    def get_source(self, environment, template):
        """Get the template source, filename and reload helper for a template.
        It's passed the environment and template name and has to return a
        tuple in the form ``(source, filename, uptodate)`` or raise a
        `TemplateNotFound` error if it can't locate the template.

        The source part of the returned tuple must be the source of the
        template as unicode string or a ASCII bytestring.  The filename should
        be the name of the file on the filesystem if it was loaded from there,
        otherwise `None`.  The filename is used by python for the tracebacks
        if no loader extension is used.

        The last item in the tuple is the `uptodate` function.  If auto
        reloading is enabled it's always called to check if the template
        changed.  No arguments are passed so the function must store the
        old state somewhere (for example in a closure).  If it returns `False`
        the template will be reloaded.
        """
        if not self.has_source_access:
            raise RuntimeError('%s cannot provide access to the source' %
                               self.__class__.__name__)
        raise TemplateNotFound(template)

    def list_templates(self):
        """Iterates over all templates.  If the loader does not support that
        it should raise a :exc:`TypeError` which is the default behavior.
        """
        raise TypeError('this loader cannot iterate over all templates')

    # NOTE(review): @internalcode comes from jinja2.utils; presumably it
    # hides this frame from template tracebacks -- confirm there.
    @internalcode
    def load(self, environment, name, globals=None):
        """Loads a template.  This method looks up the template in the cache
        or loads one by calling :meth:`get_source`.  Subclasses should not
        override this method as loaders working on collections of other
        loaders (such as :class:`PrefixLoader` or :class:`ChoiceLoader`)
        will not call this method but `get_source` directly.
        """
        code = None
        # A fresh dict per call -- deliberately not a mutable default arg.
        if globals is None:
            globals = {}

        # first we try to get the source for this template together
        # with the filename and the uptodate function.
        source, filename, uptodate = self.get_source(environment, name)

        # try to load the code from the bytecode cache if there is a
        # bytecode cache configured.
        bcc = environment.bytecode_cache
        if bcc is not None:
            bucket = bcc.get_bucket(environment, name, filename, source)
            code = bucket.code

        # if we don't have code so far (not cached, no longer up to
        # date) etc. we compile the template
        if code is None:
            code = environment.compile(source, name, filename)

        # if the bytecode cache is available and the bucket doesn't
        # have a code so far, we give the bucket the new code and put
        # it back to the bytecode cache.
        if bcc is not None and bucket.code is None:
            bucket.code = code
            bcc.set_bucket(bucket)

        return environment.template_class.from_code(environment, code,
                                                    globals, uptodate)
class FileSystemLoader(BaseLoader):
    """Loads templates from the file system.  This loader can find templates
    in folders on the file system and is the preferred way to load them.

    The loader takes the path to the templates as string, or if multiple
    locations are wanted a list of them which is then looked up in the
    given order::

    >>> loader = FileSystemLoader('/path/to/templates')
    >>> loader = FileSystemLoader(['/path/to/templates', '/other/path'])

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.

    To follow symbolic links, set the *followlinks* parameter to ``True``::

    >>> loader = FileSystemLoader('/path/to/templates', followlinks=True)

    .. versionchanged:: 2.8+
       The *followlinks* parameter was added.
    """

    def __init__(self, searchpath, encoding='utf-8', followlinks=False):
        # Accept a single path or any sequence of paths (searched in order).
        if isinstance(searchpath, string_types):
            searchpath = [searchpath]
        self.searchpath = list(searchpath)
        self.encoding = encoding
        self.followlinks = followlinks

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        # First searchpath that contains the template wins.
        for searchpath in self.searchpath:
            filename = path.join(searchpath, *pieces)
            f = open_if_exists(filename)
            if f is None:
                continue
            try:
                contents = f.read().decode(self.encoding)
            finally:
                f.close()

            mtime = path.getmtime(filename)

            # The closure freezes this file's mtime; the template counts as
            # up to date while the mtime is unchanged, and stale if the
            # file has been removed in the meantime.
            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False
            return contents, filename, uptodate
        raise TemplateNotFound(template)

    def list_templates(self):
        found = set()
        for searchpath in self.searchpath:
            walk_dir = os.walk(searchpath, followlinks=self.followlinks)
            for dirpath, dirnames, filenames in walk_dir:
                for filename in filenames:
                    # Strip the searchpath prefix and normalise separators
                    # to '/' so results match template-name syntax.
                    template = os.path.join(dirpath, filename) \
                        [len(searchpath):].strip(os.path.sep) \
                        .replace(os.path.sep, '/')
                    if template[:2] == './':
                        template = template[2:]
                    if template not in found:
                        found.add(template)
        return sorted(found)
class PackageLoader(BaseLoader):
    """Load templates from python eggs or packages.  It is constructed with
    the name of the python package and the path to the templates in that
    package::

        loader = PackageLoader('mypackage', 'views')

    If the package path is not given, ``'templates'`` is assumed.

    Per default the template encoding is ``'utf-8'`` which can be changed
    by setting the `encoding` parameter to something else.  Due to the nature
    of eggs it's only possible to reload templates if the package was loaded
    from the file system and not a zip file.
    """

    def __init__(self, package_name, package_path='templates',
                 encoding='utf-8'):
        from pkg_resources import DefaultProvider, ResourceManager, \
             get_provider
        provider = get_provider(package_name)
        self.encoding = encoding
        self.manager = ResourceManager()
        # mtime-based auto-reload only works when the package is a real
        # directory on disk (DefaultProvider), not inside a zipped egg.
        self.filesystem_bound = isinstance(provider, DefaultProvider)
        self.provider = provider
        self.package_path = package_path

    def get_source(self, environment, template):
        pieces = split_template_path(template)
        p = '/'.join((self.package_path,) + tuple(pieces))
        if not self.provider.has_resource(p):
            raise TemplateNotFound(template)

        filename = uptodate = None
        if self.filesystem_bound:
            filename = self.provider.get_resource_filename(self.manager, p)
            mtime = path.getmtime(filename)
            # Same mtime-freezing closure as in FileSystemLoader.get_source.
            def uptodate():
                try:
                    return path.getmtime(filename) == mtime
                except OSError:
                    return False

        source = self.provider.get_resource_string(self.manager, p)
        return source.decode(self.encoding), filename, uptodate

    def list_templates(self):
        path = self.package_path
        if path[:2] == './':
            path = path[2:]
        elif path == '.':
            path = ''
        offset = len(path)
        results = []
        # Recursively walk the package resources, collecting names relative
        # to package_path.
        def _walk(path):
            for filename in self.provider.resource_listdir(path):
                fullname = path + '/' + filename
                if self.provider.resource_isdir(fullname):
                    _walk(fullname)
                else:
                    results.append(fullname[offset:].lstrip('/'))
        _walk(path)
        results.sort()
        return results
class DictLoader(BaseLoader):
    """Loads templates from a python dict mapping template names to their
    unicode source.  Handy for unittesting:

    >>> loader = DictLoader({'index.html': 'source here'})

    Auto-reloading is effectively disabled: a template is considered up to
    date as long as its source in the mapping has not changed.
    """

    def __init__(self, mapping):
        self.mapping = mapping

    def get_source(self, environment, template):
        if template not in self.mapping:
            raise TemplateNotFound(template)
        source = self.mapping[template]
        return source, None, lambda: source == self.mapping.get(template)

    def list_templates(self):
        return sorted(self.mapping)
class FunctionLoader(BaseLoader):
    """Delegates template loading to a user-supplied function.

    The function receives the template name and may return ``None`` (not
    found), the source as a unicode string, or a ``(source, filename,
    uptodatefunc)`` tuple as documented for :meth:`BaseLoader.get_source`:

    >>> def load_template(name):
    ...     if name == 'index.html':
    ...         return '...'
    ...
    >>> loader = FunctionLoader(load_template)

    The `uptodatefunc` is called when autoreload is enabled and must return
    `True` while the template is still current.
    """

    def __init__(self, load_func):
        self.load_func = load_func

    def get_source(self, environment, template):
        result = self.load_func(template)
        if result is None:
            raise TemplateNotFound(template)
        if isinstance(result, string_types):
            # Bare source string: no filename, no reload helper.
            return result, None, None
        return result
class PrefixLoader(BaseLoader):
    """Dispatches to one of several loaders selected by a template-name
    prefix.  The prefix is separated from the template by `delimiter`
    (a slash per default)::

        loader = PrefixLoader({
            'app1': PackageLoader('mypackage.app1'),
            'app2': PackageLoader('mypackage.app2')
        })

    Loading ``'app1/index.html'`` fetches the file from the app1 package,
    ``'app2/index.html'`` from the second.
    """

    def __init__(self, mapping, delimiter='/'):
        self.mapping = mapping
        self.delimiter = delimiter

    def get_loader(self, template):
        """Return ``(loader, remainder)`` for *template*, raising
        `TemplateNotFound` when the prefix is absent or unknown."""
        try:
            prefix, remainder = template.split(self.delimiter, 1)
            delegate = self.mapping[prefix]
        except (ValueError, KeyError):
            raise TemplateNotFound(template)
        return delegate, remainder

    def get_source(self, environment, template):
        delegate, remainder = self.get_loader(template)
        try:
            return delegate.get_source(environment, remainder)
        except TemplateNotFound:
            # Re-raise with the prefixed name so the error refers to the
            # template exactly as the caller requested it.
            raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        delegate, local_name = self.get_loader(name)
        try:
            return delegate.load(environment, local_name, globals)
        except TemplateNotFound:
            # Same re-raise-with-prefix trick as in get_source above.
            raise TemplateNotFound(name)

    def list_templates(self):
        return [prefix + self.delimiter + name
                for prefix, delegate in iteritems(self.mapping)
                for name in delegate.list_templates()]
class ChoiceLoader(BaseLoader):
    """Tries a sequence of loaders in order until one of them succeeds;
    works like `PrefixLoader` but without any prefix.

    >>> loader = ChoiceLoader([
    ...     FileSystemLoader('/path/to/user/templates'),
    ...     FileSystemLoader('/path/to/system/templates')
    ... ])

    Useful to let user templates override builtin ones from a different
    location.
    """

    def __init__(self, loaders):
        self.loaders = loaders

    def get_source(self, environment, template):
        for candidate in self.loaders:
            try:
                return candidate.get_source(environment, template)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(template)

    @internalcode
    def load(self, environment, name, globals=None):
        for candidate in self.loaders:
            try:
                return candidate.load(environment, name, globals)
            except TemplateNotFound:
                continue
        raise TemplateNotFound(name)

    def list_templates(self):
        names = set()
        for candidate in self.loaders:
            names.update(candidate.list_templates())
        return sorted(names)
class _TemplateModule(ModuleType):
    """Like a normal module but with support for weak references.

    ModuleLoader stores a weakref.proxy to an instance of this class in
    ``sys.modules``; the subclass exists so such instances can be the
    target of a weak reference.
    """
class ModuleLoader(BaseLoader):
    """This loader loads templates from precompiled templates.

    Example usage:

    >>> loader = ChoiceLoader([
    ...     ModuleLoader('/path/to/compiled/templates'),
    ...     FileSystemLoader('/path/to/templates')
    ... ])

    Templates can be precompiled with :meth:`Environment.compile_templates`.
    """

    # Precompiled templates expose no source text.
    has_source_access = False

    def __init__(self, path):
        # Unique package name per loader instance (based on id()).
        package_name = '_jinja2_module_templates_%x' % id(self)

        # create a fake module that looks for the templates in the
        # path given.
        mod = _TemplateModule(package_name)
        if isinstance(path, string_types):
            path = [path]
        else:
            path = list(path)
        mod.__path__ = path

        sys.modules[package_name] = weakref.proxy(mod,
            lambda x: sys.modules.pop(package_name, None))

        # the only strong reference, the sys.modules entry is weak
        # so that the garbage collector can remove it once the
        # loader that created it goes out of business.
        self.module = mod
        self.package_name = package_name

    @staticmethod
    def get_template_key(name):
        """Attribute-safe key for *name*: sha1 of its utf-8 encoding."""
        return 'tmpl_' + sha1(name.encode('utf-8')).hexdigest()

    @staticmethod
    def get_module_filename(name):
        """File name the compiled module for *name* is stored under."""
        return ModuleLoader.get_template_key(name) + '.py'

    @internalcode
    def load(self, environment, name, globals=None):
        """Import the precompiled module for *name* and build the template
        from its namespace; raises `TemplateNotFound` when missing."""
        key = self.get_template_key(name)
        module = '%s.%s' % (self.package_name, key)
        mod = getattr(self.module, module, None)
        if mod is None:
            try:
                mod = __import__(module, None, None, ['root'])
            except ImportError:
                raise TemplateNotFound(name)

            # remove the entry from sys.modules, we only want the attribute
            # on the module object we have stored on the loader.
            sys.modules.pop(module, None)

        return environment.template_class.from_module_dict(
            environment, mod.__dict__, globals)
| gpl-3.0 |
ikcam/django-skeleton | core/templatetags/djaneiro.py | 1 | 11728 | from django import template
from django.apps import apps
from django.conf import settings
from django.contrib.contenttypes.models import ContentType
from django.core.validators import EMPTY_VALUES
from django.urls import reverse_lazy
from django.utils.html import mark_safe
from django.utils.translation import ugettext_lazy as _
from boilerplate.templatetags.boilerplate import url_replace
from core.constants import ACTIONS
from core.models import Company
register = template.Library()
def render(tag, content=None, **kwargs):
    """Render an HTML element as a string.

    Keyword arguments become attributes; when ``content`` is None a
    self-closing tag is produced.  (The historical double space after the
    tag name, e.g. ``<br  />``, is preserved for output compatibility.)
    """
    attrs = ''.join(
        ' {attr}="{value}"'.format(attr=key, value=value)
        for key, value in kwargs.items()
    )
    if content is None:
        template = '<{tag} {attrs} />'
    else:
        template = '<{tag} {attrs}>{content}</{tag}>'
    return template.format(attrs=attrs, content=content, tag=tag)
def build_breadcrumb_item(**kwargs):
    """Render one ``<li class="breadcrumb-item">`` entry.

    A truthy ``href`` wraps the content in a link; otherwise the content
    is wrapped in <strong> (the current page).  Remaining kwargs become
    attributes on the <li>.
    """
    kwargs['class'] = 'breadcrumb-item'
    body = kwargs.pop('content')
    if kwargs.get('href'):
        inner = render('a', body, href=kwargs.pop('href'))
    else:
        inner = render('strong', body)
    return render('li', inner, **kwargs)
def build_icon(icon):
    """Render an empty glyphicon <span> for the given icon name."""
    return render('span', '', **{'class': 'glyphicon glyphicon-{}'.format(icon)})
def build_button(**kwargs):
    """Render one action button for an object or object list.

    Required kwargs: ``action`` (key into ACTIONS, which supplies icon,
    title and level), ``size``, plus the routing kwargs forwarded to
    build_url().  When ``inline`` is falsy, the action at ``index`` 0 is
    rendered as the main (split) button -- with a dropdown caret when
    ``total`` > 1 -- and every later action is returned as an <li> entry
    destined for the dropdown menu (see action_buttons).
    """
    action_details = ACTIONS[kwargs['action']]
    href = build_url(**kwargs)
    icon = build_icon(action_details['icon'])
    title = action_details['title']
    class_ = 'btn btn-%(size)s btn-%(level)s' % dict(
        size=kwargs['size'],
        level=action_details['level'],
    )
    content = '{} {}'.format(icon, title)
    inline = kwargs.get('inline', False)
    index = kwargs.get('index')
    total = kwargs.get('total')

    if inline:
        # Inline mode: every action is a standalone anchor button.
        return render(
            'a',
            content=content,
            **{'class': class_, 'href': href}
        )
    else:
        if index == 0:
            # Main button of the split-button group.
            button_first = render(
                'a',
                content=content,
                **{
                    'class': class_,
                    'href': href,
                }
            )
            # Caret button that toggles the dropdown (only appended when
            # there is more than one action).
            button_second = render(
                'button',
                content=render('span', ''),
                **{
                    'class': class_ + ' dropdown-toggle',
                    'data-toggle': 'dropdown',
                }
            )
            if total > 1:
                return button_first + button_second
            else:
                return button_first
        else:
            # Subsequent actions become entries of the dropdown menu.
            return render(
                'li', content=render('a', content=content, href=href)
            )
def build_url(
    app_name, action, object=None, object_list=None, parent_object=None,
    **kwargs
):
    """Reverse the conventional URL name ``app:[parentmodel_]model_action``.

    The pk of ``parent_object`` and ``object`` (when given) are appended as
    positional args.  Note: the ``object`` parameter shadows the builtin;
    the name is part of the public interface (callers pass it by keyword)
    and must not change.
    """
    parts = [app_name, ':']
    args = []

    if parent_object:
        parts.append(parent_object.__class__.__name__.lower() + '_')
        args.append(parent_object.pk)

    if object:
        parts.append(object.__class__.__name__.lower())
        args.append(object.pk)
    elif object_list is not None:
        parts.append(object_list.model.__name__.lower())

    parts.append('_' + action)
    return reverse_lazy(''.join(parts), args=args)
@register.simple_tag(takes_context=True)
def action_buttons(context, **kwargs):
    """Template tag: render the action buttons for an object or queryset.

    ``object`` / ``object_list`` / ``parent_object`` / ``action_list`` are
    taken from kwargs first, falling back to the template context.  Each
    action is filtered through the user's company permissions; the result
    is either an inline run of buttons or a Bootstrap split-button with a
    dropdown for the remaining actions.
    """
    object = (
        kwargs.get('object', None) or
        context.get('object', None)
    )
    object_list = (
        kwargs.get('object_list', None) or
        context.get('object_list', None)
    )
    parent_object = (
        kwargs.get('parent_object', None) or
        context.get('parent_object', None)
    )

    # The object's own action_list wins; list views fall back to the
    # explicitly provided one.
    if object is not None and hasattr(object, 'action_list'):
        action_list = object.action_list
    elif object_list is not None:
        action_list = (
            kwargs.get('action_list', None) or
            context.get('action_list', None)
        )
    else:
        action_list = None

    # action_list may be a callable (e.g. a model method).
    if callable(action_list):
        action_list = action_list()

    if action_list in EMPTY_VALUES:
        return ''

    app_name = (
        kwargs.get('app_name', None) or
        context['request'].resolver_match.app_name
    )
    inline = True if kwargs.get('inline') else False
    index = 0
    size = kwargs.get('size', 'size-default')
    response = ''
    response_items = []

    for action in action_list:
        if object:
            model_app = object._meta.app_label.lower()
            model = object.__class__.__name__.lower()
        elif object_list is not None:
            model_app = object_list.model._meta.app_label.lower()
            model = object_list.model.__name__.lower()

        action_details = ACTIONS[action] if action else None

        # NOTE(review): permission names here use ':' and the
        # '<app>:<prefix>_<model>' shape -- verify this matches what
        # has_company_perm expects.
        permission_name = '{model_app}:{prefix}_{model}'.format(
            model_app=model_app,
            model=model,
            prefix=action_details['permission_prefix'],
        )

        # Skip actions the user is not allowed to perform.
        if not context['user'].has_company_perm(
            context['request'].company, permission_name
        ):
            continue

        button = build_button(
            action=action,
            app_name=app_name,
            inline=inline,
            index=index,
            object=object,
            object_list=object_list,
            parent_object=parent_object,
            size=size,
            total=len(action_list),
        )

        # First permitted action becomes the main button; the rest become
        # dropdown <li> entries (index is only advanced for permitted ones).
        if inline or index == 0:
            response += button + ' '
        else:
            response_items.append(button)

        index += 1

    if inline:
        return mark_safe(response)

    if len(response_items) == 0:
        response_items = ''
    else:
        response_items = render(
            'ul',
            content=''.join(response_items),
            **{'class': 'dropdown-menu dropdown-menu-right'}
        )

    return mark_safe(
        render(
            'div',
            content=(response + response_items),
            **{'class': 'btn-group'}
        )
    )
@register.simple_tag(takes_context=True)
def breadcrumb(context, **kwargs):
    """Render the breadcrumb trail for the current page as safe HTML.

    The trail is built in order: Home, the current app, an optional
    parent object (its list page then its detail page), then the current
    object / object list / form, and finally the current action title.
    Every piece is looked up in ``kwargs`` first with the template
    context as fallback.
    """
    action = (
        kwargs.get('action', None) or
        context.get('action', None)
    )
    action_details = ACTIONS[action] if action else None
    object = (
        kwargs.get('object', None) or
        context.get('object', None)
    )
    object_list = (
        kwargs.get('object_list', None) or
        context.get('object_list', None)
    )
    form = (
        kwargs.get('form', None) or
        context.get('form', None)
    )
    parent_object = (
        kwargs.get('parent_object', None) or
        context.get('parent_object', None)
    )
    response = list()
    # Home
    href = settings.LOGIN_REDIRECT_URL
    content = _("Home")
    response.append(
        build_breadcrumb_item(content=content, href=href)
    )
    # Current app
    app_name = (
        kwargs.get('app_name', None) or
        context['request'].resolver_match.app_name
    )
    if app_name != 'public':
        href = reverse_lazy('{}:index'.format(app_name))
        # Pick the app label from the most specific source available.
        if parent_object:
            content = parent_object._meta.app_config.verbose_name
        elif object:
            content = object._meta.app_config.verbose_name
        elif object_list is not None:
            content = object_list.model._meta.app_config.verbose_name
        elif form and hasattr(form, '_meta'):
            content = form._meta.model._meta.app_config.verbose_name
        else:
            content = apps.get_app_config(app_name).verbose_name
        response.append(
            build_breadcrumb_item(content=content, href=href)
        )
    if parent_object:
        # Add list
        model_name = parent_object.__class__.__name__.lower()
        href = reverse_lazy('{}:{}_list'.format(app_name, model_name))
        content = parent_object._meta.verbose_name_plural
        response.append(
            build_breadcrumb_item(content=content, href=href)
        )
        # Add detail
        response.append(
            build_breadcrumb_item(
                content=parent_object,
                href=parent_object.get_absolute_url()
            )
        )
    if object_list is not None:
        # List pages end with the (unlinked) plural model name.
        content = object_list.model._meta.verbose_name_plural
        response.append(
            build_breadcrumb_item(content=content)
        )
    elif object:
        # Add list
        model_name = object.__class__.__name__.lower()
        content = object._meta.verbose_name_plural
        # With a parent object, the list crumb was already linked above.
        if not parent_object:
            href = reverse_lazy('{}:{}_list'.format(app_name, model_name))
            response.append(
                build_breadcrumb_item(content=content, href=href)
            )
        else:
            response.append(
                build_breadcrumb_item(content=content)
            )
        if form:
            # Add detail
            response.append(
                build_breadcrumb_item(
                    content=object,
                    href=object.get_absolute_url()
                )
            )
            if action:
                content = ACTIONS[action]['title']
                response.append(
                    build_breadcrumb_item(content=content)
                )
        elif action:
            # Object + action but no form: linked detail, then action title.
            response.append(
                build_breadcrumb_item(
                    content=object, href=object.get_absolute_url()
                )
            )
            response.append(
                build_breadcrumb_item(content=action_details['title'])
            )
        else:
            # Plain detail page: the object itself is the last crumb.
            response.append(
                build_breadcrumb_item(content=object)
            )
    elif form:
        if hasattr(form, '_meta'):
            # Add list
            model_name = form._meta.model.__name__.lower()
            content = form._meta.model._meta.verbose_name_plural
            if parent_object:
                response.append(
                    build_breadcrumb_item(content=content)
                )
            else:
                href = reverse_lazy('{}:{}_list'.format(app_name, model_name))
                response.append(
                    build_breadcrumb_item(content=content, href=href)
                )
        if action:
            content = ACTIONS[action]['title']
            response.append(
                build_breadcrumb_item(content=content)
            )
    return mark_safe(
        render('ol', ''.join(response), **{'class': 'breadcrumb'})
    )
@register.simple_tag(takes_context=True)
def sortable_column(context, **kwargs):
    """Render a column-header link that toggles sorting on ``field``.

    The current ordering is read from the ``o`` query parameter; the link
    flips between ascending and descending and carries a matching
    FontAwesome sort icon.
    """
    request = context['request']
    field = kwargs.get('field')
    title = kwargs.get('title')
    ascending = '{}'.format(field)
    descending = '-{}'.format(field)
    if 'o' in request.GET and request.GET['o'] == field:
        # Currently ascending: offer the descending link.
        href = url_replace(request, 'o', descending)
        icon_class = 'fa fa-sort-asc'
    elif 'o' in request.GET and request.GET['o'] == descending:
        # Currently descending: offer the ascending link.
        href = url_replace(request, 'o', ascending)
        icon_class = 'fa fa-sort-desc'
    else:
        # Not sorted on this field yet.
        href = url_replace(request, 'o', ascending)
        icon_class = 'fa fa-sort'
    icon = render('i', '', **{'class': icon_class})
    return mark_safe(
        render(
            'a',
            '{} {}'.format(_(title), icon),
            href='?{}'.format(href)
        )
    )
@register.filter(name='has_module')
def has_module(company, module):
    """Template filter: whether ``company`` has ``module`` enabled.

    Anything that is not a ``Company`` instance yields ``False``.
    """
    if isinstance(company, Company):
        return company.has_module(module)
    return False
@register.filter(name='module_price')
def module_price(company, module):
    """Template filter: the price of ``module`` for ``company``.

    Mirrors ``has_module``: non-``Company`` values yield ``False``.
    """
    if isinstance(company, Company):
        return company.module_price(module)
    return False
@register.filter(name='event_url')
def event_url(object):
    """Template filter: URL for attaching an event to ``object``.

    The target object is identified by a ``<content_type_id>-<object_id>``
    slug, resolved through the ``public:event_add_object`` route.
    """
    content_type = ContentType.objects.get_for_model(object)
    slug = '{}-{}'.format(content_type.id, object.id)
    return reverse_lazy('public:event_add_object', args=[slug])
| bsd-3-clause |
rajul/Pydev | plugins/org.python.pydev.jython/Lib/base64.py | 229 | 11357 | #! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
translation = _translation[:]
for k, v in altchars.items():
translation[ord(k)] = v
return s.translate(''.join(translation))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
    """Encode a string using Base64.

    s is the string to encode.  Optional altchars must be a string of at
    least length 2 (additional characters are ignored) which specifies an
    alternative alphabet for the '+' and '/' characters.  This allows an
    application to e.g. generate url or filesystem safe Base64 strings.

    The encoded string is returned.
    """
    # binascii appends a trailing newline; strip it off.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    return _translate(encoded, {'+': altchars[0], '/': altchars[1]})
def b64decode(s, altchars=None):
    """Decode a Base64 encoded string.

    s is the string to decode.  Optional altchars must be a string of at
    least length 2 (additional characters are ignored) which specifies the
    alternative alphabet used instead of the '+' and '/' characters.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in
    the string.
    """
    if altchars is not None:
        s = _translate(s, {altchars[0]: '+', altchars[1]: '/'})
    try:
        return binascii.a2b_base64(s)
    # MODERNIZED: ``except E as v`` (valid since Python 2.6, matching the
    # ``with`` statement used elsewhere in this module) instead of the
    # Python-2-only ``except E, v`` form, for Python 3 portability.
    except binascii.Error as msg:
        # Transform this exception for consistency
        raise TypeError(msg)
def standard_b64encode(s):
    """Encode *s* with the standard Base64 alphabet.

    Thin wrapper around :func:`b64encode` with no alternative characters.
    """
    return b64encode(s)
def standard_b64decode(s):
    """Decode *s*, which was encoded with the standard Base64 alphabet.

    Returns the decoded string; raises TypeError on incorrect padding or
    non-alphabet characters.
    """
    return b64decode(s)
def urlsafe_b64encode(s):
    """Encode *s* with the url-safe Base64 alphabet.

    The url-safe alphabet substitutes '-' for '+' and '_' for '/'.
    """
    return b64encode(s, '-_')
def urlsafe_b64decode(s):
    """Decode *s*, which was encoded with the url-safe Base64 alphabet.

    The url-safe alphabet substitutes '-' for '+' and '_' for '/'.
    Raises TypeError on incorrect padding or non-alphabet characters.
    """
    return b64decode(s, '-_')
# Base32 encoding/decoding must be done in Python
# RFC 3548 Base32 alphabet: maps each 5-bit value to its character.
_b32alphabet = {
    0: 'A', 9: 'J', 18: 'S', 27: '3',
    1: 'B', 10: 'K', 19: 'T', 28: '4',
    2: 'C', 11: 'L', 20: 'U', 29: '5',
    3: 'D', 12: 'M', 21: 'V', 30: '6',
    4: 'E', 13: 'N', 22: 'W', 31: '7',
    5: 'F', 14: 'O', 23: 'X',
    6: 'G', 15: 'P', 24: 'Y',
    7: 'H', 16: 'Q', 25: 'Z',
    8: 'I', 17: 'R', 26: '2',
    }

# Forward table: list indexed by 5-bit value (sorted by key first).
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
# Reverse table: character -> value, stored as a Python 2 ``long`` so the
# 40-bit accumulator arithmetic in b32decode never overflows a plain int.
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
    """Encode a string using Base32.

    s is the string to encode.  The encoded string is returned.
    """
    parts = []
    # Base32 works on 40-bit (5-byte) quanta producing 8 characters each.
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide.  The intent of this
        # code is to process the 40 bits in units of 5 bits.  So we take the 1
        # leftover bit of c1 and tack it onto c2.  Then we take the 2 leftover
        # bits of c2 and tack them onto c3.  The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8  # 10 bits wide
        parts.extend([_b32tab[c1 >> 11],         # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12],         # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5],          # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f],        # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta: replace the characters that
    # encode only the zero padding with RFC 3548 '=' pad characters.
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.

    s is the string to decode.  Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input.  For security purposes, the
    default is False.

    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el).  The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O).  For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.

    The decoded string is returned.  A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    # Valid input is always a whole number of 8-character quanta.
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping.  The flag map01 will be either
    # False, or the character to map the digit 1 (one) to.  It should be
    # either L (el) or I (eye).
    if map01:
        s = _translate(s, {'0': 'O', '1': map01})
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right.  We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
    if padchars > 0:
        s = s[:-padchars]
    # Now decode the full quanta: accumulate eight 5-bit values into a
    # 40-bit integer, then emit it as 5 bytes via unhexlify.
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        # FIX: reuse ``val`` instead of performing a second, redundant
        # ``_b32rev[c]`` dictionary lookup.
        acc += val << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta: the pad-character count determines
    # how many trailing null bytes of the final 5-byte group are real.
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = ''                       # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Encode the string *s* using Base16 and return the result.

    Base16 is plain hexadecimal; RFC 3548 specifies the uppercase
    alphabet, so the hexlified output is upper-cased.
    """
    encoded = binascii.hexlify(s)
    return encoded.upper()
def b16decode(s, casefold=False):
    """Decode the Base16 encoded string *s* and return the result.

    Optional casefold is a flag specifying whether a lowercase alphabet
    is acceptable as input; for security purposes the default is False.
    A TypeError is raised if *s* contains non-alphabet characters.
    """
    if casefold:
        s = s.upper()
    # RFC 3548 recommends rejecting lowercase input unless explicitly
    # allowed, hence the strict uppercase character class.
    if re.search('[^0-9A-F]', s) is not None:
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(s)
# Legacy interface.  This code could be cleaned up since I don't believe
# binascii has any line length limitations.  It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3


def encode(input, output):
    """Encode a file: read binary data from *input*, write base64 lines
    (at most MAXLINESIZE characters each) to *output*."""
    while True:
        chunk = input.read(MAXBINSIZE)
        if not chunk:
            break
        # Top up to a full MAXBINSIZE quantum so that every emitted line
        # except possibly the last is exactly MAXLINESIZE characters.
        while len(chunk) < MAXBINSIZE:
            more = input.read(MAXBINSIZE - len(chunk))
            if not more:
                break
            chunk += more
        output.write(binascii.b2a_base64(chunk))
def decode(input, output):
    """Decode a file: read base64 lines from *input*, write the decoded
    binary data to *output*."""
    while True:
        line = input.readline()
        if not line:
            # End of input.
            break
        output.write(binascii.a2b_base64(line))
def encodestring(s):
    """Encode a string into multiple newline-terminated lines of base-64
    data and return the concatenated result."""
    pieces = [binascii.b2a_base64(s[offset:offset + MAXBINSIZE])
              for offset in range(0, len(s), MAXBINSIZE)]
    return "".join(pieces)
def decodestring(s):
    """Decode a (possibly multi-line) base-64 encoded string."""
    # a2b_base64 ignores embedded newlines, so no splitting is needed.
    return binascii.a2b_base64(s)
# Useable as a script...
def test():
    """Small test program

    Command-line driver (Python 2 only: uses print statements and the
    old ``except E, v`` syntax).  ``-e`` encodes (default), ``-d``/``-u``
    decode, ``-t`` runs the built-in round-trip self test.
    """
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    # Operate on the named file, or on stdin when no file (or '-') given.
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout)
    else:
        func(sys.stdin, sys.stdout)
def test1():
    """Round-trip the classic 'Aladdin:open sesame' example and print
    the original, the encoded form and the decoded form."""
    s0 = "Aladdin:open sesame"
    s1 = encodestring(s0)
    s2 = decodestring(s1)
    print s0, repr(s1), s2


if __name__ == '__main__':
    test()
| epl-1.0 |
sathieu/samba | python/samba/sites.py | 1 | 4049 | # python site manipulation code
# Copyright Matthieu Patou <mat@matws.net> 2011
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
"""Manipulating sites."""
import ldb
from ldb import FLAG_MOD_ADD, LdbError
class SiteException(Exception):
    """Base class for site-manipulation errors.

    The message is prefixed with the concrete exception class name when
    converted to a string.
    """

    def __init__(self, value):
        self.value = value

    def __str__(self):
        return "{0}: {1}".format(self.__class__.__name__, self.value)
class SiteNotFoundException(SiteException):
    """Raised when the site is not found although it is expected to exist."""

class SiteAlreadyExistsException(SiteException):
    """Raised when the site already exists although it is expected not to."""

class SiteServerNotEmptyException(SiteException):
    """Raised when the site still has servers attached."""
def create_site(samdb, configDn, siteName):
    """
    Create a site

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param siteName: Name of the site to create
    :return: True upon success
    :raise SiteAlreadyExists: if the site to be created already exists.
    """
    ret = samdb.search(base=configDn, scope=ldb.SCOPE_SUBTREE,
                       expression='(&(objectclass=Site)(cn=%s))' % siteName)
    if len(ret) != 0:
        raise SiteAlreadyExistsException('A site with the name %s already exists' % siteName)

    def _add_entry(dn, object_class):
        # Add one entry with the given DN and objectclass.
        msg = ldb.Message()
        msg.dn = dn
        msg["objectclass"] = ldb.MessageElement(object_class, FLAG_MOD_ADD, "objectclass")
        samdb.add(msg)

    # The site container itself, then its two mandatory children.
    site_dn = ldb.Dn(samdb, "Cn=%s,CN=Sites,%s" % (siteName, str(configDn)))
    _add_entry(site_dn, "site")
    _add_entry(ldb.Dn(samdb, "Cn=NTDS Site Settings,%s" % str(site_dn)),
               "nTDSSiteSettings")
    _add_entry(ldb.Dn(samdb, "Cn=Servers,%s" % str(site_dn)),
               "serversContainer")

    return True
def delete_site(samdb, configDn, siteName):
    """
    Delete a site

    :param samdb: A samdb connection
    :param configDn: The DN of the configuration partition
    :param siteName: Name of the site to delete
    :return: True upon success
    :raise SiteNotFoundException: if the site to be deleted does not exist.
    :raise SiteServerNotEmpty: if the site still has servers in it.
    """
    # Build "CN=<siteName>,CN=Sites,<configDn>" incrementally.  A dummy
    # "CN=X" child is added first and then overwritten via set_component —
    # presumably so ldb applies proper RDN escaping to siteName; TODO
    # confirm against the ldb API documentation.
    dnsite = ldb.Dn(samdb, "CN=Sites")
    if dnsite.add_base(configDn) == False:
        raise SiteException("dnsites.add_base() failed")
    if dnsite.add_child("CN=X") == False:
        raise SiteException("dnsites.add_child() failed")
    dnsite.set_component(0, "CN", siteName)

    dnservers = ldb.Dn(samdb, "CN=Servers")
    dnservers.add_base(dnsite)

    # Verify the site exists; both an empty result and NO_SUCH_OBJECT are
    # reported as SiteNotFoundException.
    try:
        ret = samdb.search(base=dnsite, scope=ldb.SCOPE_BASE,
                           expression="objectClass=site")
        if len(ret) != 1:
            raise SiteNotFoundException('Site %s does not exist' % siteName)
    except LdbError as e:
        (enum, estr) = e.args
        if enum == ldb.ERR_NO_SUCH_OBJECT:
            raise SiteNotFoundException('Site %s does not exist' % siteName)

    # Refuse to delete a site that still contains server objects.
    ret = samdb.search(base=dnservers, scope=ldb.SCOPE_ONELEVEL,
                       expression='(objectclass=server)')
    if len(ret) != 0:
        raise SiteServerNotEmptyException('Site %s still has servers in it, move them before removal' % siteName)

    # tree_delete removes the site and its whole subtree in one call.
    samdb.delete(dnsite, ["tree_delete:0"])

    return True
| gpl-3.0 |
google-research/google-research | lista_design_space/lista_models.py | 1 | 3858 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model definition for LISTA."""
import numpy as np
import tensorflow as tf
def shrink(data, theta):
  """Soft-threshold ``data`` using a non-negative version of ``theta``."""
  threshold = tf.nn.relu(theta)
  return tf.sign(data) * tf.nn.relu(tf.abs(data) - threshold)
def shrink_free(data, theta):
  """Soft-threshold ``data`` by ``theta`` without clamping the threshold."""
  magnitude = tf.nn.relu(tf.abs(data) - theta)
  return tf.sign(data) * magnitude
class ListaCell(tf.keras.layers.Layer):
  """Lista cell.

  One unrolled LISTA iteration.  The cell consumes a tensor that
  concatenates the measurement with the outputs of all previous cells
  and appends its own output to it (see ``call``).
  """

  def __init__(self, layer_id, learnable_matrix_x, learnable_matrix_b, theta,
               input_indices, dict_shape, neuron='soft', name=None):
    super(ListaCell, self).__init__(name=name)
    self.layer_id = layer_id
    self.learnable_matrix_b = learnable_matrix_b
    # Per-cell learnable threshold for the shrinkage nonlinearity.
    self.theta = tf.Variable(theta, trainable=True, name=name + '/theta')
    # Indices of earlier layers whose outputs feed this cell as skip
    # connections (empty for a plain LISTA chain).
    self.input_indices = input_indices
    if layer_id != 0:
      self.learnable_matrix_x = learnable_matrix_x
      # Mixing weights: one for the previous layer's output plus one per
      # skip connection.
      self.coefficient = tf.Variable(
          tf.ones((len(input_indices) + 1,)),
          trainable=True,
          name=name + '/coefficient')
    self.dict_shape = dict_shape
    self.neuron = neuron

  def call(self, inputs):
    # Layout of ``inputs`` (implied by the slicing below): the first
    # dict_shape[0] columns hold the measurement; each following
    # dict_shape[1]-wide chunk is the output of one earlier cell, with
    # the most recent cell's output last.
    side_connection = tf.matmul(inputs[:, :self.dict_shape[0]],
                                self.learnable_matrix_b)
    if self.layer_id == 0:
      output = side_connection
    else:
      # Start from the previous cell's output, then gather the outputs of
      # the cells selected as skip connections.
      new_inputs = [inputs[:, -self.dict_shape[1]:]]
      for idx in self.input_indices:
        new_inputs.append(inputs[:, self.dict_shape[0] +
                                 idx * self.dict_shape[1]:self.dict_shape[0] +
                                 (idx + 1) * self.dict_shape[1]])
      # Weighted average of the gathered inputs (learnable coefficients).
      new_inputs = tf.stack(new_inputs, axis=-1) * self.coefficient
      new_inputs = tf.reduce_mean(new_inputs, axis=-1)
      output = tf.matmul(new_inputs, self.learnable_matrix_x) + side_connection
    if self.neuron == 'soft':
      output = shrink_free(output, self.theta)
    # Append this cell's output so later cells (and the caller) can use it.
    return tf.concat([inputs, output], 1)
class Lista(tf.keras.Sequential):
  """Lista model.

  Sequential stack of ``ListaCell`` layers sharing the matrices W_b and
  W_x, initialized from the classical ISTA step for ``dictionary``.
  """

  def __init__(self, dictionary, lam, arch_str, num_layers=16, name='Lista'):
    super(Lista, self).__init__(name=name)
    self.dictionary = dictionary.astype(np.float32)
    # 1/L step size with L slightly above the squared spectral norm.
    self.scale = 1.001 * np.linalg.norm(dictionary, ord=2) ** 2
    self.theta = (lam / self.scale).astype(np.float32)
    # Classical ISTA initialization: B = D^T / L, X = I - B D.
    matrix_b = np.transpose(dictionary) / self.scale
    matrix_x = np.eye(dictionary.shape[1]) - np.matmul(matrix_b, dictionary)
    # Stored transposed because the cells multiply from the right.
    self.learnable_matrix_b = tf.Variable(
        np.transpose(matrix_b.astype(np.float32)), trainable=True, name='W_b')
    self.learnable_matrix_x = tf.Variable(
        np.transpose(matrix_x.astype(np.float32)), trainable=True, name='W_x')
    # arch_str encodes one integer per layer whose binary digits select
    # skip connections; empty string means a plain chain (all zeros).
    if not arch_str:
      arch = [0] * num_layers
    else:
      arch = [int(s) for s in arch_str.split('_')]
    for i in range(num_layers):
      self.create_cell(arch[i], i)

  def create_cell(self, arch, layer_id):
    # Decode the architecture integer: bit j (starting at the least
    # significant bit, j = 0) selects layer (layer_id - 2 - j) as a skip
    # connection input.
    indices = []
    i = 2
    while arch != 0:
      if arch % 2 != 0:
        indices.append(layer_id - i)
      arch //= 2
      i += 1
    cell = ListaCell(
        layer_id=layer_id,
        learnable_matrix_x=self.learnable_matrix_x,
        learnable_matrix_b=self.learnable_matrix_b,
        theta=self.theta,
        input_indices=indices,
        dict_shape=self.dictionary.shape,
        name='Lista_layer_{}'.format(layer_id + 1))
    self.add(cell)
| apache-2.0 |
glovebx/odoo | addons/auth_oauth/controllers/main.py | 55 | 8005 | import functools
import logging
import simplejson
import urlparse
import werkzeug.utils
from werkzeug.exceptions import BadRequest
import openerp
from openerp import SUPERUSER_ID
from openerp import http
from openerp.http import request
from openerp.addons.web.controllers.main import db_monodb, ensure_db, set_cookie_and_redirect, login_and_redirect
from openerp.addons.auth_signup.controllers.main import AuthSignupHome as Home
from openerp.modules.registry import RegistryManager
from openerp.tools.translate import _
_logger = logging.getLogger(__name__)
#----------------------------------------------------------
# helpers
#----------------------------------------------------------
def fragment_to_query_string(func):
    """Decorator for OAuth redirect endpoints.

    OAuth providers return the access token in the URL *fragment*, which
    never reaches the server.  When the decorated route is hit without
    any parameters, reply with a tiny HTML page whose script copies the
    fragment into the query string and reloads, so the second request
    carries the token as ordinary parameters and is dispatched to *func*.
    """
    @functools.wraps(func)
    def wrapper(self, *a, **kw):
        kw.pop('debug', False)
        if kw:
            return func(self, *a, **kw)
        return """<html><head><script>
            var l = window.location;
            var q = l.hash.substring(1);
            var r = l.pathname + l.search;
            if(q.length !== 0) {
                var s = l.search ? (l.search === '?' ? '' : '&') : '?';
                r = l.pathname + l.search + s + q;
            }
            if (r == l.pathname) {
                r = '/';
            }
            window.location = r;
        </script></head><body></body></html>"""
    return wrapper
#----------------------------------------------------------
# Controller
#----------------------------------------------------------
class OAuthLogin(Home):
    """Extend the login/signup/reset-password pages with OAuth links."""

    def list_providers(self):
        """Return enabled OAuth providers, each annotated with 'auth_link'.

        Any failure while reading the model (e.g. table not yet created)
        degrades to an empty provider list instead of erroring the page.
        """
        try:
            provider_obj = request.registry.get('auth.oauth.provider')
            providers = provider_obj.search_read(request.cr, SUPERUSER_ID, [('enabled', '=', True), ('auth_endpoint', '!=', False), ('validation_endpoint', '!=', False)])
            # TODO in forwardport: remove conditions on 'auth_endpoint' and 'validation_endpoint' when these fields will be 'required' in model
        except Exception:
            providers = []
        for provider in providers:
            # Build the authorization URL the provider will redirect back
            # from (implicit flow: response_type=token).
            return_url = request.httprequest.url_root + 'auth_oauth/signin'
            state = self.get_state(provider)
            params = dict(
                response_type='token',
                client_id=provider['client_id'],
                redirect_uri=return_url,
                scope=provider['scope'],
                state=simplejson.dumps(state),
            )
            provider['auth_link'] = provider['auth_endpoint'] + '?' + werkzeug.url_encode(params)
        return providers

    def get_state(self, provider):
        """Build the state dict round-tripped through the provider:
        database, provider id, absolute redirect URL and signup token."""
        redirect = request.params.get('redirect') or 'web'
        if not redirect.startswith(('//', 'http://', 'https://')):
            # Make the redirect absolute so it survives the round trip.
            redirect = '%s%s' % (request.httprequest.url_root, redirect[1:] if redirect[0] == '/' else redirect)
        state = dict(
            d=request.session.db,
            p=provider['id'],
            r=werkzeug.url_quote_plus(redirect),
        )
        token = request.params.get('token')
        if token:
            state['t'] = token
        return state

    @http.route()
    def web_login(self, *args, **kw):
        ensure_db()
        if request.httprequest.method == 'GET' and request.session.uid and request.params.get('redirect'):
            # Redirect if already logged in and redirect param is present
            return http.redirect_with_hash(request.params.get('redirect'))
        providers = self.list_providers()

        response = super(OAuthLogin, self).web_login(*args, **kw)
        if response.is_qweb:
            # Map the numeric ``oauth_error`` query parameter (set by
            # OAuthController.signin) to a user-visible message.
            error = request.params.get('oauth_error')
            if error == '1':
                error = _("Sign up is not allowed on this database.")
            elif error == '2':
                error = _("Access Denied")
            elif error == '3':
                error = _("You do not have access to this database or your invitation has expired. Please ask for an invitation and be sure to follow the link in your invitation email.")
            else:
                error = None

            response.qcontext['providers'] = providers
            if error:
                response.qcontext['error'] = error

        return response

    @http.route()
    def web_auth_signup(self, *args, **kw):
        providers = self.list_providers()
        # With a single provider, skip the chooser and go straight to it.
        if len(providers) == 1:
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_signup(*args, **kw)
        response.qcontext.update(providers=providers)
        return response

    @http.route()
    def web_auth_reset_password(self, *args, **kw):
        providers = self.list_providers()
        # Same single-provider shortcut as web_auth_signup.
        if len(providers) == 1:
            werkzeug.exceptions.abort(werkzeug.utils.redirect(providers[0]['auth_link'], 303))
        response = super(OAuthLogin, self).web_auth_reset_password(*args, **kw)
        response.qcontext.update(providers=providers)
        return response
class OAuthController(http.Controller):
    """HTTP endpoints the OAuth provider redirects back to.

    Python 2 module (uses the old ``except E, e`` syntax below).
    """

    @http.route('/auth_oauth/signin', type='http', auth='none')
    @fragment_to_query_string
    def signin(self, **kw):
        # ``state`` was produced by OAuthLogin.get_state() and round-tripped
        # through the provider unchanged.
        state = simplejson.loads(kw['state'])
        dbname = state['d']
        provider = state['p']
        context = state.get('c', {})
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            try:
                u = registry.get('res.users')
                credentials = u.auth_oauth(cr, SUPERUSER_ID, provider, kw, context=context)
                cr.commit()
                # Pick the post-login destination: explicit redirect URL,
                # then action, then menu, defaulting to /web.
                action = state.get('a')
                menu = state.get('m')
                redirect = werkzeug.url_unquote_plus(state['r']) if state.get('r') else False
                url = '/web'
                if redirect:
                    url = redirect
                elif action:
                    url = '/web#action=%s' % action
                elif menu:
                    url = '/web#menu_id=%s' % menu
                return login_and_redirect(*credentials, redirect_url=url)
            except AttributeError:
                # auth_signup is not installed
                _logger.error("auth_signup not installed on database %s: oauth sign up cancelled." % (dbname,))
                url = "/web/login?oauth_error=1"
            except openerp.exceptions.AccessDenied:
                # oauth credentials not valid, user could be on a temporary session
                _logger.info('OAuth2: access denied, redirect to main page in case a valid session exists, without setting cookies')
                url = "/web/login?oauth_error=3"
                redirect = werkzeug.utils.redirect(url, 303)
                redirect.autocorrect_location_header = False
                return redirect
            except Exception, e:
                # signup error
                _logger.exception("OAuth2: %s" % str(e))
                url = "/web/login?oauth_error=2"

        return set_cookie_and_redirect(url)

    @http.route('/auth_oauth/oea', type='http', auth='none')
    def oea(self, **kw):
        """login user via Odoo Account provider"""
        dbname = kw.pop('db', None)
        if not dbname:
            dbname = db_monodb()
        if not dbname:
            return BadRequest()
        registry = RegistryManager.get(dbname)
        with registry.cursor() as cr:
            IMD = registry['ir.model.data']
            try:
                model, provider_id = IMD.get_object_reference(cr, SUPERUSER_ID, 'auth_oauth', 'provider_openerp')
            except ValueError:
                return set_cookie_and_redirect('/web?db=%s' % dbname)
            assert model == 'auth.oauth.provider'

        # Delegate to signin() with a synthetic state; existing users only.
        state = {
            'd': dbname,
            'p': provider_id,
            'c': {'no_user_creation': True},
        }
        kw['state'] = simplejson.dumps(state)
        return self.signin(**kw)
# vim:expandtab:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
vongochung/buiquocviet | django/template/loaders/app_directories.py | 88 | 2337 | """
Wrapper for loading templates from "templates" directories in INSTALLED_APPS
packages.
"""
import os
import sys
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.template.base import TemplateDoesNotExist
from django.template.loader import BaseLoader
from django.utils._os import safe_join
from django.utils.importlib import import_module
# At compile time, cache the directories to search.
# (Python 2 module: note the old ``except E, e`` syntax and the
# bytestring path decoding below.)
fs_encoding = sys.getfilesystemencoding() or sys.getdefaultencoding()
app_template_dirs = []
for app in settings.INSTALLED_APPS:
    try:
        mod = import_module(app)
    except ImportError, e:
        raise ImproperlyConfigured('ImportError %s: %s' % (app, e.args[0]))
    template_dir = os.path.join(os.path.dirname(mod.__file__), 'templates')
    if os.path.isdir(template_dir):
        # Decode the filesystem bytestring path to unicode.
        app_template_dirs.append(template_dir.decode(fs_encoding))

# It won't change, so convert it to a tuple to save memory.
app_template_dirs = tuple(app_template_dirs)
class Loader(BaseLoader):
    """Template loader that searches every app's ``templates`` directory."""

    is_usable = True

    def get_template_sources(self, template_name, template_dirs=None):
        """
        Returns the absolute paths to "template_name", when appended to each
        directory in "template_dirs". Any paths that don't lie inside one of the
        template dirs are excluded from the result set, for security reasons.
        """
        if not template_dirs:
            template_dirs = app_template_dirs
        for template_dir in template_dirs:
            try:
                yield safe_join(template_dir, template_name)
            except UnicodeDecodeError:
                # The template dir name was a bytestring that wasn't valid UTF-8.
                raise
            except ValueError:
                # The joined path was located outside of template_dir.
                pass

    def load_template_source(self, template_name, template_dirs=None):
        """Return ``(source, filepath)`` for the first readable candidate,
        or raise TemplateDoesNotExist when none can be opened."""
        for filepath in self.get_template_sources(template_name, template_dirs):
            try:
                file = open(filepath)
                try:
                    return (file.read().decode(settings.FILE_CHARSET), filepath)
                finally:
                    file.close()
            except IOError:
                # Unreadable/missing file: try the next candidate path.
                pass
        raise TemplateDoesNotExist(template_name)

# Module-level singleton used by Django's template machinery.
_loader = Loader()
| bsd-3-clause |
ingokegel/intellij-community | python/helpers/py2only/docutils/utils/error_reporting.py | 104 | 7765 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# :Id: $Id: error_reporting.py 7668 2013-06-04 12:46:30Z milde $
# :Copyright: © 2011 Günter Milde.
# :License: Released under the terms of the `2-Clause BSD license`_, in short:
#
# Copying and distribution of this file, with or without modification,
# are permitted in any medium without royalty provided the copyright
# notice and this notice are preserved.
# This file is offered as-is, without any warranty.
#
# .. _2-Clause BSD license: http://www.spdx.org/licenses/BSD-2-Clause
"""
Error reporting should be safe from encoding/decoding errors.
However, implicit conversions of strings and exceptions like
>>> u'%s world: %s' % ('H\xe4llo', Exception(u'H\xe4llo'))
fail in some Python versions:
* In Python <= 2.6, ``unicode(<exception instance>)`` uses
`__str__` and fails with non-ASCII chars in`unicode` arguments.
(work around http://bugs.python.org/issue2517):
* In Python 2, unicode(<exception instance>) fails, with non-ASCII
chars in arguments. (Use case: in some locales, the errstr
argument of IOError contains non-ASCII chars.)
* In Python 2, str(<exception instance>) fails, with non-ASCII chars
in `unicode` arguments.
The `SafeString`, `ErrorString` and `ErrorOutput` classes handle
common exceptions.
"""
import sys, codecs
# Guess the locale's encoding.
# If no valid guess can be made, locale_encoding is set to `None`:
try:
    import locale # module missing in Jython
except ImportError:
    locale_encoding = None
else:
    locale_encoding = locale.getlocale()[1] or locale.getdefaultlocale()[1]
    # locale.getpreferredencoding([do_setlocale=True|False])
    # has side-effects | might return a wrong guess.
    # (cf. Update 1 in http://stackoverflow.com/questions/4082645/using-python-2-xs-locale-module-to-format-numbers-and-currency)
    # Validate the guess: keep it only if the codec actually exists.
    try:
        codecs.lookup(locale_encoding or '') # None -> ''
    except LookupError:
        locale_encoding = None
class SafeString(object):
    """
    A wrapper providing robust conversion to `str` and `unicode`.

    (Python 2 module: uses the old ``except E, e`` syntax and the
    ``unicode`` builtin.)
    """

    def __init__(self, data, encoding=None, encoding_errors='backslashreplace',
                 decoding_errors='replace'):
        self.data = data
        # Encoding fallback chain: explicit argument -> the data object's
        # own ``encoding`` attribute -> locale guess -> 'ascii'.
        self.encoding = (encoding or getattr(data, 'encoding', None) or
                         locale_encoding or 'ascii')
        self.encoding_errors = encoding_errors
        self.decoding_errors = decoding_errors

    def __str__(self):
        try:
            return str(self.data)
        except UnicodeEncodeError, err:
            if isinstance(self.data, Exception):
                # Work around http://bugs.python.org/issue2517: convert
                # the exception's args individually.
                args = [str(SafeString(arg, self.encoding,
                                       self.encoding_errors))
                        for arg in self.data.args]
                return ', '.join(args)
            if isinstance(self.data, unicode):
                if sys.version_info > (3,0):
                    return self.data
                else:
                    return self.data.encode(self.encoding,
                                            self.encoding_errors)
            raise

    def __unicode__(self):
        """
        Return unicode representation of `self.data`.

        Try ``unicode(self.data)``, catch `UnicodeError` and

        * if `self.data` is an Exception instance, work around
          http://bugs.python.org/issue2517 with an emulation of
          Exception.__unicode__,

        * else decode with `self.encoding` and `self.decoding_errors`.
        """
        try:
            u = unicode(self.data)
            if isinstance(self.data, EnvironmentError):
                u = u.replace(": u'", ": '") # normalize filename quoting
            return u
        except UnicodeError, error: # catch ..Encode.. and ..Decode.. errors
            if isinstance(self.data, EnvironmentError):
                # e.g. IOError with a non-ASCII errstr in some locales.
                return u"[Errno %s] %s: '%s'" % (self.data.errno,
                    SafeString(self.data.strerror, self.encoding,
                               self.decoding_errors),
                    SafeString(self.data.filename, self.encoding,
                               self.decoding_errors))
            if isinstance(self.data, Exception):
                args = [unicode(SafeString(arg, self.encoding,
                                           decoding_errors=self.decoding_errors))
                        for arg in self.data.args]
                return u', '.join(args)
            if isinstance(error, UnicodeDecodeError):
                return unicode(self.data, self.encoding, self.decoding_errors)
            raise
class ErrorString(SafeString):
    """
    Safely report exception type and message.

    Renders as ``<ExceptionClass>: <message>``, reusing SafeString's
    robust conversions for the message part.
    """

    def __str__(self):
        classname = self.data.__class__.__name__
        message = super(ErrorString, self).__str__()
        return '%s: %s' % (classname, message)

    def __unicode__(self):
        classname = self.data.__class__.__name__
        message = super(ErrorString, self).__unicode__()
        return u'%s: %s' % (classname, message)
class ErrorOutput(object):
    """
    Wrapper class for file-like error streams with
    failsave de- and encoding of `str`, `bytes`, `unicode` and
    `Exception` instances.
    """

    def __init__(self, stream=None, encoding=None,
                 encoding_errors='backslashreplace',
                 decoding_errors='replace'):
        """
        :Parameters:
            - `stream`: a file-like object,
                        a string (path to a file),
                        `None` (write to `sys.stderr`, default), or
                        evaluating to `False` (write() requests are ignored).
            - `encoding`: `stream` text encoding. Guessed if None.
            - `encoding_errors`: how to treat encoding errors.
        """
        if stream is None:
            stream = sys.stderr
        elif not(stream):
            # Falsy stream (e.g. '' or 0) disables output entirely.
            stream = False
        # if `stream` is a file name, open it
        elif isinstance(stream, str):
            stream = open(stream, 'w')
        elif isinstance(stream, unicode):
            stream = open(stream.encode(sys.getfilesystemencoding()), 'w')

        self.stream = stream
        """Where warning output is sent."""

        self.encoding = (encoding or getattr(stream, 'encoding', None) or
                         locale_encoding or 'ascii')
        """The output character encoding."""

        self.encoding_errors = encoding_errors
        """Encoding error handler."""

        self.decoding_errors = decoding_errors
        """Decoding error handler."""

    def write(self, data):
        """
        Write `data` to self.stream. Ignore, if self.stream is False.

        `data` can be a `string`, `unicode`, or `Exception` instance.
        """
        if self.stream is False:
            return
        if isinstance(data, Exception):
            # Convert exceptions via SafeString so non-ASCII args survive.
            data = unicode(SafeString(data, self.encoding,
                                      self.encoding_errors,
                                      self.decoding_errors))
        try:
            self.stream.write(data)
        except UnicodeEncodeError:
            self.stream.write(data.encode(self.encoding,
                                          self.encoding_errors))
        except TypeError: # in Python 3, stderr expects unicode
            if self.stream in (sys.stderr, sys.stdout):
                self.stream.buffer.write(data) # write bytes to raw stream
            else:
                self.stream.write(unicode(data, self.encoding,
                                          self.decoding_errors))

    def close(self):
        """
        Close the error-output stream.

        Ignored if the stream is` sys.stderr` or `sys.stdout` or has no
        close() method.
        """
        if self.stream in (sys.stdout, sys.stderr):
            return
        try:
            self.stream.close()
        except AttributeError:
            pass
| apache-2.0 |
ingokegel/intellij-community | plugins/hg4idea/testData/bin/hgext/largefiles/overrides.py | 90 | 46667 | # Copyright 2009-2010 Gregory P. Ward
# Copyright 2009-2010 Intelerad Medical Systems Incorporated
# Copyright 2010-2011 Fog Creek Software
# Copyright 2010-2011 Unity Technologies
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
'''Overridden Mercurial commands and functions for the largefiles extension'''
import os
import copy
from mercurial import hg, commands, util, cmdutil, scmutil, match as match_, \
node, archival, error, merge, discovery
from mercurial.i18n import _
from mercurial.node import hex
from hgext import rebase
import lfutil
import lfcommands
import basestore
# -- Utility functions: commonly/repeatedly needed functionality ---------------
def installnormalfilesmatchfn(manifest):
    '''overrides scmutil.match so that the matcher it returns will ignore all
    largefiles'''
    oldmatch = None # for the closure
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        # A file is "normal" when it is neither a standin itself nor has a
        # standin recorded in the given manifest (i.e. it's not a largefile).
        notlfile = lambda f: not (lfutil.isstandin(f) or lfutil.standin(f) in
                manifest)
        m._files = filter(notlfile, m._files)
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        # Only match normal files; return None (falsy) for largefiles.
        m.matchfn = lambda f: notlfile(f) and origmatchfn(f) or None
        return m
    oldmatch = installmatchfn(overridematch)
def installmatchfn(f):
    '''Replace scmutil.match with `f` and return the previous matcher.

    The previous matcher is also stashed on `f` as its `oldmatch`
    attribute so restorematchfn() can unwind the installation later.
    '''
    previous = scmutil.match
    f.oldmatch = previous
    scmutil.match = f
    return previous
def restorematchfn():
    '''Undo one installmatchfn()/installnormalfilesmatchfn() call.

    Restores scmutil.match to the matcher saved on the current
    override's `oldmatch` attribute; a no-op when scmutil.match is
    already the original function. Note that n installations require n
    restorations to fully unwind.
    '''
    current = scmutil.match
    scmutil.match = getattr(current, 'oldmatch', current)
def addlargefiles(ui, repo, *pats, **opts):
    '''Add matched files as largefiles by writing standins for them.

    A file becomes a largefile when --large was given, when its size is at
    least the configured minimum, or when it matches the configured
    largefile patterns.  Returns the list of files whose standins could
    not be added.
    '''
    large = opts.pop('large', None)
    lfsize = lfutil.getminsize(
        ui, lfutil.islfilesrepo(repo), opts.pop('lfsize', None))

    lfmatcher = None
    if lfutil.islfilesrepo(repo):
        lfpats = ui.configlist(lfutil.longname, 'patterns', default=[])
        if lfpats:
            lfmatcher = match_.match(repo.root, '', list(lfpats))

    lfnames = []
    m = scmutil.match(repo[None], pats, opts)
    m.bad = lambda x, y: None    # suppress "file not found" complaints
    wctx = repo[None]
    for f in repo.walk(m):
        exact = m.exact(f)
        lfile = lfutil.standin(f) in wctx
        nfile = f in wctx
        exists = lfile or nfile

        # Don't warn the user when they attempt to add a normal tracked file.
        # The normal add code will do that for us.
        if exact and exists:
            if lfile:
                ui.warn(_('%s already a largefile\n') % f)
            continue

        if (exact or not exists) and not lfutil.isstandin(f):
            wfile = repo.wjoin(f)

            # In case the file was removed previously, but not committed
            # (issue3507)
            if not os.path.exists(wfile):
                continue

            abovemin = (lfsize and
                        os.lstat(wfile).st_size >= lfsize * 1024 * 1024)
            if large or abovemin or (lfmatcher and lfmatcher(f)):
                lfnames.append(f)
                if ui.verbose or not exact:
                    ui.status(_('adding %s as a largefile\n') % m.rel(f))

    bad = []
    standins = []

    # Need to lock, otherwise there could be a race condition between
    # when standins are created and added to the repo.
    wlock = repo.wlock()
    try:
        if not opts.get('dry_run'):
            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for f in lfnames:
                # The standin is written with an empty hash; it is filled
                # in at commit time.
                standinname = lfutil.standin(f)
                lfutil.writestandin(repo, standinname, hash='',
                    executable=lfutil.getexecutable(repo.wjoin(f)))
                standins.append(standinname)
                if lfdirstate[f] == 'r':
                    lfdirstate.normallookup(f)
                else:
                    lfdirstate.add(f)
            lfdirstate.write()
            bad += [lfutil.splitstandin(f)
                    for f in repo[None].add(standins)
                    if f in m.files()]
    finally:
        wlock.release()
    return bad
def removelargefiles(ui, repo, *pats, **opts):
    '''Remove the matched largefiles and their standins.

    With --after only files already deleted from the working directory
    are dropped; otherwise clean files are removed too.  Modified or
    added files are refused with a warning.  Returns 1 if anything was
    refused, else 0.
    '''
    after = opts.get('after')
    if not pats and not after:
        raise util.Abort(_('no files specified'))
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    manifest = repo[None].manifest()
    # Keep only entries that actually are largefiles (have a standin).
    # NOTE(review): the comprehension variable 'list' shadows the builtin.
    modified, added, deleted, clean = [[f for f in list
                                        if lfutil.standin(f) in manifest]
                                       for list in [s[0], s[1], s[3], s[6]]]

    def warn(files, msg):
        # Emit one warning per file; report whether anything was warned.
        for f in files:
            ui.warn(msg % m.rel(f))
        return int(len(files) > 0)

    result = 0

    if after:
        remove, forget = deleted, []
        result = warn(modified + added + clean,
                      _('not removing %s: file still exists\n'))
    else:
        remove, forget = deleted + clean, []
        result = warn(modified, _('not removing %s: file is modified (use -f'
                                  ' to force removal)\n'))
        result = warn(added, _('not removing %s: file has been marked for add'
                               ' (use forget to undo)\n')) or result

    for f in sorted(remove + forget):
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))

    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in remove:
            if not after:
                # If this is being called by addremove, notify the user that we
                # are removing the file.
                if getattr(repo, "_isaddremove", False):
                    ui.status(_('removing %s\n') % f)
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
            lfdirstate.remove(f)
        lfdirstate.write()
        # From here on operate on the standins rather than the largefiles.
        forget = [lfutil.standin(f) for f in forget]
        remove = [lfutil.standin(f) for f in remove]
        repo[None].forget(forget)
        # If this is being called by addremove, let the original addremove
        # function handle this.
        if not getattr(repo, "_isaddremove", False):
            for f in remove:
                util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(remove)
    finally:
        wlock.release()

    return result
# For overriding mercurial.hgweb.webcommands so that largefiles will
# appear at their right place in the manifests.
def decodepath(orig, path):
    '''Map a standin path back to its largefile path for hgweb display;
    any other path passes through unchanged.'''
    largefile = lfutil.splitstandin(path)
    if largefile:
        return largefile
    return path
# -- Wrappers: modify existing commands --------------------------------
# Add works by going through the files that the user wanted to add and
# checking if they should be added as largefiles. Then it makes a new
# matcher which matches only the normal files and runs the original
# version of add.
def overrideadd(orig, ui, repo, *pats, **opts):
    '''Wrap 'hg add': route qualifying files through addlargefiles(),
    then run the original add restricted to normal files only.

    With --normal the original add handles everything; combining it
    with --large is rejected.
    '''
    if opts.pop('normal'):
        if opts.get('large'):
            raise util.Abort(_('--normal cannot be used with --large'))
        return orig(ui, repo, *pats, **opts)
    bad = addlargefiles(ui, repo, *pats, **opts)
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    if result == 1 or bad:
        return 1
    return 0
def overrideremove(orig, ui, repo, *pats, **opts):
    '''Wrap 'hg remove': run the original removal on normal files only,
    then remove matching largefiles via removelargefiles().'''
    installnormalfilesmatchfn(repo[None].manifest())
    normalresult = orig(ui, repo, *pats, **opts)
    restorematchfn()
    lfresult = removelargefiles(ui, repo, *pats, **opts)
    return lfresult or normalresult
def overridestatusfn(orig, repo, rev2, **opts):
    # Run the wrapped subrepo status function with largefile status
    # reporting enabled on the underlying repository; the flag is always
    # cleared again, even if orig() raises.
    try:
        repo._repo.lfstatus = True
        return orig(repo, rev2, **opts)
    finally:
        repo._repo.lfstatus = False
def overridestatus(orig, ui, repo, *pats, **opts):
    # Run the wrapped 'hg status' with largefile status reporting
    # enabled; the flag is always cleared again, even if orig() raises.
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
def overridedirty(orig, repo, ignoreupdate=False):
    # Run the wrapped subrepo dirty() check with largefile status
    # reporting enabled; the flag is always cleared again afterwards.
    try:
        repo._repo.lfstatus = True
        return orig(repo, ignoreupdate)
    finally:
        repo._repo.lfstatus = False
def overridelog(orig, ui, repo, *pats, **opts):
    '''Wrap 'hg log' so largefiles and their standins are both matched.'''
    def overridematch(ctx, pats=[], opts={}, globbed=False,
            default='relpath'):
        """Matcher that merges root directory with .hglf, suitable for log.
        It is still possible to match .hglf directly.
        For any listed files run log on the standin too.
        matchfn tries both the given filename and with .hglf stripped.
        """
        match = oldmatch(ctx, pats, opts, globbed, default)
        m = copy.copy(match)
        # Also match the standin for every requested file.
        standins = [lfutil.standin(f) for f in m._files]
        m._files.extend(standins)
        m._fmap = set(m._files)
        m._always = False
        origmatchfn = m.matchfn
        def lfmatchfn(f):
            # Try the name with the standin prefix stripped first, then
            # the name as given.
            lf = lfutil.splitstandin(f)
            if lf is not None and origmatchfn(lf):
                return True
            r = origmatchfn(f)
            return r
        m.matchfn = lfmatchfn
        return m
    oldmatch = installmatchfn(overridematch)
    try:
        repo.lfstatus = True
        return orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
        restorematchfn()
def overrideverify(orig, ui, repo, *pats, **opts):
    '''Wrap 'hg verify' so the largefile store is checked as well when
    any of --large/--lfa/--lfc was requested.'''
    wantlarge = opts.pop('large', False)
    verifyall = opts.pop('lfa', False)
    checkcontents = opts.pop('lfc', False)

    result = orig(ui, repo, *pats, **opts)
    if wantlarge or verifyall or checkcontents:
        if not result:
            result = lfcommands.verifylfiles(ui, repo, verifyall,
                                             checkcontents)
    return result
def overridedebugstate(orig, ui, repo, *pats, **opts):
    '''Dispatch 'hg debugstate --large' to the largefiles dirstate dump;
    otherwise defer to the original command.'''
    if opts.pop('large', False):
        lfcommands.debugdirstate(ui, repo)
    else:
        orig(ui, repo, *pats, **opts)
# Override needs to refresh standins so that update's normal merge
# will go through properly. Then the other update hook (overriding repo.update)
# will get the new files. Filemerge is also overridden so that the merge
# will merge standins correctly.
def overrideupdate(orig, ui, repo, *pats, **opts):
    '''Wrap 'hg update': refresh standins from largefile contents first so
    the normal merge machinery sees current hashes.'''
    lfdirstate = lfutil.openlfdirstate(ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s

    # Need to lock between the standins getting updated and their
    # largefiles getting updated
    wlock = repo.wlock()
    try:
        if opts['check']:
            # --check: refuse the update if any largefile is dirty.
            mod = len(modified) > 0
            for lfile in unsure:
                standin = lfutil.standin(lfile)
                # "unsure" files are dirty iff their content hash differs
                # from the committed standin.
                if repo['.'][standin].data().strip() != \
                        lfutil.hashfile(repo.wjoin(lfile)):
                    mod = True
                else:
                    lfdirstate.normal(lfile)
            lfdirstate.write()
            if mod:
                raise util.Abort(_('uncommitted local changes'))
        # XXX handle removed differently
        if not opts['clean']:
            for lfile in unsure + modified + added:
                lfutil.updatestandin(repo, lfutil.standin(lfile))
    finally:
        wlock.release()
    return orig(ui, repo, *pats, **opts)
# Before starting the manifest merge, merge.updates will call
# _checkunknown to check if there are any files in the merged-in
# changeset that collide with unknown files in the working copy.
#
# The largefiles are seen as unknown, so this prevents us from merging
# in a file 'foo' if we already have a largefile with the same name.
#
# The overridden function filters the unknown files by removing any
# largefiles. This makes the merge proceed and we can then handle this
# case further in the overridden manifestmerge function below.
def overridecheckunknownfile(origfn, repo, wctx, mctx, f):
    '''Treat files whose standin is tracked in the working context as
    known, so merges are not aborted over existing largefiles.'''
    standin = lfutil.standin(repo.dirstate.normalize(f))
    if standin in wctx:
        return False
    return origfn(repo, wctx, mctx, f)
# The manifest merge handles conflicts on the manifest level. We want
# to handle changes in largefile-ness of files at this level too.
#
# The strategy is to run the original manifestmerge and then process
# the action list it outputs. There are two cases we need to deal with:
#
# 1. Normal file in p1, largefile in p2. Here the largefile is
# detected via its standin file, which will enter the working copy
# with a "get" action. It is not "merge" since the standin is all
# Mercurial is concerned with at this level -- the link to the
# existing normal file is not relevant here.
#
# 2. Largefile in p1, normal file in p2. Here we get a "merge" action
# since the largefile will be present in the working copy and
# different from the normal file in p2. Mercurial therefore
# triggers a merge action.
#
# In both cases, we prompt the user and emit new actions to either
# remove the standin (if the normal file was kept) or to remove the
# normal file and get the standin (if the largefile was kept). The
# default prompt answer is to use the largefile version since it was
# presumably changed on purpose.
#
# Finally, the merge.applyupdates function will then take care of
# writing the files into the working copy and lfcommands.updatelfiles
# will update the largefiles.
def overridemanifestmerge(origfn, repo, p1, p2, pa, branchmerge, force,
                          partial, acceptremote=False):
    '''Post-process manifestmerge actions to resolve largefile/normal-file
    identity changes, prompting the user which version to keep.  See the
    long comment above for the two cases handled.'''
    overwrite = force and not branchmerge
    actions = origfn(repo, p1, p2, pa, branchmerge, force, partial,
                     acceptremote)
    processed = []

    for action in actions:
        if overwrite:
            # Plain overwrite: no conflict possible, keep action as-is.
            processed.append(action)
            continue
        f, m, args, msg = action

        choices = (_('&Largefile'), _('&Normal file'))

        splitstandin = lfutil.splitstandin(f)
        if (m == "g" and splitstandin is not None and
            splitstandin in p1 and f in p2):
            # Case 1: normal file in the working copy, largefile in
            # the second parent
            lfile = splitstandin
            standin = f
            msg = _('%s has been turned into a largefile\n'
                    'use (l)argefile or keep as (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                # Keep the largefile: drop the normal file, get the standin.
                processed.append((lfile, "r", None, msg))
                processed.append((standin, "g", (p2.flags(standin),), msg))
            else:
                # Keep the normal file: drop the standin.
                processed.append((standin, "r", None, msg))
        elif m == "g" and lfutil.standin(f) in p1 and f in p2:
            # Case 2: largefile in the working copy, normal file in
            # the second parent
            standin = lfutil.standin(f)
            lfile = f
            msg = _('%s has been turned into a normal file\n'
                    'keep as (l)argefile or use (n)ormal file?') % lfile
            if repo.ui.promptchoice(msg, choices, 0) == 0:
                # Keep the largefile: drop the incoming normal file.
                processed.append((lfile, "r", None, msg))
            else:
                # Take the normal file: drop the standin, get the file.
                processed.append((standin, "r", None, msg))
                processed.append((lfile, "g", (p2.flags(lfile),), msg))
        else:
            processed.append(action)

    return processed
# Override filemerge to prompt the user about how they wish to merge
# largefiles. This will handle identical edits, and copy/rename +
# edit without prompting the user.
def overridefilemerge(origfn, repo, mynode, orig, fcd, fco, fca):
    '''Merge standins: auto-resolve identical or one-sided edits, prompt
    the user (local vs. other) on real conflicts.  Non-standins fall
    through to the original filemerge.'''
    # Use better variable names here. Because this is a wrapper we cannot
    # change the variable names in the function declaration.
    fcdest, fcother, fcancestor = fcd, fco, fca
    if not lfutil.isstandin(orig):
        return origfn(repo, mynode, orig, fcdest, fcother, fcancestor)
    else:
        if not fcother.cmp(fcdest): # files identical?
            return None

        # backwards, use working dir parent as ancestor
        if fcancestor == fcother:
            fcancestor = fcdest.parents()[0]

        if orig != fcother.path():
            repo.ui.status(_('merging %s and %s to %s\n')
                           % (lfutil.splitstandin(orig),
                              lfutil.splitstandin(fcother.path()),
                              lfutil.splitstandin(fcdest.path())))
        else:
            repo.ui.status(_('merging %s\n')
                           % lfutil.splitstandin(fcdest.path()))

        # Only the local side changed: keep local.
        if fcancestor.path() != fcother.path() and fcother.data() == \
                fcancestor.data():
            return 0
        # Only the other side changed: take other.
        if fcancestor.path() != fcdest.path() and fcdest.data() == \
                fcancestor.data():
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0

        # Real conflict: ask the user; default is local.
        if repo.ui.promptchoice(_('largefile %s has a merge conflict\n'
                                  'keep (l)ocal or take (o)ther?') %
                                lfutil.splitstandin(orig),
                                (_('&Local'), _('&Other')), 0) == 0:
            return 0
        else:
            repo.wwrite(fcdest.path(), fcother.data(), fcother.flags())
            return 0
# Copy first changes the matchers to match standins instead of
# largefiles. Then it overrides util.copyfile in that function it
# checks if the destination largefile already exists. It also keeps a
# list of copied files so that the largefiles can be copied and the
# dirstate updated.
def overridecopy(orig, ui, repo, pats, opts, rename=False):
    '''Wrap 'hg copy'/'hg rename': first copy the normal files, then the
    largefiles (via their standins), keeping the lfdirstate in sync.'''
    # doesn't remove largefile on rename
    if len(pats) < 2:
        # this isn't legal, let the original function deal with it
        return orig(ui, repo, pats, opts, rename)

    def makestandin(relpath):
        # Absolute path of the standin for a working-dir-relative path.
        path = scmutil.canonpath(repo.root, repo.getcwd(), relpath)
        return os.path.join(repo.wjoin(lfutil.standin(path)))

    fullpats = scmutil.expandpats(pats)
    dest = fullpats[-1]

    if os.path.isdir(dest):
        if not os.path.isdir(makestandin(dest)):
            os.makedirs(makestandin(dest))
    # This could copy both lfiles and normal files in one command,
    # but we don't want to do that. First replace their matcher to
    # only match normal files and run it, then replace it to just
    # match largefiles and run it again.
    nonormalfiles = False
    nolfiles = False
    try:
        try:
            installnormalfilesmatchfn(repo[None].manifest())
            result = orig(ui, repo, pats, opts, rename)
        except util.Abort, e:
            # "no files to copy" just means no *normal* files matched;
            # largefiles may still match below.
            if str(e) != _('no files to copy'):
                raise e
            else:
                nonormalfiles = True
                result = 0
    finally:
        restorematchfn()

    # The first rename can cause our current working directory to be removed.
    # In that case there is nothing left to copy/rename so just quit.
    try:
        repo.getcwd()
    except OSError:
        return result

    try:
        try:
            # When we call orig below it creates the standins but we don't add
            # them to the dir state until later so lock during that time.
            wlock = repo.wlock()

            manifest = repo[None].manifest()
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                newpats = []
                # The patterns were previously mangled to add the standin
                # directory; we need to remove that now
                for pat in pats:
                    if match_.patkind(pat) is None and lfutil.shortname in pat:
                        newpats.append(pat.replace(lfutil.shortname, ''))
                    else:
                        newpats.append(pat)
                match = oldmatch(ctx, newpats, opts, globbed, default)
                m = copy.copy(match)
                # Keep only files that are largefiles in the manifest and
                # rewrite them to their standin paths.
                lfile = lambda f: lfutil.standin(f) in manifest
                m._files = [lfutil.standin(f) for f in m._files if lfile(f)]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                m.matchfn = lambda f: (lfutil.isstandin(f) and
                                    (f in manifest) and
                                    origmatchfn(lfutil.splitstandin(f)) or
                                    None)
                return m
            oldmatch = installmatchfn(overridematch)
            listpats = []
            for pat in pats:
                if match_.patkind(pat) is not None:
                    listpats.append(pat)
                else:
                    listpats.append(makestandin(pat))

            try:
                origcopyfile = util.copyfile
                copiedfiles = []
                def overridecopyfile(src, dest):
                    # Refuse to clobber an existing largefile unless
                    # --force was given; record every copy performed.
                    if (lfutil.shortname in src and
                        dest.startswith(repo.wjoin(lfutil.shortname))):
                        destlfile = dest.replace(lfutil.shortname, '')
                        if not opts['force'] and os.path.exists(destlfile):
                            raise IOError('',
                                _('destination largefile already exists'))
                    copiedfiles.append((src, dest))
                    origcopyfile(src, dest)

                util.copyfile = overridecopyfile
                result += orig(ui, repo, listpats, opts, rename)
            finally:
                util.copyfile = origcopyfile

            lfdirstate = lfutil.openlfdirstate(ui, repo)
            for (src, dest) in copiedfiles:
                if (lfutil.shortname in src and
                    dest.startswith(repo.wjoin(lfutil.shortname))):
                    # Mirror each standin copy/rename onto the actual
                    # largefile and the lfdirstate.
                    srclfile = src.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfile = dest.replace(repo.wjoin(lfutil.standin('')), '')
                    destlfiledir = os.path.dirname(repo.wjoin(destlfile)) or '.'
                    if not os.path.isdir(destlfiledir):
                        os.makedirs(destlfiledir)
                    if rename:
                        os.rename(repo.wjoin(srclfile), repo.wjoin(destlfile))
                        lfdirstate.remove(srclfile)
                    else:
                        util.copyfile(repo.wjoin(srclfile),
                                      repo.wjoin(destlfile))
                        lfdirstate.add(destlfile)
            lfdirstate.write()
        except util.Abort, e:
            if str(e) != _('no files to copy'):
                raise e
            else:
                nolfiles = True
    finally:
        restorematchfn()
        wlock.release()

    # Abort only when *neither* pass matched anything.
    if nolfiles and nonormalfiles:
        raise util.Abort(_('no files to copy'))

    return result
# When the user calls revert, we have to be careful to not revert any
# changes to other largefiles accidentally. This means we have to keep
# track of the largefiles that are being reverted so we only pull down
# the necessary largefiles.
#
# Standins are only updated (to match the hash of largefiles) before
# commits. Update the standins then run the original revert, changing
# the matcher to hit standins instead of largefiles. Based on the
# resulting standins update the largefiles. Then return the standins
# to their proper state
def overriderevert(orig, ui, repo, *pats, **opts):
    '''Wrap 'hg revert' so only the matched largefiles are reverted; see
    the long comment above for the standin round-trip strategy.'''
    # Because we put the standins in a bad state (by updating them)
    # and then return them to a correct state we need to lock to
    # prevent others from changing them in their incorrect state.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        (modified, added, removed, missing, unknown, ignored, clean) = \
            lfutil.lfdirstatestatus(lfdirstate, repo, repo['.'].rev())
        lfdirstate.write()
        # Sync standins to current largefile contents so revert sees the
        # real differences; drop standins of missing largefiles.
        for lfile in modified:
            lfutil.updatestandin(repo, lfutil.standin(lfile))
        for lfile in missing:
            if (os.path.exists(repo.wjoin(lfutil.standin(lfile)))):
                os.unlink(repo.wjoin(lfutil.standin(lfile)))

        try:
            ctx = scmutil.revsingle(repo, opts.get('rev'))
            oldmatch = None # for the closure
            def overridematch(ctx, pats=[], opts={}, globbed=False,
                    default='relpath'):
                match = oldmatch(ctx, pats, opts, globbed, default)
                m = copy.copy(match)
                def tostandin(f):
                    # Map a file to its standin when it is a largefile in
                    # the target or working context; None drops it.
                    if lfutil.standin(f) in ctx:
                        return lfutil.standin(f)
                    elif lfutil.standin(f) in repo[None]:
                        return None
                    return f
                m._files = [tostandin(f) for f in m._files]
                m._files = [f for f in m._files if f is not None]
                m._fmap = set(m._files)
                m._always = False
                origmatchfn = m.matchfn
                def matchfn(f):
                    if lfutil.isstandin(f):
                        # We need to keep track of what largefiles are being
                        # matched so we know which ones to update later --
                        # otherwise we accidentally revert changes to other
                        # largefiles. This is repo-specific, so duckpunch the
                        # repo object to keep the list of largefiles for us
                        # later.
                        if origmatchfn(lfutil.splitstandin(f)) and \
                                (f in repo[None] or f in ctx):
                            lfileslist = getattr(repo, '_lfilestoupdate', [])
                            lfileslist.append(lfutil.splitstandin(f))
                            repo._lfilestoupdate = lfileslist
                            return True
                        else:
                            return False
                    return origmatchfn(f)
                m.matchfn = matchfn
                return m
            oldmatch = installmatchfn(overridematch)
            # NOTE(review): the bare 'scmutil.match' below is a no-op
            # expression statement with no effect; it looks like leftover
            # debugging code.
            scmutil.match
            matches = overridematch(repo[None], pats, opts)
            orig(ui, repo, *pats, **opts)
        finally:
            restorematchfn()
        lfileslist = getattr(repo, '_lfilestoupdate', [])
        lfcommands.updatelfiles(ui, repo, filelist=lfileslist,
                                printmessage=False)

        # empty out the largefiles list so we start fresh next time
        repo._lfilestoupdate = []
        # Restore the standins we dirtied above for modified largefiles
        # that were actually reverted.
        for lfile in modified:
            if lfile in lfileslist:
                if os.path.exists(repo.wjoin(lfutil.standin(lfile))) and lfile\
                        in repo['.']:
                    lfutil.writestandin(repo, lfutil.standin(lfile),
                        repo['.'][lfile].data().strip(),
                        'x' in repo['.'][lfile].flags())
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        # Added largefiles not present in the target revision are
        # un-added: drop them from the lfdirstate and remove the standin.
        for lfile in added:
            standin = lfutil.standin(lfile)
            if standin not in ctx and (standin in matches or opts.get('all')):
                if lfile in lfdirstate:
                    lfdirstate.drop(lfile)
                util.unlinkpath(repo.wjoin(standin))
        lfdirstate.write()
    finally:
        wlock.release()
def hgupdaterepo(orig, repo, node, overwrite):
    '''Wrap hg.updaterepo so largefiles are refreshed after an update.

    When not overwriting, only largefiles whose standins changed are
    refreshed to save time; filelist=None means "refresh everything".
    '''
    oldstandins = None
    if not overwrite:
        # Snapshot the standins so we can diff them after the update.
        oldstandins = lfutil.getstandinsstate(repo)

    result = orig(repo, node, overwrite)

    filelist = None
    if oldstandins is not None:
        newstandins = lfutil.getstandinsstate(repo)
        filelist = lfutil.getlfilestoupdate(oldstandins, newstandins)

    lfcommands.updatelfiles(repo.ui, repo, filelist=filelist)
    return result
def hgmerge(orig, repo, node, force=None, remind=True):
    '''Wrap hg.merge so the largefiles are synchronized with their
    (possibly merged) standins afterwards.'''
    mergeresult = orig(repo, node, force, remind)
    lfcommands.updatelfiles(repo.ui, repo)
    return mergeresult
# When we rebase a repository with remotely changed largefiles, we need to
# take some extra care so that the largefiles are correctly updated in the
# working copy
def overridepull(orig, ui, repo, source=None, **opts):
    '''Wrap 'hg pull': handle --rebase specially and optionally cache the
    largefiles of the revisions selected by --lfrev / --all-largefiles.'''
    revsprepull = len(repo)
    if not source:
        source = 'default'
    repo.lfpullsource = source
    if opts.get('rebase', False):
        # Re-implement pull --rebase here so other largefiles hooks can
        # see repo._isrebasing while it runs.
        repo._isrebasing = True
        try:
            if opts.get('update'):
                del opts['update']
                ui.debug('--update and --rebase are not compatible, ignoring '
                         'the update flag\n')
            del opts['rebase']
            cmdutil.bailifchanged(repo)
            # Suppress postincoming so the working copy is not updated
            # before the rebase runs.
            origpostincoming = commands.postincoming
            def _dummy(*args, **kwargs):
                pass
            commands.postincoming = _dummy
            try:
                result = commands.pull(ui, repo, source, **opts)
            finally:
                commands.postincoming = origpostincoming
            revspostpull = len(repo)
            if revspostpull > revsprepull:
                result = result or rebase.rebase(ui, repo)
        finally:
            repo._isrebasing = False
    else:
        result = orig(ui, repo, source, **opts)
    revspostpull = len(repo)
    lfrevs = opts.get('lfrev', [])
    if opts.get('all_largefiles'):
        lfrevs.append('pulled()')
    if lfrevs and revspostpull > revsprepull:
        numcached = 0
        repo.firstpulled = revsprepull # for pulled() revset expression
        try:
            for rev in scmutil.revrange(repo, lfrevs):
                ui.note(_('pulling largefiles for revision %s\n') % rev)
                (cached, missing) = lfcommands.cachelfiles(ui, repo, rev)
                numcached += len(cached)
        finally:
            del repo.firstpulled
        ui.status(_("%d largefiles cached\n") % numcached)
    return result
def pulledrevsetsymbol(repo, subset, x):
    """``pulled()``
    Changesets that just has been pulled.

    Only available with largefiles from pull --lfrev expressions.

    .. container:: verbose

      Some examples:

      - pull largefiles for all new changesets::

          hg pull -lfrev "pulled()"

      - pull largefiles for all new branch heads::

          hg pull -lfrev "head(pulled()) and not closed()"

    """
    # repo.firstpulled is only set by overridepull() while evaluating
    # --lfrev expressions; outside of that context the symbol is invalid.
    firstpulled = getattr(repo, 'firstpulled', None)
    if firstpulled is None:
        raise util.Abort(_("pulled() only available in --lfrev"))
    return [r for r in subset if r >= firstpulled]
def overrideclone(orig, ui, source, dest=None, **opts):
    '''Wrap 'hg clone' to reject --all-largefiles for non-local
    destinations, where the largefile store cannot be populated.

    Raises util.Abort when --all-largefiles is combined with a remote
    destination; otherwise defers to the original clone.
    '''
    d = dest
    if d is None:
        d = hg.defaultdest(source)
    if opts.get('all_largefiles') and not hg.islocal(d):
        # Bug fix: interpolate the destination *after* translation.  The
        # original code did _('... %s' % d), which looks up the message
        # catalog with the destination already substituted in, so the
        # translation could never be found.
        raise util.Abort(_(
            '--all-largefiles is incompatible with non-local destination %s')
            % d)

    return orig(ui, source, dest, **opts)
def hgclone(orig, ui, opts, *args, **kwargs):
    '''Wrap hg.clone to optionally download all largefiles afterwards.

    With --all-largefiles, every reachable largefile is fetched into the
    new clone; if any are missing the clone is reported as failed by
    returning None.
    '''
    result = orig(ui, opts, *args, **kwargs)
    if result is None:
        return None

    sourcerepo, destrepo = result
    repo = destrepo.local()

    # Caching is implicitly limited to 'rev' option, since the dest repo
    # was truncated at that point. The user may expect a download count
    # with this option, so attempt whether or not this is a largefile
    # repo.
    if opts.get('all_largefiles'):
        success, missing = lfcommands.downloadlfiles(ui, repo, None)
        if missing != 0:
            return None

    return result
def overriderebase(orig, ui, repo, **opts):
    # Run the original rebase with repo._isrebasing flagged so the other
    # largefiles hooks can tell a rebase is in progress; the flag is
    # always cleared again, even if orig() raises.
    repo._isrebasing = True
    try:
        return orig(ui, repo, **opts)
    finally:
        repo._isrebasing = False
def overridearchive(orig, repo, dest, node, kind, decode=True, matchfn=None,
            prefix=None, mtime=None, subrepos=None):
    '''Archive a revision, substituting real largefile contents for
    standins.  Re-implements archival.archive rather than wrapping it.'''
    # No need to lock because we are only reading history and
    # largefile caches, neither of which are modified.
    lfcommands.cachelfiles(repo.ui, repo, node)

    if kind not in archival.archivers:
        raise util.Abort(_("unknown archive type '%s'") % kind)

    ctx = repo[node]

    if kind == 'files':
        if prefix:
            raise util.Abort(
                _('cannot give prefix when archiving to files'))
    else:
        prefix = archival.tidyprefix(dest, kind, prefix)

    def write(name, mode, islink, getdata):
        # Write one member, honoring the caller's matcher and decode flag.
        if matchfn and not matchfn(name):
            return
        data = getdata()
        if decode:
            data = repo.wwritedata(name, data)
        archiver.addfile(prefix + name, mode, islink, data)

    archiver = archival.archivers[kind](dest, mtime or ctx.date()[0])

    if repo.ui.configbool("ui", "archivemeta", True):
        def metadata():
            # Standard .hg_archival.txt content: repo/node/branch plus
            # either global tags or latesttag information.
            base = 'repo: %s\nnode: %s\nbranch: %s\n' % (
                hex(repo.changelog.node(0)), hex(node), ctx.branch())

            tags = ''.join('tag: %s\n' % t for t in ctx.tags()
                           if repo.tagtype(t) == 'global')
            if not tags:
                repo.ui.pushbuffer()
                opts = {'template': '{latesttag}\n{latesttagdistance}',
                        'style': '', 'patch': None, 'git': None}
                cmdutil.show_changeset(repo.ui, repo, opts).show(ctx)
                ltags, dist = repo.ui.popbuffer().split('\n')
                tags = ''.join('latesttag: %s\n' % t for t in ltags.split(':'))
                tags += 'latesttagdistance: %s\n' % dist

            return base + tags

        write('.hg_archival.txt', 0644, False, metadata)

    for f in ctx:
        ff = ctx.flags(f)
        getdata = ctx[f].data
        if lfutil.isstandin(f):
            # Substitute the cached largefile content for the standin.
            path = lfutil.findfile(repo, getdata().strip())
            if path is None:
                raise util.Abort(
                   _('largefile %s not found in repo store or system cache')
                   % lfutil.splitstandin(f))
            f = lfutil.splitstandin(f)

            def getdatafn():
                fd = None
                try:
                    fd = open(path, 'rb')
                    return fd.read()
                finally:
                    if fd:
                        fd.close()

            getdata = getdatafn

        write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)

    if subrepos:
        for subpath in sorted(ctx.substate):
            sub = ctx.sub(subpath)
            submatch = match_.narrowmatcher(subpath, matchfn)
            sub.archive(repo.ui, archiver, prefix, submatch)

    archiver.done()
def hgsubrepoarchive(orig, repo, ui, archiver, prefix, match=None):
repo._get(repo._state + ('hg',))
rev = repo._state[1]
ctx = repo._repo[rev]
lfcommands.cachelfiles(ui, repo._repo, ctx.node())
def write(name, mode, islink, getdata):
# At this point, the standin has been replaced with the largefile name,
# so the normal matcher works here without the lfutil variants.
if match and not match(f):
return
data = getdata()
archiver.addfile(prefix + repo._path + '/' + name, mode, islink, data)
for f in ctx:
ff = ctx.flags(f)
getdata = ctx[f].data
if lfutil.isstandin(f):
path = lfutil.findfile(repo._repo, getdata().strip())
if path is None:
raise util.Abort(
_('largefile %s not found in repo store or system cache')
% lfutil.splitstandin(f))
f = lfutil.splitstandin(f)
def getdatafn():
fd = None
try:
fd = open(os.path.join(prefix, path), 'rb')
return fd.read()
finally:
if fd:
fd.close()
getdata = getdatafn
write(f, 'x' in ff and 0755 or 0644, 'l' in ff, getdata)
for subpath in sorted(ctx.substate):
sub = ctx.sub(subpath)
submatch = match_.narrowmatcher(subpath, match)
sub.archive(ui, archiver, os.path.join(prefix, repo._path) + '/',
submatch)
# If a largefile is modified, the change is not reflected in its
# standin until a commit. cmdutil.bailifchanged() raises an exception
# if the repo has uncommitted changes. Wrap it to also check if
# largefiles were changed. This is used by bisect and backout.
def overridebailifchanged(orig, repo):
    """Wrap cmdutil.bailifchanged to also abort on modified largefiles.

    Args:
      orig: the wrapped bailifchanged function.
      repo: the repository to check.

    Raises:
      util.Abort: if the repo (including largefiles) has uncommitted changes.
    """
    orig(repo)
    repo.lfstatus = True
    # try/finally ensures lfstatus is reset even if status() raises, matching
    # the convention used by the other overrides in this file.
    try:
        modified, added, removed, deleted = repo.status()[:4]
    finally:
        repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
# Fetch doesn't use cmdutil.bailifchanged so override it to add the check
def overridefetch(orig, ui, repo, *pats, **opts):
    """Wrap fetch to abort when largefiles have uncommitted changes.

    Fetch doesn't use cmdutil.bailifchanged, so the largefile-aware check is
    done here before delegating to the wrapped command.

    Raises:
      util.Abort: if the repo (including largefiles) has uncommitted changes.
    """
    repo.lfstatus = True
    # try/finally ensures lfstatus is reset even if status() raises, matching
    # the convention used by the other overrides in this file.
    try:
        modified, added, removed, deleted = repo.status()[:4]
    finally:
        repo.lfstatus = False
    if modified or added or removed or deleted:
        raise util.Abort(_('outstanding uncommitted changes'))
    return orig(ui, repo, *pats, **opts)
def overrideforget(orig, ui, repo, *pats, **opts):
    """Wrap forget so largefiles are forgotten via their standins.

    First runs the normal forget over non-largefiles only, then computes the
    matching largefiles and drops them from the largefiles dirstate and the
    working directory state.
    """
    # Restrict the wrapped forget to normal (non-large) files.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(ui, repo, *pats, **opts)
    restorematchfn()
    m = scmutil.match(repo[None], pats, opts)
    try:
        repo.lfstatus = True
        s = repo.status(match=m, clean=True)
    finally:
        repo.lfstatus = False
    # Status tuple indices: 0 modified, 1 added, 3 deleted, 6 clean --
    # i.e. every tracked state a file can be forgotten from.
    forget = sorted(s[0] + s[1] + s[3] + s[6])
    # Only files that actually have a standin in the working manifest are
    # largefiles.
    forget = [f for f in forget if lfutil.standin(f) in repo[None].manifest()]
    for f in forget:
        if lfutil.standin(f) not in repo.dirstate and not \
                os.path.isdir(m.rel(lfutil.standin(f))):
            ui.warn(_('not removing %s: file is already untracked\n')
                    % m.rel(f))
            result = 1
    for f in forget:
        if ui.verbose or not m.exact(f):
            ui.status(_('removing %s\n') % m.rel(f))
    # Need to lock because standin files are deleted then removed from the
    # repository and we could race in-between.
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        for f in forget:
            if lfdirstate[f] == 'a':
                # Never committed: drop it entirely rather than marking removed.
                lfdirstate.drop(f)
            else:
                lfdirstate.remove(f)
        lfdirstate.write()
        standins = [lfutil.standin(f) for f in forget]
        for f in standins:
            util.unlinkpath(repo.wjoin(f), ignoremissing=True)
        repo[None].forget(standins)
    finally:
        wlock.release()
    return result
def getoutgoinglfiles(ui, repo, dest=None, **opts):
    """Return the sorted standins of largefiles in outgoing changesets.

    Returns None when the destination repo cannot be reached; returns an
    empty list when nothing is outgoing (callers distinguish the two).
    """
    dest = ui.expandpath(dest or 'default-push', dest or 'default')
    dest, branches = hg.parseurl(dest, opts.get('branch'))
    revs, checkout = hg.addbranchrevs(repo, repo, branches, opts.get('rev'))
    if revs:
        revs = [repo.lookup(rev) for rev in scmutil.revrange(repo, revs)]
    try:
        remote = hg.peer(repo, opts, dest)
    except error.RepoError:
        # No remote repo: signalled to callers as None (not as []).
        return None
    outgoing = discovery.findcommonoutgoing(repo, remote.peer(), force=False)
    if not outgoing.missing:
        return outgoing.missing
    o = repo.changelog.nodesbetween(outgoing.missing, revs)[0]
    if opts.get('newest_first'):
        o.reverse()
    toupload = set()
    for n in o:
        parents = [p for p in repo.changelog.parents(n) if p != node.nullid]
        ctx = repo[n]
        files = set(ctx.files())
        if len(parents) == 2:
            # For merges, ctx.files() is not reliable; diff the manifest
            # against both parents to find everything that changed.
            mc = ctx.manifest()
            mp1 = ctx.parents()[0].manifest()
            mp2 = ctx.parents()[1].manifest()
            for f in mp1:
                if f not in mc:
                    files.add(f)
            for f in mp2:
                if f not in mc:
                    files.add(f)
            for f in mc:
                if mc[f] != mp1.get(f, None) or mc[f] != mp2.get(f, None):
                    files.add(f)
        # Keep only standins still present in this changeset.
        toupload = toupload.union(
            set([f for f in files if lfutil.isstandin(f) and f in ctx]))
    return sorted(toupload)
def overrideoutgoing(orig, ui, repo, dest=None, **opts):
    """Run the wrapped outgoing command, then report outgoing largefiles.

    With --large, prints the largefiles that would be uploaded (or a note
    when there is no remote repo / nothing to upload).
    """
    result = orig(ui, repo, dest, **opts)
    if not opts.pop('large', None):
        return result
    toupload = getoutgoinglfiles(ui, repo, dest, **opts)
    if toupload is None:
        ui.status(_('largefiles: No remote repo\n'))
    elif not toupload:
        ui.status(_('largefiles: no files to upload\n'))
    else:
        ui.status(_('largefiles to upload:\n'))
        for standin in toupload:
            ui.status(lfutil.splitstandin(standin) + '\n')
        ui.status('\n')
    return result
def overridesummary(orig, ui, repo, *pats, **opts):
    """Run summary with largefile-aware status; report pending uploads.

    With --large, appends a line describing how many largefiles are waiting
    to be uploaded to the remote repo.
    """
    try:
        repo.lfstatus = True
        orig(ui, repo, *pats, **opts)
    finally:
        repo.lfstatus = False
    if not opts.pop('large', None):
        return
    toupload = getoutgoinglfiles(ui, repo, None, **opts)
    if toupload is None:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no remote repo)\n'))
    elif not toupload:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: (no files to upload)\n'))
    else:
        # i18n: column positioning for "hg summary"
        ui.status(_('largefiles: %d to upload\n') % len(toupload))
def scmutiladdremove(orig, repo, pats=None, opts=None, dry_run=None,
                     similarity=None):
    """Wrap scmutil.addremove so largefiles are added/removed correctly.

    Removes missing largefiles and adds new ones via the largefiles code,
    then hands the remaining (normal) files to the wrapped addremove.

    Interface note: 'pats' and 'opts' previously defaulted to a shared
    mutable list/dict; they now default to None and fresh values are created
    per call, which is backward compatible for every caller.
    """
    if pats is None:
        pats = []
    if opts is None:
        opts = {}
    if not lfutil.islfilesrepo(repo):
        return orig(repo, pats, opts, dry_run, similarity)
    # Get the list of missing largefiles so we can remove them
    lfdirstate = lfutil.openlfdirstate(repo.ui, repo)
    s = lfdirstate.status(match_.always(repo.root, repo.getcwd()), [], False,
        False, False)
    (unsure, modified, added, removed, missing, unknown, ignored, clean) = s
    # Call into the normal remove code, but the removing of the standin, we want
    # to have handled by original addremove. Monkey patching here makes sure
    # we don't remove the standin in the largefiles code, preventing a very
    # confused state later.
    if missing:
        m = [repo.wjoin(f) for f in missing]
        repo._isaddremove = True
        removelargefiles(repo.ui, repo, *m, **opts)
        repo._isaddremove = False
    # Call into the normal add code, and any files that *should* be added as
    # largefiles will be
    addlargefiles(repo.ui, repo, *pats, **opts)
    # Now that we've handled largefiles, hand off to the original addremove
    # function to take care of the rest. Make sure it doesn't do anything with
    # largefiles by installing a matcher that will ignore them.
    installnormalfilesmatchfn(repo[None].manifest())
    result = orig(repo, pats, opts, dry_run, similarity)
    restorematchfn()
    return result
# Calling purge with --all will cause the largefiles to be deleted.
# Override repo.status to prevent this from happening.
def overridepurge(orig, ui, repo, *dirs, **opts):
    """Wrap purge so --all does not delete largefiles.

    Temporarily replaces repo.status with a variant that filters largefiles
    out of the unknown/ignored lists, so purge never sees them as removable.
    The original status method is restored even if purge raises.
    """
    # XXX large file status is buggy when used on repo proxy.
    # XXX this needs to be investigated.
    repo = repo.unfiltered()
    oldstatus = repo.status
    def overridestatus(node1='.', node2=None, match=None, ignored=False,
                       clean=False, unknown=False, listsubrepos=False):
        r = oldstatus(node1, node2, match, ignored, clean, unknown,
                      listsubrepos)
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        modified, added, removed, deleted, unknown, ignored, clean = r
        # Keep only files the largefiles dirstate does not track ('?').
        unknown = [f for f in unknown if lfdirstate[f] == '?']
        ignored = [f for f in ignored if lfdirstate[f] == '?']
        return modified, added, removed, deleted, unknown, ignored, clean
    repo.status = overridestatus
    # try/finally guarantees repo.status is restored even when purge aborts;
    # the original code would leave the override installed on exception.
    try:
        orig(ui, repo, *dirs, **opts)
    finally:
        repo.status = oldstatus
def overriderollback(orig, ui, repo, **opts):
    """Wrap rollback, then resync largefile state to the restored parent.

    After the wrapped rollback, the standin files are re-materialized from
    the new working-copy parent and the largefiles dirstate is rebuilt to
    match.
    """
    result = orig(ui, repo, **opts)
    # Refresh only the standin files (partial=isstandin) from the
    # rolled-back parent revision.
    merge.update(repo, node=None, branchmerge=False, force=True,
                 partial=lfutil.isstandin)
    wlock = repo.wlock()
    try:
        lfdirstate = lfutil.openlfdirstate(ui, repo)
        lfiles = lfutil.listlfiles(repo)
        oldlfiles = lfutil.listlfiles(repo, repo[None].parents()[0].rev())
        for file in lfiles:
            if file in oldlfiles:
                # Existed before the rolled-back commit: schedule a re-check.
                lfdirstate.normallookup(file)
            else:
                # Introduced by the rolled-back commit: now pending add again.
                lfdirstate.add(file)
        lfdirstate.write()
    finally:
        wlock.release()
    return result
def overridetransplant(orig, ui, repo, *revs, **opts):
    """Run transplant with the largefiles hooks suppressed, then update.

    Standins are compared before and after the transplant and only the
    largefiles that actually changed are refreshed in the working copy.
    """
    try:
        standins_before = lfutil.getstandinsstate(repo)
        # Flag checked elsewhere in the extension to skip per-commit hooks.
        repo._istransplanting = True
        result = orig(ui, repo, *revs, **opts)
        standins_after = lfutil.getstandinsstate(repo)
        changed = lfutil.getlfilestoupdate(standins_before, standins_after)
        lfcommands.updatelfiles(repo.ui, repo, filelist=changed,
                                printmessage=True)
    finally:
        repo._istransplanting = False
    return result
def overridecat(orig, ui, repo, file1, *pats, **opts):
    """Re-implement cat so largefiles are printed from the store/cache.

    Matches both normal names and standins; largefile content is streamed
    from the user cache (downloading it first if necessary).

    Returns:
      0 if at least one file was written, 1 otherwise (command exit code).
    """
    ctx = scmutil.revsingle(repo, opts.get('rev'))
    err = 1
    notbad = set()
    m = scmutil.match(ctx, (file1,) + pats, opts)
    origmatchfn = m.matchfn
    def lfmatchfn(f):
        # Let a largefile's standin match when the user named the largefile.
        lf = lfutil.splitstandin(f)
        if lf is None:
            return origmatchfn(f)
        notbad.add(lf)
        return origmatchfn(lf)
    m.matchfn = lfmatchfn
    origbadfn = m.bad
    def lfbadfn(f, msg):
        # Suppress "no such file" for names we resolved via standins.
        if not f in notbad:
            return origbadfn(f, msg)
    m.bad = lfbadfn
    for f in ctx.walk(m):
        fp = cmdutil.makefileobj(repo, opts.get('output'), ctx.node(),
                                 pathname=f)
        lf = lfutil.splitstandin(f)
        if lf is None:
            # duplicating unreachable code from commands.cat
            data = ctx[f].data()
            if opts.get('decode'):
                data = repo.wwritedata(f, data)
            fp.write(data)
        else:
            # The standin holds the largefile's hash; fetch the content from
            # the user cache, downloading from the store if it is missing.
            hash = lfutil.readstandin(repo, lf, ctx.rev())
            if not lfutil.inusercache(repo.ui, hash):
                store = basestore._openstore(repo)
                success, missing = store.get([(lf, hash)])
                if len(success) != 1:
                    raise util.Abort(
                        _('largefile %s is not in cache and could not be '
                          'downloaded') % lf)
            path = lfutil.usercachepath(repo.ui, hash)
            fpin = open(path, "rb")
            # Stream in 128 KiB chunks to avoid loading the file in memory.
            for chunk in util.filechunkiter(fpin, 128 * 1024):
                fp.write(chunk)
            fpin.close()
        fp.close()
        err = 0
    return err
def mercurialsinkbefore(orig, sink):
    """Flag the convert sink's repo as converting, then run the wrapped hook."""
    sink.repo._isconverting = True
    orig(sink)
def mercurialsinkafter(orig, sink):
    """Clear the convert sink repo's converting flag, then run the wrapped hook."""
    sink.repo._isconverting = False
    orig(sink)
| apache-2.0 |
annarev/tensorflow | tensorflow/python/training/monitored_session_test.py | 7 | 95032 | # pylint: disable=g-bad-file-header
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for monitored_session."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import glob
import os
import sys
import threading
import time
import traceback
from tensorflow.core.protobuf import config_pb2
from tensorflow.core.protobuf import debug_pb2
from tensorflow.python.client import session as session_lib
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribute_coordinator
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.summary import summary
from tensorflow.python.training import basic_session_run_hooks
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training import coordinator
from tensorflow.python.training import monitored_session
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import session_run_hook
from tensorflow.python.training import summary_io
from tensorflow.python.training import training_util
def latest_summaries(base_dir):
  """Return the summary events from the newest event file in `base_dir`."""
  event_files = sorted(glob.glob(os.path.join(base_dir, 'events.*')))
  if not event_files:
    return []
  events = summary_io.summary_iterator(event_files[-1])
  return [event for event in events if event.HasField('summary')]
class ScaffoldTest(test.TestCase):
  """Scaffold tests.

  Covers lazy creation of the default init/ready/local-init ops and saver,
  caching across Scaffold instances, explicit overrides, and the
  copy_from_scaffold semantics.
  """
  def test_nothing_created_before_finalize(self):
    # No ops or saver should exist until finalize() is called.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      self.assertEqual(None, scaffold.init_op)
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertEqual(None, scaffold.ready_op)
      self.assertEqual(None, scaffold.ready_for_local_init_op)
      self.assertEqual(None, scaffold.local_init_op)
      self.assertEqual(None, scaffold.saver)
  def test_defaults_empty_graph(self):
    # finalize() on a graph with global and local variables creates default
    # ops, and running them transitions the graph to "ready".
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      variables.VariableV1(1, name='my_var')
      variables.VariableV1(
          2, name='my_local_var', collections=[ops.GraphKeys.LOCAL_VARIABLES])
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertEqual(None, scaffold.local_init_feed_dict)
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
      with self.cached_session() as sess:
        # ready_op reports the names of uninitialized variables.
        self.assertItemsEqual([b'my_var', b'my_local_var'],
                              sess.run(scaffold.ready_op))
        self.assertItemsEqual([b'my_var'],
                              sess.run(scaffold.ready_for_local_init_op))
        sess.run(scaffold.init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_for_local_init_op)))
        sess.run(scaffold.local_init_op)
        self.assertEqual(0, len(sess.run(scaffold.ready_op)))
  def test_defaults_no_variables(self):
    # Default ops are still created when the graph has no variables at all.
    with ops.Graph().as_default():
      scaffold = monitored_session.Scaffold()
      constant_op.constant(1, name='my_const')
      scaffold.finalize()
      self.assertTrue(isinstance(scaffold.init_op, ops.Operation))
      self.assertEqual(None, scaffold.init_feed_dict)
      self.assertEqual(None, scaffold.init_fn)
      self.assertTrue(isinstance(scaffold.ready_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.ready_for_local_init_op, ops.Tensor))
      self.assertTrue(isinstance(scaffold.local_init_op, ops.Operation))
      self.assertEqual(None, scaffold.local_init_feed_dict)
      self.assertTrue(isinstance(scaffold.saver, saver_lib.Saver))
  def test_caches_values(self):
    # Two Scaffolds finalized on the same graph share the default ops, which
    # are cached in graph collections.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      scaffold1 = monitored_session.Scaffold()
      scaffold1.finalize()
      scaffold2 = monitored_session.Scaffold()
      scaffold2.finalize()
      self.assertEqual(scaffold1.init_op, scaffold2.init_op)
      self.assertEqual(scaffold1.ready_op, scaffold2.ready_op)
      self.assertEqual(scaffold1.ready_for_local_init_op,
                       scaffold2.ready_for_local_init_op)
      self.assertEqual(scaffold1.local_init_op, scaffold2.local_init_op)
      self.assertEqual(scaffold1.saver, scaffold2.saver)
  def test_raise_error_if_more_than_one_cached_item(self):
    # An ambiguous SAVERS collection (two savers) must be rejected.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      ops.add_to_collection(ops.GraphKeys.SAVERS, saver_lib.Saver())
      with self.assertRaisesRegex(RuntimeError, 'More than one item'):
        monitored_session.Scaffold().finalize()
  def test_uses_passed_values(self):
    # Explicitly passed ops/values take precedence over the defaults.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          local_init_feed_dict=8,
          saver=saver)
      scaffold.finalize()
      self.assertEqual(2, scaffold.init_op)
      self.assertEqual(3, scaffold.init_feed_dict)
      self.assertTrue(callable(scaffold.init_fn))
      self.assertEqual(5, scaffold.ready_op)
      self.assertEqual(6, scaffold.ready_for_local_init_op)
      self.assertEqual(7, scaffold.local_init_op)
      self.assertEqual(8, scaffold.local_init_feed_dict)
      self.assertEqual(saver, scaffold.saver)
  def test_graph_is_finalized(self):
    # finalize() freezes the graph; further op creation must fail.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      monitored_session.Scaffold().finalize()
      with self.assertRaisesRegex(RuntimeError,
                                  'Graph is finalized and cannot be modified'):
        constant_op.constant([0])
  def test_new_scaffold_from_default_scaffold(self):
    # Copying from an unfinalized default scaffold: the new values win.
    scaffold1 = monitored_session.Scaffold()
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold2 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          local_init_feed_dict=8,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(2, scaffold2.init_op)
      self.assertEqual(3, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(5, scaffold2.ready_op)
      self.assertEqual(6, scaffold2.ready_for_local_init_op)
      self.assertEqual(7, scaffold2.local_init_op)
      self.assertEqual(8, scaffold2.local_init_feed_dict)
      self.assertEqual(saver, scaffold2.saver)
  def test_new_scaffold_from_existing_scaffold(self):
    # Copying from a populated scaffold: explicitly passed values override
    # the copied ones.
    with ops.Graph().as_default():
      variables.VariableV1([1])
      saver = saver_lib.Saver()
      scaffold1 = monitored_session.Scaffold(
          init_op=2,
          init_feed_dict=3,
          init_fn=lambda scaffold, sess: 4,
          ready_op=5,
          ready_for_local_init_op=6,
          local_init_op=7,
          local_init_feed_dict=8,
          saver=saver)
      scaffold2 = monitored_session.Scaffold(
          init_op=4,
          init_feed_dict=6,
          init_fn=lambda scaffold, sess: 8,
          ready_op=10,
          ready_for_local_init_op=12,
          local_init_op=14,
          local_init_feed_dict=15,
          saver=saver,
          copy_from_scaffold=scaffold1)
      scaffold2.finalize()
      self.assertEqual(4, scaffold2.init_op)
      self.assertEqual(6, scaffold2.init_feed_dict)
      self.assertTrue(callable(scaffold2.init_fn))
      self.assertEqual(10, scaffold2.ready_op)
      self.assertEqual(12, scaffold2.ready_for_local_init_op)
      self.assertEqual(14, scaffold2.local_init_op)
      self.assertEqual(15, scaffold2.local_init_feed_dict)
      self.assertEqual(saver, scaffold2.saver)
  def test_copy_from_scaffold_is_scaffold(self):
    # copy_from_scaffold must be a Scaffold instance, not an arbitrary value.
    with ops.Graph().as_default():
      with self.assertRaisesRegex(
          TypeError, 'copy_from_scaffold is not a Scaffold instance'):
        monitored_session.Scaffold(copy_from_scaffold=1)
def _test_dir(temp_dir, test_name):
  """Create an empty dir to use for tests.

  Args:
    temp_dir: Tmp directory path.
    test_name: Name of the test.

  Returns:
    Absolute path to the test directory.
  """
  test_dir = os.path.join(temp_dir, test_name)
  if not os.path.isdir(test_dir):
    os.makedirs(test_dir)
    return test_dir
  # Directory already exists from a previous run: empty it out.
  for stale in glob.glob('%s/*' % test_dir):
    os.remove(stale)
  return test_dir
class FakeHook(session_run_hook.SessionRunHook):
  """Session hook that records every callback invocation for assertions."""
  def __init__(self):
    # Public knobs the tests flip, plus recorded state for assertions.
    self.should_stop = False
    self.request = None
    self.call_counter = collections.Counter()
    self.last_run_context = None
    self.last_run_values = None
  def begin(self):
    self.call_counter.update(['begin'])
  def after_create_session(self, session, coord):  # pylint: disable=unused-argument
    self.call_counter.update(['after_create_session'])
  def before_run(self, run_context):
    self.call_counter.update(['before_run'])
    self.last_run_context = run_context
    return self.request
  def after_run(self, run_context, run_values):
    self.call_counter.update(['after_run'])
    self.last_run_values = run_values
    if self.should_stop:
      run_context.request_stop()
  def end(self, session):
    self.call_counter.update(['end'])
class MonitoredTrainingSessionTest(test.TestCase):
  """Tests MonitoredTrainingSession.

  Exercises checkpoint saving/restoring (by steps and by seconds), summary
  writing, chief-only hooks, and graph-def export behavior.
  """
  def test_saving_restoring_checkpoint(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(2, session.run(gstep))
  def test_save_checkpoint_steps(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_steps')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(100, session.run(gstep))
  def test_save_checkpoint_secs(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_secs')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Sleep past the save interval so the next runs trigger a save.
        time.sleep(0.2)
        for _ in range(10):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(11, session.run(gstep))
  def test_summaries_steps(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_steps')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
    # Both the user summary and the built-in steps/sec summary are written.
    summaries = latest_summaries(logdir)
    tags = [s.summary.value[0].tag for s in summaries]
    self.assertIn('my_summary_tag', tags)
    self.assertIn('global_step/sec', tags)
  def test_summaries_secs(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_secs')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_summaries_steps=None,
          save_summaries_secs=0.1,
          log_step_count_steps=10) as session:
        session.run(new_gstep)
        # Sleep past the save interval so the next runs trigger a write.
        time.sleep(0.2)
        for _ in range(101):
          session.run(new_gstep)
    summaries = latest_summaries(logdir)
    tags = [s.summary.value[0].tag for s in summaries]
    self.assertIn('my_summary_tag', tags)
    self.assertIn('global_step/sec', tags)
  def test_custom_saving(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_saving_restoring_checkpoint')
    fake_hook = FakeHook()
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # save_checkpoint_secs=0 disables built-in saving; only the custom
      # chief-only hook runs.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          chief_only_hooks=[fake_hook],
          save_checkpoint_secs=0) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # Check whether custom hook called or not
      self.assertEqual(1, fake_hook.call_counter['begin'])
      # A restart will not find the checkpoint, since we didn't save.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(0, session.run(gstep))
  def test_save_graph_def(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_steps=1,
          save_graph_def=True) as session:
        # graph.pbtxt plus one .meta per checkpoint are written.
        self.assertIn('graph.pbtxt', os.listdir(logdir))
        self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 1)
        session.run(new_gstep)
        self.assertLen(glob.glob(os.path.join(logdir, '*.meta')), 2)
  def test_save_graph_def_false(self):
    logdir = _test_dir(self.get_temp_dir(), 'test_save_graph_def')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with monitored_session.MonitoredTrainingSession(
          is_chief=True,
          checkpoint_dir=logdir,
          save_checkpoint_steps=1,
          save_graph_def=False) as session:
        # With save_graph_def=False, neither graph.pbtxt nor .meta files
        # should ever appear.
        self.assertNotIn('graph.pbtxt', os.listdir(logdir))
        self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
        session.run(new_gstep)
        self.assertEmpty(glob.glob(os.path.join(logdir, '*.meta')))
class MockExtended(object):
  """Minimal stand-in for a DistributionStrategy `extended` object."""
  def __init__(self, between_graph, should_init, should_checkpoint,
               should_save_summary):
    # Only the four attributes read by the distribute coordinator tests
    # are modeled here.
    (self.experimental_between_graph,
     self.experimental_should_init,
     self.should_checkpoint,
     self.should_save_summary) = (between_graph, should_init,
                                  should_checkpoint, should_save_summary)
class MockStrategy(object):
  """Minimal stand-in for a DistributionStrategy, exposing only `extended`."""
  def __init__(self,
               between_graph=False,
               should_init=True,
               should_checkpoint=None,
               should_save_summary=None):
    # Delegate all behavior flags to the mock `extended` object, mirroring
    # the real DistributionStrategy attribute layout.
    self.extended = MockExtended(
        between_graph, should_init, should_checkpoint, should_save_summary)
class MonitoredTrainingSessionWithDistributeCoordinatorTest(test.TestCase):
  """Test distribute coordinator controls summary saving and checkpointing.

  Each test installs a _WorkerContext built from a MockStrategy whose flags
  enable or disable the summary/checkpoint hooks, then verifies whether the
  corresponding files are produced.
  """
  def test_summary_hook_enabled(self):
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_save_summary=True), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_enabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
    summaries = latest_summaries(logdir)
    tags = [s.summary.value[0].tag for s in summaries]
    self.assertIn('my_summary_tag', tags)
    self.assertIn('global_step/sec', tags)
  def test_summary_hook_disabled(self):
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_save_summary=False), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_summaries_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      summary.scalar('my_summary_tag', new_gstep * 2)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_summaries_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(101):
          session.run(new_gstep)
    # No summary is saved.
    summaries = latest_summaries(logdir)
    self.assertEqual(len(summaries), 0)
  def test_checkpoint_hook_enabled(self):
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_checkpoint=True), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_enabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredTrainingSession(
          is_chief=True, checkpoint_dir=logdir) as session:
        self.assertEqual(100, session.run(gstep))
  def test_checkpoint_hook_disabled(self):
    context = distribute_coordinator._WorkerContext(
        MockStrategy(should_checkpoint=False), None, None, None)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # No checkpoint is saved.
      checkpoint = checkpoint_management.latest_checkpoint(logdir)
      self.assertIsNone(checkpoint)
  def test_checkpoint_hook_enable_on_non_chief_with_collective_ops(self):
    # A non-chief worker using collective ops must not write to the shared
    # checkpoint_dir, but still saves into a worker-local temp directory.
    strategy = collective_all_reduce_strategy.CollectiveAllReduceStrategy()
    strategy.extended._is_chief = False
    context = distribute_coordinator._WorkerContext(strategy, None, 'worker', 1)
    logdir = _test_dir(self.get_temp_dir(), 'test_save_checkpoint_disabled')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      new_gstep = state_ops.assign_add(gstep, 1)
      with context, monitored_session.MonitoredTrainingSession(
          checkpoint_dir=logdir,
          save_checkpoint_steps=100,
          log_step_count_steps=10) as session:
        for _ in range(100):
          session.run(new_gstep)
      # No checkpoint is saved.
      checkpoint = checkpoint_management.latest_checkpoint(logdir)
      self.assertIsNone(checkpoint)
      # But saved to a temporary directory.
      checkpoint = checkpoint_management.latest_checkpoint(
          os.path.join(logdir, 'tmp_worker_1'))
      self.assertIsNotNone(checkpoint)
class StopAtNSession(monitored_session._WrappedSession):
  """A wrapped session that stops at the N-th call to _check_stop."""
  def __init__(self, sess, n):
    super(StopAtNSession, self).__init__(sess)
    self._count = n
  def _check_stop(self):
    # Report "keep going" while the countdown is nonzero, decrementing
    # once per call; report "stop" once it reaches zero.
    stop_now = self._count == 0
    if not stop_now:
      self._count -= 1
    return stop_now
class WrappedSessionTest(test.TestCase):
  """_WrappedSession tests.

  Verifies property delegation, should_stop() semantics (close, _check_stop,
  nesting), and run() pass-through.
  """
  @test_util.run_deprecated_v1
  def test_properties(self):
    # graph and sess_str are delegated to the wrapped session.
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(sess.graph, wrapped_sess.graph)
      self.assertEqual(sess.sess_str, wrapped_sess.sess_str)
  @test_util.run_deprecated_v1
  def test_should_stop_on_close(self):
    with self.cached_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertFalse(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_should_stop_uses_check_stop(self):
    # should_stop() consults the subclass's _check_stop() hook.
    with self.cached_session() as sess:
      wrapped_sess = StopAtNSession(sess, 3)
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertFalse(wrapped_sess.should_stop())
      self.assertTrue(wrapped_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_should_stop_delegates_to_wrapped_session(self):
    # Nested wrappers forward should_stop() to the inner session.
    with self.cached_session() as sess:
      wrapped_sess0 = StopAtNSession(sess, 4)
      wrapped_sess1 = monitored_session._WrappedSession(wrapped_sess0)
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertFalse(wrapped_sess1.should_stop())
      self.assertTrue(wrapped_sess1.should_stop())
  @test_util.run_deprecated_v1
  def test_close_twice(self):
    # Closing twice is idempotent.
    with self.cached_session() as sess:
      wrapped_sess = monitored_session._WrappedSession(sess)
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
      wrapped_sess.close()
      self.assertTrue(wrapped_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_run(self):
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      self.assertEqual(42, sess.run(v, feed_dict={c: 42}))
      wrapped_sess = monitored_session._WrappedSession(sess)
      self.assertEqual(51, wrapped_sess.run(v, feed_dict={c: 51}))
def busy_wait_for_coord_stop(coord):
  """Spin (with a short sleep) until the coordinator requests a stop."""
  while True:
    if coord.should_stop():
      return
    time.sleep(0.001)
class CoordinatedSessionTest(test.TestCase):
  """_CoordinatedSession tests."""
  @test_util.run_deprecated_v1
  def test_properties(self):
    """The wrapper forwards `graph` and `sess_str` from the raw session."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(sess.graph, coord_sess.graph)
      self.assertEqual(sess.sess_str, coord_sess.sess_str)
  @test_util.run_deprecated_v1
  def test_run(self):
    """`run` delegates fetches and feeds to the wrapped session."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertEqual(42, coord_sess.run(v, feed_dict={c: 42}))
  @test_util.run_deprecated_v1
  def test_should_stop_on_close(self):
    """Closing the coordinated session flips `should_stop` to True."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord_sess.close()
      self.assertTrue(coord_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_should_stop_on_coord_stop(self):
    """A stop requested on the coordinator is visible via the session."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      coord.request_stop()
      self.assertTrue(coord_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_dont_request_stop_on_exception_in_main_thread(self):
    """An exception raised by `run` must not ask the coordinator to stop."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      self.assertEqual(0, coord_sess.run(c))
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      with self.assertRaisesRegex(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      # Neither the coordinator nor the session should be stopping.
      self.assertFalse(coord.should_stop())
      self.assertFalse(coord_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_stop_threads_on_close_after_exception(self):
    """Registered threads keep running through a `run` failure, then join on close."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      self.assertFalse(coord_sess.should_stop())
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(0, coord_sess.run(c))
      for t in threads:
        self.assertTrue(t.is_alive())
      self.assertEqual(1, coord_sess.run(v, feed_dict={c: 1}))
      for t in threads:
        self.assertTrue(t.is_alive())
      with self.assertRaisesRegex(TypeError, 'None has invalid type'):
        coord_sess.run([None], feed_dict={c: 2})
      # `close` requests a stop and joins the registered threads.
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
  def test_stop_threads_on_close(self):
    """`close` stops and joins all threads registered with the coordinator."""
    with self.cached_session() as sess:
      coord = coordinator.Coordinator()
      threads = [
          threading.Thread(
              target=busy_wait_for_coord_stop, args=(coord,)) for _ in range(3)
      ]
      for t in threads:
        coord.register_thread(t)
        t.start()
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      coord_sess.close()
      for t in threads:
        self.assertFalse(t.is_alive())
      self.assertTrue(coord.should_stop())
      self.assertTrue(coord_sess.should_stop())
  @test_util.run_deprecated_v1
  def test_propagates_exception_trace(self):
    """Errors raised inside `run` keep their original stack trace."""
    assertion = control_flow_ops.Assert(False, ['This should fail.'])
    with self.cached_session() as sess:
      # Empty `clean_stop_exception_types` so the error is re-raised as-is.
      coord = coordinator.Coordinator(clean_stop_exception_types=())
      coord_sess = monitored_session._CoordinatedSession(sess, coord)
      try:
        coord_sess.run([assertion])
        self.fail('No exception was raised by assertion.')
      except errors_impl.InvalidArgumentError:
        # Extract the name of the file where the exception was first raised.
        _, _, exc_traceback = sys.exc_info()
        tb = traceback.extract_tb(exc_traceback)
        exc_source_file = tb[-1][0]
        exc_source_basename = os.path.basename(exc_source_file)
        # If it's monitored_session.py then the original stack trace was not
        # correctly propagated.
        self.assertIn(
            exc_source_basename, ['session.py', 'monitored_session.py'],
            'The exception was raised from an unrecognized file. This unit '
            'test probably needs to be updated. Traceback:\n%s\n' % tb)
        self.assertEqual(
            exc_source_basename, 'session.py',
            'Original stack trace was not propagated by MonitoredSession. '
            'Traceback:\n%s' % tb)
class AbortAtNSession(object):
  """A mock session that aborts at the N-th run call."""

  def __init__(self, sess, n):
    # Delegate target and the number of successful `run` calls remaining.
    self._sess = sess
    self._count = n

  def close(self):
    # Nothing to release for the mock.
    pass

  def run(self, *args, **kwargs):
    remaining = self._count
    if remaining == 0:
      # Budget exhausted: simulate a preemption-style abort.
      raise errors_impl.AbortedError('Aborted at N', None, None)
    self._count = remaining - 1
    return self._sess.run(*args, **kwargs)
class StopCoordinatorWithException(session_run_hook.SessionRunHook):
  """With this hook Coordinator throws an exception after N-runs."""
  def __init__(self, calls_before_stopping, exception_to_raise=None):
    # Guard so `after_create_session` only spawns the side thread once,
    # even if the session is re-created after recovery.
    self._started_the_side_thread_already = False
    # Protects `_calls_before_stopping`, which is read by the side thread
    # and decremented by the main thread in `after_run`.
    self._lock = threading.Lock()
    # Set by the side thread once it has stored its exception in the coord.
    self._stored_exception_event = threading.Event()
    self._calls_before_stopping = calls_before_stopping
    # Default to AbortedError, which MonitoredSession treats as recoverable.
    self._exception_to_raise = (exception_to_raise or errors_impl.AbortedError(
        None, None, 'Aborted at N'))
  def _maybe_stop_with_exception(self, coord):
    """Side-thread body: once the countdown hits zero, stop `coord` with the exception."""
    while True:
      with self._lock:
        if self._calls_before_stopping == 0:
          try:
            # Raise-and-catch so the exception carries a real traceback
            # when handed to the coordinator.
            raise self._exception_to_raise
          except Exception as e:  # pylint: disable=broad-except
            coord.request_stop(e)
          self._stored_exception_event.set()
          break
  def after_create_session(self, session, coord):
    """Starts the side thread (once) that will later stop the coordinator."""
    if self._started_the_side_thread_already:
      return
    separate_thread = threading.Thread(
        target=self._maybe_stop_with_exception, args=(coord,))
    coord.register_thread(separate_thread)
    separate_thread.start()
    self._started_the_side_thread_already = True
    # Coordinator will take care of joining `separate_thread`.
  def after_run(self, run_context, run_values):
    """Decrements the countdown; on the final call, waits for the side thread."""
    stopping_now = False
    with self._lock:
      self._calls_before_stopping -= 1
      if self._calls_before_stopping == 0:
        stopping_now = True
    if stopping_now:
      # Block until the side thread has delivered its exception, so the
      # coordinator is guaranteed to be stopping when this run returns.
      self._stored_exception_event.wait()
class FailTrainingAfterCoordinatorStopped(StopCoordinatorWithException):
  """With this hook training encounters an exception after N-runs."""
  def __init__(self, calls_before_stopping):
    StopCoordinatorWithException.__init__(self, calls_before_stopping)
    # Captured in `after_create_session` so `after_run` can re-raise the
    # exception the coordinator stored.
    self._coord = None
  def after_create_session(self, session, coord):
    self._coord = coord
    return StopCoordinatorWithException.after_create_session(
        self, session, coord)
  def after_run(self, run_context, run_values):
    """Converts a coordinator-stored abort into a main-thread CancelledError."""
    StopCoordinatorWithException.after_run(self, run_context, run_values)
    try:
      # After a `run`, an exception could have been stored inside the
      # coordinator.
      self._coord.raise_requested_exception()
    except errors_impl.AbortedError:
      # In real world, the main thread may or may not know about the exception
      # that stopped the coordinator. Because the coordinator has stopped, the
      # main thread could have gotten stuck as well (for example, the
      # coordinator was supposed to execute `FIFOQueue.enqueue` while the main
      # thread is executing a blocking `FIFOQueue.dequeue`). After it got stuck,
      # the session is going to get garbage collected after some time with:
      raise errors_impl.CancelledError(None, None,
                                       'Session got garbage-collected.')
class CountingSessionCreator(object):
  """A creator that counts the number of created sessions."""

  def __init__(self, session):
    self._initial_session = session
    # We only have one session per test case. We can't re-create it, thus
    # it shouldn't be closed: neutralize `close` so the shared session
    # survives apparent re-creations.
    self._initial_session.close = lambda *args: None
    self._create_session_calls = 0

  @property
  def number_of_sessions_created(self):
    """Number of times `create_session` has been invoked so far."""
    return self._create_session_calls

  def create_session(self):
    """Returns the single shared session, bumping the call counter."""
    self._create_session_calls = self._create_session_calls + 1
    return self._initial_session
class RecoverableSessionTest(test.TestCase):
  """_RecoverableSession tests."""
  class _SessionReturner(object):
    # Minimal session-creator stub that always returns the same session.
    def __init__(self, sess):
      self._sess = sess
    def create_session(self):
      return self._sess
  @test_util.run_deprecated_v1
  def test_properties(self):
    """The wrapper forwards `graph` and `sess_str` from the raw session."""
    with self.cached_session() as sess:
      constant_op.constant(0.0)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(sess.graph, recoverable_sess.graph)
      self.assertEqual(sess.sess_str, recoverable_sess.sess_str)
  @test_util.run_deprecated_v1
  def test_run(self):
    """`run` delegates fetches and feeds to the wrapped session."""
    with self.cached_session() as sess:
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      recoverable_sess = monitored_session._RecoverableSession(
          self._SessionReturner(sess))
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
  @test_util.run_deprecated_v1
  def test_recovery(self):
    """Aborted sessions are transparently replaced by freshly created ones."""
    with self.cached_session() as sess:
      class StackSessionCreator(object):
        # Creator that hands out pre-built aborting sessions in order.
        def __init__(self, sess):
          self.sessions_to_use = [
              AbortAtNSession(sess, x + 1) for x in range(3)
          ]
        def create_session(self):
          return self.sessions_to_use.pop(0)
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      session_creator = StackSessionCreator(sess)
      # List of 3 sessions to use for recovery. The first one aborts
      # after 1 run() call, the second after 2 run calls, the third
      # after 3 run calls.
      self.assertEqual(3, len(session_creator.sessions_to_use))
      # Make the recoverable session use these 3 sessions in sequence by
      # passing a factory that pops from the session_to_use list.
      recoverable_sess = monitored_session._RecoverableSession(session_creator)
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # One session popped.
      # Using first session.
      self.assertEqual(51, recoverable_sess.run(v, feed_dict={c: 51}))
      self.assertEqual(
          2, len(session_creator.sessions_to_use))  # Still 2 sessions available
      # This will fail and recover by picking up the second session.
      self.assertEqual(42, recoverable_sess.run(v, feed_dict={c: 42}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      self.assertEqual(33, recoverable_sess.run(v, feed_dict={c: 33}))
      self.assertEqual(
          1, len(session_creator.sessions_to_use))  # Still 1 session available
      # This will fail and recover by picking up the last session.
      self.assertEqual(24, recoverable_sess.run(v, feed_dict={c: 24}))
      self.assertEqual(
          0, len(session_creator.sessions_to_use))  # All sessions used.
      self.assertEqual(11, recoverable_sess.run(v, feed_dict={c: 11}))
      self.assertEqual(0, recoverable_sess.run(v, feed_dict={c: 0}))
      # This will fail and throw a real error as the pop() will fail.
      with self.assertRaisesRegex(IndexError, 'pop from empty list'):
        recoverable_sess.run(v, feed_dict={c: -12})
  @test_util.run_deprecated_v1
  def test_recovery_from_coordinator_exception(self):
    """An AbortedError stored in the coordinator triggers session re-creation."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [StopCoordinatorWithException(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
  @test_util.run_deprecated_v1
  def test_recovery_from_non_preemption_in_coordinator(self):
    """A non-recoverable error in the coordinator stops training for good."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      hook = StopCoordinatorWithException(
          calls_before_stopping=2,
          exception_to_raise=errors_impl.UnknownError(
              None, None, 'Some fatal exception inside the coordinator.'))
      session = monitored_session.MonitoredSession(session_creator, [hook])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()
  @test_util.run_deprecated_v1
  def test_recovery_from_session_getting_stuck(self):
    """A CancelledError after a coordinator abort still triggers recovery."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run(v, feed_dict={c: 51}))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run(v, feed_dict={c: 42}))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
  @test_util.run_deprecated_v1
  def test_step_fn_recovery_from_coordinator_exception_when_run_hooks(self):
    """Same as the run() recovery test, but driven through run_step_fn."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [StopCoordinatorWithException(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      def feed_step_fn(value):
        # Builds a step_fn that feeds `value` and fetches it back.
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
  @test_util.run_deprecated_v1
  def test_recovery_from_non_preemption_in_coordinator_when_run_hooks(self):
    """Non-recoverable coordinator error via run_step_fn stops training."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      hook = StopCoordinatorWithException(
          calls_before_stopping=2,
          exception_to_raise=errors_impl.UnknownError(
              None, None, 'Some fatal exception inside the coordinator.'))
      session = monitored_session.MonitoredSession(session_creator, [hook])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()
  @test_util.run_deprecated_v1
  def test_recovery_from_session_getting_stuck_when_run_hooks(self):
    """Stuck-session recovery, driven through run_step_fn."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = monitored_session.MonitoredSession(
          session_creator,
          [FailTrainingAfterCoordinatorStopped(calls_before_stopping=2)])
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn
      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
  def create_raw_session_with_failing_coordinator(self, session_creator, hook):
    """Return MonitoredSession that triggers coordinator failures."""
    session = monitored_session.MonitoredSession(session_creator, [hook])
    # We would like to test a situation where during fetches through the
    # raw session, the coordinator fails with an exception. To do that, we
    # are going to use (raw_session + StopCoordinatorWithException) hook
    # combination that is stored in
    # `MonitoredSession._RecoverableSession._CoordinatedSession._sess`
    # at this point:
    session._tf_sess = lambda: session._sess._sess._sess
    # `run()` on such a session is equivalent to `run()` on the raw session
    # with separate coordinator threads independently stopping with an
    # exception.
    return session
  @test_util.run_deprecated_v1
  def test_step_fn_recovery_from_coordinator_exception_with_raw_session(self):
    """Recovery works when the step_fn fetches through the raw session."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = self.create_raw_session_with_failing_coordinator(
          session_creator,
          StopCoordinatorWithException(calls_before_stopping=2))
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      def feed_step_fn(value):
        def step_fn(step_context):
          # Note: fetches via the *raw* session, bypassing the hooks.
          return step_context.session.run(fetches=v, feed_dict={c: value})
        return step_fn
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator was asked to stop, the underlying session is
      # recreated and is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
  @test_util.run_deprecated_v1
  def test_recovery_from_non_preemption_in_coordinator_with_raw_session(self):
    """Non-recoverable coordinator error with a raw session stops training."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = self.create_raw_session_with_failing_coordinator(
          session_creator,
          StopCoordinatorWithException(
              calls_before_stopping=2,
              exception_to_raise=errors_impl.UnknownError(
                  None, None, 'Some fatal exception inside the coordinator.')))
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn
      # The coordinator will not abort during this call, since it's the call
      # number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # The coordinator will abort during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # The coordinator was asked to stop due to non-redeemable error. Training
      # should stop and the session should not be recreated.
      self.assertTrue(session.should_stop())
      self.assertEqual(1, session_creator.number_of_sessions_created)
      with self.assertRaises(errors_impl.UnknownError):
        session.close()
  @test_util.run_deprecated_v1
  def test_recovery_from_session_getting_stuck_with_raw_session(self):
    """Stuck-session recovery with a raw session wrapper."""
    with self.cached_session() as test_session:
      session_creator = CountingSessionCreator(test_session)
      session = self.create_raw_session_with_failing_coordinator(
          session_creator,
          FailTrainingAfterCoordinatorStopped(calls_before_stopping=2))
      self.assertEqual(1, session_creator.number_of_sessions_created)
      self.assertFalse(session.should_stop())
      c = constant_op.constant(0)
      v = array_ops.identity(c)
      def feed_step_fn(value):
        def step_fn(step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: value})
        return step_fn
      # Training will not fail, since it's the call number 0.
      self.assertEqual(51, session.run_step_fn(feed_step_fn(51)))
      self.assertFalse(session.should_stop())
      # Training will fail during the next call, since it's the call
      # number 1.
      self.assertEqual(42, session.run_step_fn(feed_step_fn(42)))
      # Even though the coordinator stopped and training failed, the
      # underlying session is recreated and training is to be continued.
      self.assertFalse(session.should_stop())
      self.assertEqual(2, session_creator.number_of_sessions_created)
class FakeSession(monitored_session._WrappedSession):
  """Wrapped session that records the keyword arguments passed to `run`."""

  def __init__(self, sess):
    monitored_session._WrappedSession.__init__(self, sess)
    # Snapshot of the kwargs from the most recent `run` call.
    self.args_called = {}

  def run(self, fetches, **kwargs):
    # Remember exactly which keyword arguments the caller supplied, then
    # forward only `fetches`; tests inspect the other arguments directly.
    self.args_called = dict(kwargs)
    return monitored_session._WrappedSession.run(self, fetches)
class HookedSessionTest(test.TestCase):
  """Tests of _HookedSession."""
  def testRunPassesAllArguments(self):
    """feed_dict/options/run_metadata all reach the underlying session."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_run = FakeSession(sess)
      mon_sess = monitored_session._HookedSession(sess=mock_run, hooks=[])
      a_tensor = constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor,
                            feed_dict='a_feed',
                            options='an_option',
                            run_metadata='a_metadata')
      self.assertEqual(output, [0])
      self.assertEqual(mock_run.args_called, {
          'feed_dict': 'a_feed',
          'options': 'an_option',
          'run_metadata': 'a_metadata'
      })
  def testCallsHooksBeginEnd(self):
    """before_run/after_run fire once per run; begin hooks are not called here."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      mon_sess.run(a_tensor)
      for hook in [mock_hook, mock_hook2]:
        self.assertEqual(
            hook.last_run_values,
            session_run_hook.SessionRunValues(
                results=None,
                options=config_pb2.RunOptions(),
                run_metadata=config_pb2.RunMetadata()))
        self.assertEqual(hook.last_run_context.original_args,
                         session_run_hook.SessionRunArgs(a_tensor))
        self.assertEqual(hook.last_run_context.session, sess)
        # _HookedSession only drives the per-run callbacks; `begin` and
        # `after_create_session` belong to session creation.
        self.assertEqual(hook.call_counter['begin'], 0)
        self.assertEqual(hook.call_counter['after_create_session'], 0)
        self.assertEqual(hook.call_counter['before_run'], 1)
        self.assertEqual(hook.call_counter['after_run'], 1)
  def testShouldStop(self):
    """Any single hook requesting a stop makes the session stop."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      constant_op.constant([0], name='a_tensor')
      self.evaluate(variables.global_variables_initializer())
      mon_sess.run(fetches='a_tensor')
      self.assertFalse(mon_sess.should_stop())
      mock_hook.should_stop = True
      mon_sess.run(fetches='a_tensor')
      self.assertTrue(mon_sess.should_stop())
  def testFetchesHookRequests(self):
    """Extra fetches requested by hooks are evaluated and handed back to them."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      another_tensor = constant_op.constant([5], name='another_tensor')
      third_tensor = constant_op.constant([10], name='third_tensor')
      mock_hook.request = session_run_hook.SessionRunArgs([another_tensor])
      mock_hook2.request = session_run_hook.SessionRunArgs([third_tensor])
      self.evaluate(variables.global_variables_initializer())
      output = mon_sess.run(fetches=a_tensor)
      self.assertEqual(output, [0])
      self.assertEqual(mock_hook.last_run_values.results, [5])
      self.assertEqual(mock_hook2.last_run_values.results, [10])
  def testOnlyHooksHaveFeeds(self):
    """Feeds supplied only by hooks are merged into the run."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      self.assertEqual(mon_sess.run(fetches=add_tensor), [15])
  def testBothHooksAndUserHaveFeeds(self):
    """Hook feeds merge with the caller's feed_dict without mutating it."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      c_tensor = constant_op.constant([0], name='c_tensor')
      add_tensor = a_tensor + b_tensor + c_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      feed_dict = {c_tensor: [20]}
      self.assertEqual(
          mon_sess.run(fetches=add_tensor, feed_dict=feed_dict), [35])
      # User feed_dict should not be changed
      self.assertEqual(len(feed_dict), 1)
  def testHooksFeedConflicts(self):
    """Two hooks feeding the same tensor is an error."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor)
  def testHooksAndUserFeedConflicts(self):
    """A hook and the caller feeding the same tensor is an error."""
    with ops.Graph().as_default(), session_lib.Session() as sess:
      mock_hook = FakeHook()
      mock_hook2 = FakeHook()
      mon_sess = monitored_session._HookedSession(
          sess=sess, hooks=[mock_hook, mock_hook2])
      a_tensor = constant_op.constant([0], name='a_tensor')
      b_tensor = constant_op.constant([0], name='b_tensor')
      add_tensor = a_tensor + b_tensor
      mock_hook.request = session_run_hook.SessionRunArgs(
          None, feed_dict={a_tensor: [5]})
      mock_hook2.request = session_run_hook.SessionRunArgs(
          None, feed_dict={b_tensor: [10]})
      self.evaluate(variables.global_variables_initializer())
      with self.assertRaisesRegex(RuntimeError, 'Same tensor is fed'):
        mon_sess.run(fetches=add_tensor, feed_dict={b_tensor: [10]})
class RaiseOnceAtCountN(session_run_hook.SessionRunHook):
  """Hook that raises an Exception at step N."""

  def __init__(self, n, ex):
    self.n = n
    self.ex = ex
    self.raised = False

  def before_run(self, run_context):
    # Count down on every run; fire the stored exception exactly once
    # when the countdown reaches zero.
    self.n -= 1
    should_fire = (self.n == 0) and not self.raised
    if should_fire:
      self.raised = True
      raise self.ex
    return None
class RunOptionsMetadataHook(session_run_hook.SessionRunHook):
  """A hook that observes & optionally modifies RunOptions and RunMetadata."""

  def __init__(self, trace_level, timeout_in_ms, output_partition_graphs,
               debug_tensor_watch, report_tensor_allocations_upon_oom):
    self._trace_level = trace_level
    self._timeout_in_ms = timeout_in_ms
    self._output_partition_graphs = output_partition_graphs
    self._debug_tensor_watch = debug_tensor_watch
    self._report_tensor_allocations_upon_oom = (
        report_tensor_allocations_upon_oom)
    # Observed values, appended once per `run` call by `after_run`.
    self.run_options_list = []
    self.run_metadata_list = []

  def before_run(self, run_context):
    """Requests that the next `run` use our configured RunOptions."""
    run_options = config_pb2.RunOptions(
        trace_level=self._trace_level,
        timeout_in_ms=self._timeout_in_ms,
        output_partition_graphs=self._output_partition_graphs,
        report_tensor_allocations_upon_oom=(
            self._report_tensor_allocations_upon_oom))
    run_options.debug_options.debug_tensor_watch_opts.append(
        self._debug_tensor_watch)
    return session_run_hook.SessionRunArgs(None, None, options=run_options)

  def after_run(self, run_context, run_values):
    """Records the options/metadata the session actually used."""
    self.run_options_list.append(run_values.options)
    self.run_metadata_list.append(run_values.run_metadata)
class MonitoredSessionTest(test.TestCase):
"""MonitoredSession tests."""
def test_defaults(self):
with ops.Graph().as_default():
a_var = variables.VariableV1(0)
with monitored_session.MonitoredSession() as session:
self.assertEqual(0, session.run(a_var))
  def test_last_step(self):
    """StopAtStepHook(last_step=N) stops exactly at global step N, and a
    checkpoint restored via Scaffold.init_fn resumes from the saved step."""
    logdir = _test_dir(self.get_temp_dir(), 'test_last_step')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Run till step 3 and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=3)]
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(session.should_stop())
        save_path = saver_lib._get_saver_or_default().save(
            session._coordinated_creator.tf_sess,
            os.path.join(logdir, 'step-3'))
      # Run till step 5 and save.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)
      session_creator = monitored_session.ChiefSessionCreator(
          monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(last_step=5)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        # Resumes at step 3 loaded from the checkpoint above.
        self.assertEqual(3, session.run(gstep))
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertEqual(5, session.run(do_step))
        self.assertTrue(session.should_stop())
  def test_num_steps(self):
    """StopAtStepHook(num_steps=N) counts N steps from wherever it starts,
    including after restoring from a checkpoint."""
    logdir = _test_dir(self.get_temp_dir(), 'test_num_steps')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      # Do 3 steps and save.
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=3)]
      with monitored_session.MonitoredSession(hooks=hooks) as session:
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
        save_path = saver_lib._get_saver_or_default().save(
            session._coordinated_creator.tf_sess,
            os.path.join(logdir, 'step-3'))
      # Restore and do 4 steps.
      def load_ckpt(scaffold, sess):
        scaffold.saver.restore(sess, save_path)
      session_creator = monitored_session.ChiefSessionCreator(
          scaffold=monitored_session.Scaffold(init_fn=load_ckpt))
      hooks = [basic_session_run_hooks.StopAtStepHook(num_steps=4)]
      with monitored_session.MonitoredSession(
          hooks=hooks, session_creator=session_creator) as session:
        # First step after restore takes the global step from 3 to 4.
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertFalse(session.should_stop())
        session.run(do_step)
        self.assertTrue(session.should_stop())
  # This set of tests verifies the supervised session behavior when exceptions
  # are raised next to the innermost session run() call.
  @test_util.run_deprecated_v1
  def test_recovery(self):
    """A restarted MonitoredSession finds the checkpoint and recovers the
    global step automatically, via checkpoint_dir or an explicit path."""
    logdir = _test_dir(self.get_temp_dir(), 'test_recovery')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      # Use a hook to save the model every 100 steps. It also saves it at
      # the end.
      hooks = [
          basic_session_run_hooks.CheckpointSaverHook(
              logdir, save_steps=1, scaffold=scaffold)
      ]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir)) as session:
        self.assertEqual(2, session.run(gstep))
      # A restart will find the checkpoint and recover automatically.
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold,
              checkpoint_filename_with_path=checkpoint_management.
              latest_checkpoint(logdir))) as session:
        self.assertEqual(2, session.run(gstep))
  def test_retry_initialization_on_aborted_error(self):
    """AbortedError raised during init_fn is silently retried."""
    # Tests that we silently retry on abort during initialization.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      self.init_raised_aborted_error = False
      def _init_fn(scaffold, session):
        _, _ = scaffold, session
        # Raise AbortedError exactly once, on the first initialization try.
        if not self.init_raised_aborted_error:
          self.init_raised_aborted_error = True
          raise errors_impl.AbortedError(None, None, 'Abort')
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold=monitored_session.Scaffold(
                  init_fn=_init_fn))) as session:
        self.assertFalse(session.should_stop())
        self.assertEqual(0, session.run(gstep))
      self.assertTrue(self.init_raised_aborted_error)
  def _retry_test(self, ex):
    """Checks that MonitoredSession silently retries after `ex` is raised.

    Note that this does not test recovery as we do not use a CheckpointSaver
    in this test.

    Args:
      ex: Exception instance that the hook raises once, at step count 4.
    """
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(4, ex)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically retries and restart from a freshly
        # initialized session, so the step is back to 0 and running do_step
        # moves it to 1.
        self.assertEqual(1, session.run(do_step))
        self.assertFalse(session.should_stop())
        self.assertTrue(hook.raised)
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
def test_retry_on_aborted_error(self):
self._retry_test(errors_impl.AbortedError(None, None, 'Abort'))
def test_retry_on_unavailable_error(self):
self._retry_test(errors_impl.UnavailableError(None, None, 'Unavailable'))
  def test_recover_and_retry_on_aborted_error(self):
    """Silently retries and recovers from a checkpoint on AbortedError.

    This test uses a CheckpointSaver to have something to recover from.
    """
    logdir = _test_dir(self.get_temp_dir(),
                       'test_recover_and_retry_on_aborted_error')
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      scaffold = monitored_session.Scaffold()
      # Raise AbortedError once, at step count 4.
      abort_hook = RaiseOnceAtCountN(
          4, errors_impl.AbortedError(None, None, 'Abort'))
      # Save after each step.
      ckpt_hook = basic_session_run_hooks.CheckpointSaverHook(
          logdir, save_steps=1, scaffold=scaffold)
      hooks = [abort_hook, ckpt_hook]
      with monitored_session.MonitoredSession(
          session_creator=monitored_session.ChiefSessionCreator(
              scaffold, checkpoint_dir=logdir),
          hooks=hooks) as session:
        self.assertEqual(0, session.run(gstep))
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
        # Here at step 3, the hook triggers and raises AbortedError. The
        # MonitoredSession automatically restores and retries.
        self.assertEqual(3, session.run(do_step))
        self.assertTrue(abort_hook.raised)
        self.assertFalse(session.should_stop())
        self.assertEqual(4, session.run(do_step))
        self.assertFalse(session.should_stop())
def test_exit_cleanly_on_out_of_range_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
'EOI'))
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises OutOfRange. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_exit_cleanly_on_stop_iteration_exception(self):
# Tests that we stop cleanly when OutOfRange is raised.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(2, StopIteration)
session = monitored_session.MonitoredSession(hooks=[hook])
# session should cleanly exit from the context.
with session:
self.assertEqual(0, session.run(gstep))
self.assertFalse(session.should_stop())
# Here at step 1, the hook triggers and raises StopIteration. The
# session should go into should_stop() mode. It should raise the
# exception. So next step should not be executed.
session.run(do_step)
self.assertTrue(False)
self.assertTrue(session.should_stop())
def test_regular_exception_pass_through_run(self):
# Tests that regular exceptions just pass through a "with
# MonitoredSession" block and set the session in stop mode.
with ops.Graph().as_default():
gstep = training_util.get_or_create_global_step()
do_step = state_ops.assign_add(gstep, 1)
hook = RaiseOnceAtCountN(4, RuntimeError('regular exception'))
session = monitored_session.MonitoredSession(hooks=[hook])
with self.assertRaisesRegex(RuntimeError, 'regular exception'):
with session:
self.assertEqual(0, session.run(gstep))
self.assertEqual(1, session.run(do_step))
self.assertEqual(2, session.run(do_step))
self.assertFalse(session.should_stop())
# This triggers the hook and raises the exception
session.run(do_step)
# We should not hit this
self.assertFalse(True)
self.assertTrue(hook.raised)
self.assertTrue(session.should_stop())
  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            # NOTE: reaches into the private coordinator to simulate a worker
            # thread reporting an exception.
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)
  def test_regular_exception_reported_to_coord_pass_through_return(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through returning from a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      session = monitored_session.MonitoredSession()
      with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            # NOTE: reaches into the private coordinator to simulate a worker
            # thread reporting an exception.
            session._coordinated_creator.coord.request_stop(e)
          # Exiting the with-block re-raises the reported exception.
          self.assertTrue(session.should_stop())
  # This set of tests verifies the session behavior when exceptions are raised
  # from code inside a "with MonitoredSession:" context.
  def test_stop_cleanly_when_no_exception_in_with_body(self):
    # Tests that the session stops and closes when the with-body completes
    # without raising.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
  def test_raises_regular_exceptions_in_with_body(self):
    # Tests that regular exceptions in "with body" are seen outside.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.MonitoredSession()
      # We should see that exception.
      with self.assertRaisesRegex(RuntimeError, 'regular exception'):
        with session:
          self.assertEqual(1, session.run(do_step))
          self.assertEqual(2, session.run(do_step))
          self.assertFalse(session.should_stop())
          # Will be visible outside the "with body".
          raise RuntimeError('regular exception')
      # Should have closed, even though the exception propagated out.
      self.assertTrue(session.should_stop())
      self.assertTrue(session._is_closed())
def test_graph(self):
with ops.Graph().as_default() as g:
with monitored_session.MonitoredSession() as session:
self.assertEqual(g, session.graph)
  def test_graph_finalized_during_run_unfinalized_after_exit(self):
    """The graph is finalized while the session runs, unfinalized on exit."""
    with ops.Graph().as_default() as g:
      a_var = variables.VariableV1(0)
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      # Exiting the session unfinalizes the graph again.
      self.assertFalse(g.finalized)
  def test_keep_finalized_graph_as_finalized(self):
    """A graph finalized before the session stays finalized after exit."""
    with ops.Graph().as_default() as g:
      a_var = variables.VariableV1(0)
      # Finalize the graph up front, before the session is created.
      monitored_session.Scaffold().finalize()
      with monitored_session.MonitoredSession() as session:
        self.assertEqual(0, session.run(a_var))
        self.assertTrue(g.finalized)
      # Unlike the previous test, the graph remains finalized here.
      self.assertTrue(g.finalized)
  def test_merge_run_options_from_hooks(self):
    """Test for rewriting RunOptions and observing RunMetadata with hooks."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      watch_a = debug_pb2.DebugTensorWatch(
          node_name='my_const',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      # Hook args (per the assertions below): trace_level, timeout_in_ms,
      # output_partition_graphs, debug tensor watch,
      # report_tensor_allocations_upon_oom.
      hook_a = RunOptionsMetadataHook(2, 30000, False, watch_a, False)
      watch_b = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      hook_b = RunOptionsMetadataHook(3, 60000, True, watch_b, True)
      with monitored_session.MonitoredSession(
          hooks=[hook_a, hook_b]) as session:
        self.assertEqual(42, session.run(my_const))
        # trace_level=3 should have overridden trace_level=2;
        # timeout_in_ms=60000 should have overridden 30000;
        # output_partition_graphs=True should have overridden False.
        # The two debug tensor watches should have been merged.
        self.assertEqual([
            config_pb2.RunOptions(
                trace_level=3,
                timeout_in_ms=60000,
                output_partition_graphs=True,
                debug_options=debug_pb2.DebugOptions(
                    debug_tensor_watch_opts=[watch_a, watch_b]),
                report_tensor_allocations_upon_oom=True),
        ], hook_b.run_options_list)
        self.assertEqual(1, len(hook_b.run_metadata_list))
        self.assertTrue(
            isinstance(hook_b.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook_b.run_metadata_list[0].partition_graphs), 0)
  def test_merge_caller_and_hook_run_options(self):
    """Test that RunOptions from caller and hooks can be merged properly."""
    with ops.Graph().as_default():
      my_const = constant_op.constant(42, name='my_const')
      _ = constant_op.constant(24, name='my_const_2')
      hook_watch = debug_pb2.DebugTensorWatch(
          node_name='my_const_2',
          output_slot=0,
          debug_ops=['DebugIdentity'],
          debug_urls=[])
      # Hook args (per the assertions below): trace_level=2,
      # timeout_in_ms=60000, output_partition_graphs=False, debug watch,
      # report_tensor_allocations_upon_oom=False.
      hook = RunOptionsMetadataHook(2, 60000, False, hook_watch, False)
      with monitored_session.MonitoredSession(hooks=[hook]) as session:
        caller_watch = debug_pb2.DebugTensorWatch(
            node_name='my_const',
            output_slot=0,
            debug_ops=['DebugIdentity'],
            debug_urls=[])
        caller_options = config_pb2.RunOptions(
            trace_level=3,
            timeout_in_ms=30000,
            output_partition_graphs=True,
            report_tensor_allocations_upon_oom=True)
        caller_options.debug_options.debug_tensor_watch_opts.extend(
            [caller_watch])
        self.assertEqual(42, session.run(my_const, options=caller_options))
        # trace_level=3 from the caller should override 2 from the hook.
        # timeout_in_ms=60000 from the hook should override from the caller.
        # output_partition_graph=True from the caller should override False
        # from the hook.
        # The two debug watches from the caller and the hook should be merged,
        # in that order.
        self.assertEqual([
            config_pb2.RunOptions(
                trace_level=3,
                timeout_in_ms=60000,
                output_partition_graphs=True,
                debug_options=debug_pb2.DebugOptions(
                    debug_tensor_watch_opts=[caller_watch, hook_watch]),
                report_tensor_allocations_upon_oom=True),
        ], hook.run_options_list)
        self.assertEqual(1, len(hook.run_metadata_list))
        self.assertTrue(
            isinstance(hook.run_metadata_list[0], config_pb2.RunMetadata))
        self.assertGreater(len(hook.run_metadata_list[0].partition_graphs), 0)
  @test_util.run_deprecated_v1
  def test_with_statement_and_close(self):
    """close() inside the `with` block yields a clear error on exit."""
    # Test case for https://github.com/tensorflow/tensorflow/issues/12224
    # where close() inside the with should have a better error message.
    with self.assertRaisesRegex(RuntimeError, 'Session is already closed'):
      with monitored_session.MonitoredSession() as session:
        session.close()
def test_step_fn_example(self):
with ops.Graph().as_default():
c = array_ops.placeholder(dtypes.float32)
v = array_ops.identity(c)
def step_fn(step_context):
value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
return value
with monitored_session.MonitoredSession() as session:
self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
def test_step_function_stops(self):
with ops.Graph().as_default():
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession() as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
def test_step_request_stop_without_a_with_block(self):
with ops.Graph().as_default():
was_stop_iteration_raised = False
def step_fn(step_context):
step_context.request_stop()
session = monitored_session.MonitoredSession()
try:
self.assertEqual(None, session.run_step_fn(step_fn))
except StopIteration:
was_stop_iteration_raised = True
self.assertTrue(was_stop_iteration_raised)
self.assertFalse(session.should_stop())
  def test_step_request_stop_in_a_loop(self):
    """request_stop() ends a should_stop()-driven loop by raising."""
    with ops.Graph().as_default():
      def step_fn(step_context):
        step_context.request_stop()
      with monitored_session.MonitoredSession() as session:
        while not session.should_stop():
          _ = session.run_step_fn(step_fn)
          self.fail('An exception should be raised on the line above.')
def test_step_request_stop_with_returning_a_type(self):
with ops.Graph().as_default():
def step_fn(step_context):
del step_context
return 'a type'
with monitored_session.MonitoredSession() as session:
self.assertEqual('a type', session.run_step_fn(step_fn))
def test_step_with_extra_arguments(self):
with ops.Graph().as_default():
def step_fn(step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegex(
ValueError,
'`step_fn` may either have one `step_context` argument'):
self.assertEqual(None, session.run_step_fn(step_fn))
  def test_step_fn_belongs_to_a_class(self):
    """A bound method can be passed as a step_fn."""
    with ops.Graph().as_default():
      c = array_ops.placeholder(dtypes.float32)
      v = array_ops.identity(c)
      class Model(object):
        def step_fn(self, step_context):
          return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
      with monitored_session.MonitoredSession() as session:
        model = Model()
        self.assertNear(3.2, session.run_step_fn(model.step_fn), 0.1)
def test_step_fn_belongs_to_a_class_and_has_extra_methods(self):
with ops.Graph().as_default():
class Model(object):
def step_fn(self, step_context, extra_foo):
del step_context, extra_foo
with monitored_session.MonitoredSession() as session:
with self.assertRaisesRegex(
ValueError,
'`step_fn` may either have one `step_context` argument'):
model = Model()
self.assertEqual(None, session.run_step_fn(model.step_fn))
  def test_step_fn_with_hooks(self):
    """Hook fetches and step_fn fetches are combined in one run call."""
    with ops.Graph().as_default():
      var = resource_variable_ops.ResourceVariable(0.0)
      # This test highlights the interaction of hooks with
      # `MonitoredSession.run_step_fn`. The order of execution of operations
      # below is:
      # 0. stage_0
      # 1. stage_1_0 or stage_1_1 in an undefined order
      # 2. stage_2
      stage_0 = state_ops.assign_add(var, 0.3)
      stage_1_0 = state_ops.assign_add(var, 0.7)
      # The order of `stage_1_0` and `stage_1_1` is undefined by
      # `MonitoredSession`, but we should be able to assert when both of them
      # are complete. To obtain a consistent result of adding two different
      # constants to `var`, we rely on a control dependency and
      # `ResourceVariable`. Otherwise, it is possible that one of the
      # additions overwrites the result of the other addition.
      with ops.control_dependencies([stage_1_0]):
        stage_1_1 = state_ops.assign_add(var, 0.5)
      stage_2 = state_ops.assign_add(var, 1.1)
      class Hook(session_run_hook.SessionRunHook):
        def __init__(self, testing):
          self._testing = testing
        def before_run(self, run_context):
          # The hook contributes stage_1_0 as an extra fetch.
          return session_run_hook.SessionRunArgs(fetches=stage_1_0)
        def after_run(self, run_context, run_values):
          self._testing.assertNear(0.3 + 0.5 + 0.7,
                                   run_context.session.run(var), 0.1)
          self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
                                   run_context.session.run(stage_2), 0.1)
      def step_fn(step_context):
        self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
        return step_context.run_with_hooks(fetches=stage_1_1)
      with monitored_session.MonitoredSession(hooks=[Hook(self)]) as session:
        self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
  def test_step_fn_has_the_same_hooks_behavior_without_recovery(self):
    """Hooks interact with `run_step_fn` the same way without recovery.

    Same scenario as `test_step_fn_with_hooks`, run under
    `SingularMonitoredSession`.
    """
    with ops.Graph().as_default():
      var = resource_variable_ops.ResourceVariable(0.0)
      stage_0 = state_ops.assign_add(var, 0.3)
      stage_1_0 = state_ops.assign_add(var, 0.7)
      # Control dependency makes the combined effect of stage_1_0 and
      # stage_1_1 deterministic (see test_step_fn_with_hooks).
      with ops.control_dependencies([stage_1_0]):
        stage_1_1 = state_ops.assign_add(var, 0.5)
      stage_2 = state_ops.assign_add(var, 1.1)
      class Hook(session_run_hook.SessionRunHook):
        def __init__(self, testing):
          self._testing = testing
        def before_run(self, run_context):
          return session_run_hook.SessionRunArgs(fetches=stage_1_0)
        def after_run(self, run_context, run_values):
          self._testing.assertNear(0.3 + 0.5 + 0.7,
                                   run_context.session.run(var), 0.1)
          self._testing.assertNear(0.3 + 0.5 + 0.7 + 1.1,
                                   run_context.session.run(stage_2), 0.1)
      def step_fn(step_context):
        self.assertNear(0.3, step_context.session.run(stage_0), 0.1)
        return step_context.run_with_hooks(fetches=stage_1_1)
      with monitored_session.SingularMonitoredSession(
          hooks=[Hook(self)]) as session:
        self.assertEqual(0.3 + 0.5 + 0.7, session.run_step_fn(step_fn))
def test_step_fn_with_hooks_and_request_stop(self):
with ops.Graph().as_default():
trace_the_hook = {'before_run': False, 'after_run': False}
class Hook(session_run_hook.SessionRunHook):
def before_run(self, run_context):
trace_the_hook['before_run'] = True
def after_run(self, run_context, run_values):
trace_the_hook['after_run'] = True
def step_fn(step_context):
step_context.request_stop()
with monitored_session.MonitoredSession(hooks=[Hook()]) as session:
self.assertEqual(None, session.run_step_fn(step_fn))
self.assertTrue(session.should_stop())
# `step_context.request_stop()` in a step_fn interrupts the flow of
# running the hooks.
self.assertFalse(trace_the_hook['before_run'])
self.assertFalse(trace_the_hook['after_run'])
  def test_recovers_from_an_exception_in_step_fn(self):
    """An AbortedError raised inside a step_fn triggers a retry of the step."""
    trace_the_exception = {'run_already': False}
    with ops.Graph().as_default():
      c = array_ops.placeholder(dtypes.float32)
      v = array_ops.identity(c)
      def step_fn(step_context):
        # Raise once, on the first invocation only.
        if not trace_the_exception['run_already']:
          trace_the_exception['run_already'] = True
          raise errors_impl.AbortedError(None, None, 'Abort')
        return step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
      with monitored_session.MonitoredSession() as session:
        self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
      self.assertTrue(trace_the_exception['run_already'])
  def test_recovers_from_an_exception_in_step_fn_after_hooks(self):
    """On AbortedError, the whole step_fn body is re-executed after retry."""
    trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
    with ops.Graph().as_default():
      c = array_ops.placeholder(dtypes.float32)
      v = array_ops.identity(c)
      graph_state = variables.VariableV1(0.0)
      graph_side_effect = state_ops.assign_add(graph_state, 0.31)
      def step_fn(step_context):
        trace_the_exception['side_effect_counter'] += 1
        step_context.session.run(graph_side_effect)
        value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
        # Raise once, AFTER the side effect has already been applied.
        if not trace_the_exception['run_already']:
          trace_the_exception['run_already'] = True
          raise errors_impl.AbortedError(None, None, 'Abort')
        return value
      with self.cached_session() as test_session:
        with monitored_session.MonitoredSession(
            CountingSessionCreator(test_session)) as session:
          session.run(variables.global_variables_initializer())
          self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
          self.assertTrue(trace_the_exception['run_already'])
          # Make sure the rest of the body of the step_fn is re-executed upon
          # AbortedError:
          self.assertEqual(2, trace_the_exception['side_effect_counter'])
          self.assertNear(0.62, session.run(graph_state), 0.1)
  def test_step_fn_doesnt_recover_when_it_wasnt_asked_to(self):
    """SingularMonitoredSession surfaces AbortedError instead of retrying."""
    trace_the_exception = {'run_already': False}
    with ops.Graph().as_default():
      c = array_ops.placeholder(dtypes.float32)
      v = array_ops.identity(c)
      def step_fn(step_context):
        # Raise once, on the first invocation only.
        if not trace_the_exception['run_already']:
          trace_the_exception['run_already'] = True
          raise errors_impl.AbortedError(None, None, 'Abort')
        value = step_context.run_with_hooks(fetches=v, feed_dict={c: 3.2})
        return value
      with monitored_session.SingularMonitoredSession() as session:
        with self.assertRaisesRegex(errors_impl.AbortedError, 'Abort'):
          self.assertNear(3.2, session.run_step_fn(step_fn), 0.1)
          self.fail()
      self.assertTrue(trace_the_exception['run_already'])
  def test_step_fn_exception_from_before_run(self):
    """An AbortedError from a hook's before_run re-runs the whole step_fn."""
    trace_the_exception = {'run_already': False, 'side_effect_counter': 0}
    with ops.Graph().as_default():
      c = array_ops.placeholder(dtypes.float32)
      v = array_ops.identity(c)
      vv = constant_op.constant(3.2)
      graph_state = variables.VariableV1(0.0)
      graph_side_effect = state_ops.assign_add(graph_state, 0.31)
      class Hook(session_run_hook.SessionRunHook):
        def __init__(self, testing):
          self._testing = testing
        def before_run(self, run_context):
          # Raise AbortedError on the first run call only.
          if not trace_the_exception['run_already']:
            trace_the_exception['run_already'] = True
            raise errors_impl.AbortedError(None, None, 'Abort')
          return session_run_hook.SessionRunArgs(fetches=vv)
        def after_run(self, run_context, run_values):
          self._testing.assertNear(3.2, run_values.results, 0.1)
      def step_fn(step_context):
        trace_the_exception['side_effect_counter'] += 1
        step_context.session.run(graph_side_effect)
        return step_context.run_with_hooks(fetches=v, feed_dict={c: 1.3})
      with self.cached_session() as test_session:
        with monitored_session.MonitoredSession(
            CountingSessionCreator(test_session),
            hooks=[Hook(self)]) as session:
          test_session.run(variables.global_variables_initializer())
          self.assertNear(1.3, session.run_step_fn(step_fn), 0.1)
          # The step_fn ran twice, so its side effect was applied twice.
          self.assertEqual(2, trace_the_exception['side_effect_counter'])
          self.assertNear(0.62, session.run(graph_state), 0.1)
class SingularMonitoredSessionTest(test.TestCase):
  """Tests SingularMonitoredSession."""

  def test_handles_initialization(self):
    """Variables are initialized automatically by the session."""
    with ops.Graph().as_default():
      a_var = variables.VariableV1(0)
      with monitored_session.SingularMonitoredSession() as session:
        # If it's not initialized, following statement raises an error.
        self.assertEqual(0, session.run(a_var))

  def test_do_not_handle_aborted_error(self):
    """Unlike MonitoredSession, AbortedError is surfaced, not retried."""
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()

      class _RaiseAbortedHook(session_run_hook.SessionRunHook):

        def before_run(self, run_context):
          raise errors_impl.AbortedError(None, None, 'Abort')

      with monitored_session.SingularMonitoredSession(
          hooks=[_RaiseAbortedHook()]) as session:
        with self.assertRaises(errors_impl.AbortedError):
          self.assertEqual(0, session.run(gstep))

      with self.assertRaises(errors_impl.AbortedError):
        with monitored_session.SingularMonitoredSession(
            hooks=[_RaiseAbortedHook()]) as session:
          self.assertEqual(0, session.run(gstep))

  def test_exit_cleanly_on_out_of_range_exception(self):
    """OutOfRangeError ends the `with` block cleanly and stops the session."""
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      hook = RaiseOnceAtCountN(2, errors_impl.OutOfRangeError(None, None,
                                                              'EOI'))
      session = monitored_session.SingularMonitoredSession(hooks=[hook])
      # session should cleanly exit from the context.
      with session:
        self.assertEqual(0, session.run(gstep))
        self.assertFalse(session.should_stop())
        # Here at step 1, the hook triggers and raises OutOfRange. The
        # session should go into should_stop() mode. It should raise the
        # exception. So next step should not be executed.
        session.run(do_step)
        # Use fail() (not assertTrue(False)) for a clear message, matching
        # the idiom used elsewhere in this file.
        self.fail('An exception should be raised on the line above.')
      self.assertTrue(session.should_stop())

  def test_regular_exception_reported_to_coord_pass_through_run(self):
    # Tests that regular exceptions reported to the coordinator from a thread
    # passes through a "run()" call within a "with MonitoredSession" block and
    # set the session in stop mode.
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      session = monitored_session.SingularMonitoredSession()
      run_performed_without_error = False
      with self.assertRaisesRegex(RuntimeError, 'a thread wants to stop'):
        with session:
          self.assertEqual(0, session.run(gstep))
          # Report an exception through the coordinator.
          try:
            raise RuntimeError('a thread wants to stop')
          except RuntimeError as e:
            session._coordinated_creator.coord.request_stop(e)
          # Call run() which should perform normally.
          self.assertEqual(0, session.run(gstep))
          run_performed_without_error = True
      self.assertTrue(run_performed_without_error)

  def test_stop_cleanly_when_no_exception_in_with_body(self):
    """The session stops and releases resources on normal with-body exit."""
    with ops.Graph().as_default():
      gstep = training_util.get_or_create_global_step()
      do_step = state_ops.assign_add(gstep, 1)
      session = monitored_session.SingularMonitoredSession()
      with session:
        self.assertEqual(1, session.run(do_step))
        self.assertEqual(2, session.run(do_step))
        self.assertFalse(session.should_stop())
      # Should have closed.
      self.assertTrue(session.should_stop())
      # assertIsNone is the idiomatic spelling of assertEqual(None, ...).
      self.assertIsNone(session.raw_session())

  def test_graph(self):
    """The session reports the graph it was created under."""
    with ops.Graph().as_default() as g:
      with monitored_session.SingularMonitoredSession() as session:
        self.assertEqual(g, session.graph)

  def test_raw_session(self):
    """raw_session() exposes the underlying Session object."""
    with ops.Graph().as_default():
      with monitored_session.SingularMonitoredSession() as session:
        # assertIsInstance is the idiomatic spelling of
        # assertTrue(isinstance(...)) and gives a better failure message.
        self.assertIsInstance(session.raw_session(), session_lib.Session)
if __name__ == '__main__':
  # Run all test cases defined in this module.
  test.main()
| apache-2.0 |
SmithSamuelM/bottle | test/test_contextlocals.py | 11 | 1238 | # -*- coding: utf-8 -*-
'''
Some objects are context-local, meaning that they have different values depending on the context they are accessed from. A context is currently defined as a thread.
'''
import unittest
import bottle
import threading
def run_thread(func):
    """Execute *func* on a fresh thread, blocking until it terminates."""
    worker = threading.Thread(target=func)
    worker.start()
    worker.join()
class TestThreadLocals(unittest.TestCase):
    """Checks that bottle.request/bottle.response are bound per thread."""
    def test_request(self):
        """A request bound in another thread does not leak into this one."""
        e1 = {'PATH_INFO': '/t1'}
        e2 = {'PATH_INFO': '/t2'}
        def run():
            # Bind a different WSGI environ in a separate thread.
            bottle.request.bind(e2)
            self.assertEqual(bottle.request.path, '/t2')
        bottle.request.bind(e1)
        self.assertEqual(bottle.request.path, '/t1')
        run_thread(run)
        # The main thread's binding is unchanged by the other thread.
        self.assertEqual(bottle.request.path, '/t1')
    def test_response(self):
        """A response bound in another thread does not leak into this one."""
        def run():
            bottle.response.bind()
            bottle.response.content_type='test/thread'
            self.assertEqual(bottle.response.headers['Content-Type'], 'test/thread')
        bottle.response.bind()
        bottle.response.content_type='test/main'
        self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
        run_thread(run)
        # The main thread's response headers are unaffected.
        self.assertEqual(bottle.response.headers['Content-Type'], 'test/main')
| mit |
apur27/public | ASX-Python/LoadTrainPredict-LSTM-SLR.py | 1 | 3202 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 28 09:40:59 2019
@author: UPuroAb
"""
# Script: loads ASX price CSVs, trains an LSTM on the 'Close' series with a
# 60-day sliding window, predicts the held-out tail and plots the result.
import glob
#import os
import pandas as pd
# Expected CSV column layout (the files carry no header row).
colnames=['Date', 'Open', 'High', 'Low', 'Close', 'Adj Close', 'Volume']
all_files = glob.glob('C:/QM/rnd/SLR/*.csv') # advisable to use os.path.join as this makes concatenation OS independent
df_from_each_file = (pd.read_csv(f, names=colnames, header=None, encoding='utf-8') for f in all_files)
data = pd.concat(df_from_each_file, ignore_index=True, sort=True)
import numpy as np
import matplotlib.pyplot as plt
#importing prophet
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense,Dropout, LSTM
# Column of the price data to model.
asxTicker='Close'
ticker=data
ticker=ticker.reset_index()
# Rebuild a two-column frame (Date, Close) row by row.
# NOTE(review): the chained assignment below (frame['col'][i] = ...) relies
# on writing through a view and raises SettingWithCopyWarning on modern
# pandas; .loc/.at is the safe spelling -- confirm before changing.
new_data = pd.DataFrame(index=range(0,len(ticker)),columns=['Date', 'Close'])
for i in range(0,len(ticker)):
    new_data['Date'][i] = ticker['Date'][i]
    new_data['Close'][i] = ticker[asxTicker][i]
# Number of leading rows used for training; the rest form the test set.
trainSize=1000
#new_data['Date'] = pd.to_datetime(new_data['Date'],format='%Y-%m-%d')
new_data.index = new_data.Date
new_data.drop('Date', axis=1, inplace=True)
#creating train and test sets
dataset = new_data.values
train = dataset[0:trainSize,:]
valid = dataset[trainSize:,:]
#converting dataset into x_train and y_train
# Scale prices into [0, 1].
# NOTE(review): the scaler is fit on the FULL dataset, so validation data
# leaks into the scaling; fitting on `train` only would avoid look-ahead.
scaler = MinMaxScaler(feature_range=(0, 1))
scaled_data = scaler.fit_transform(dataset)
# Sliding window: each sample is the previous 60 scaled closes and the
# label is the next close.
x_train, y_train = [], []
for i in range(60,len(train)):
    x_train.append(scaled_data[i-60:i,0])
    y_train.append(scaled_data[i,0])
x_train, y_train = np.array(x_train), np.array(y_train)
# LSTM input shape is (samples, timesteps, features).
x_train = np.reshape(x_train, (x_train.shape[0],x_train.shape[1],1))
# create and fit the LSTM network
model = Sequential()
model.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1],1)))
model.add(Dropout(0.4))
#model.add(LSTM(units=50))
#
## added
#
#model.add(Dropout(0.3))
model.add(LSTM(units = 100, return_sequences = True))
model.add(Dropout(0.3))
model.add(LSTM(units = 100, return_sequences = True))
model.add(Dropout(0.2))
#
#model.add(LSTM(units = 50, return_sequences = True))
#model.add(Dropout(0.2))
#
#model.add(LSTM(units = 50, return_sequences = True))
#model.add(Dropout(0.2))
model.add(LSTM(units = 50))
model.add(Dropout(0.2))
# added
# Single regression output: the next scaled close.
model.add(Dense(1))
model.compile(loss='mean_squared_error', optimizer='adam')
model.fit(x_train, y_train, epochs=30, batch_size=10, verbose=2)
#predicting 246 values, using past 60 from the train data
inputs = new_data[len(new_data) - len(valid) - 60:].values
inputs = inputs.reshape(-1,1)
inputs = scaler.transform(inputs)
X_test = []
for i in range(60,inputs.shape[0]):
    X_test.append(inputs[i-60:i,0])
X_test = np.array(X_test)
X_test = np.reshape(X_test, (X_test.shape[0],X_test.shape[1],1))
closing_price = model.predict(X_test)
# Undo the [0, 1] scaling to recover actual prices.
closing_price = scaler.inverse_transform(closing_price)
# Root-mean-squared error over the validation window.
rmsL=np.sqrt(np.mean(np.power((valid-closing_price),2)))
#for plotting
train = new_data[:trainSize]
valid = new_data[trainSize:]
# NOTE(review): assigning into the slice `valid` triggers pandas'
# SettingWithCopyWarning; valid = valid.assign(Predictions=closing_price)
# would avoid it.
valid['Predictions'] = closing_price
plt.plot(train['Close'])
plt.plot(valid[['Close','Predictions']])
| artistic-2.0 |
bearstech/ansible | lib/ansible/modules/network/cloudengine/ce_ntp_auth.py | 12 | 18637 | #!/usr/bin/python
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
# Standard Ansible module metadata: maturity level and support channel.
ANSIBLE_METADATA = {'status': ['preview'],
                    'supported_by': 'community',
                    'metadata_version': '1.0'}
DOCUMENTATION = '''
---
module: ce_ntp_auth
version_added: "2.4"
short_description: Manages NTP authentication configuration on HUAWEI CloudEngine switches.
description:
- Manages NTP authentication configuration on HUAWEI CloudEngine switches.
author:
- Zhijin Zhou (@CloudEngine-Ansible)
notes:
- If C(state=absent), the module will attempt to remove the given key configuration.
If a matching key configuration isn't found on the device, the module will fail.
    - If C(state=absent) and C(authentication=enable), authentication will be turned on.
    - If C(state=absent) and C(authentication=disable), authentication will be turned off.
options:
key_id:
description:
- Authentication key identifier (numeric).
required: true
auth_pwd:
description:
- Plain text with length of 1 to 255, encrypted text with length of 20 to 392.
required: false
default: null
auth_mode:
description:
- Specify authentication algorithm.
required: false
default: null
choices: ['hmac-sha256', 'md5']
auth_type:
description:
- Whether the given password is in cleartext or
has been encrypted. If in cleartext, the device
will encrypt it before storing it.
required: false
default: encrypt
choices: ['text', 'encrypt']
trusted_key:
description:
- Whether the given key is required to be supplied by a time source
for the device to synchronize to the time source.
required: false
default: 'disable'
choices: ['enable', 'disable']
authentication:
description:
- Configure ntp authentication enable or unconfigure ntp authentication enable.
required: false
default: null
choices: ['enable', 'disable']
state:
description:
- Manage the state of the resource.
required: false
default: present
choices: ['present','absent']
'''
EXAMPLES = '''
- name: NTP AUTH test
hosts: cloudengine
connection: local
gather_facts: no
vars:
cli:
host: "{{ inventory_hostname }}"
port: "{{ ansible_ssh_port }}"
username: "{{ username }}"
password: "{{ password }}"
transport: cli
tasks:
- name: "Configure ntp authentication key-id"
ce_ntp_auth:
key_id: 32
auth_mode: md5
auth_pwd: 11111111111111111111111
provider: "{{ cli }}"
- name: "Configure ntp authentication key-id and trusted authentication keyid"
ce_ntp_auth:
key_id: 32
auth_mode: md5
auth_pwd: 11111111111111111111111
trusted_key: enable
provider: "{{ cli }}"
- name: "Configure ntp authentication key-id and authentication enable"
ce_ntp_auth:
key_id: 32
auth_mode: md5
auth_pwd: 11111111111111111111111
authentication: enable
provider: "{{ cli }}"
- name: "Unconfigure ntp authentication key-id and trusted authentication keyid"
ce_ntp_auth:
key_id: 32
state: absent
provider: "{{ cli }}"
- name: "Unconfigure ntp authentication key-id and authentication enable"
ce_ntp_auth:
key_id: 32
authentication: enable
state: absent
provider: "{{ cli }}"
'''
RETURN = '''
proposed:
description: k/v pairs of parameters passed into module
returned: always
type: dict
sample: {
"auth_type": "text",
"authentication": "enable",
"key_id": "32",
"auth_pwd": "1111",
"auth_mode": "md5",
"trusted_key": "enable",
"state": "present"
}
existing:
description: k/v pairs of existing ntp authentication
returned: always
type: dict
sample: {
"authentication": "off",
"authentication-keyid": [
{
"auth_mode": "md5",
"key_id": "1",
"trusted_key": "disable"
}
]
}
end_state:
description: k/v pairs of ntp authentication after module execution
returned: always
type: dict
sample: {
"authentication": "off",
"authentication-keyid": [
{
"auth_mode": "md5",
"key_id": "1",
"trusted_key": "disable"
},
{
"auth_mode": "md5",
"key_id": "32",
"trusted_key": "enable"
}
]
}
state:
description: state as sent in from the playbook
returned: always
type: string
sample: "present"
updates:
description: command sent to the device
returned: always
type: list
sample: [
"ntp authentication-key 32 md5 1111",
"ntp trusted-key 32",
"ntp authentication enable"
]
changed:
description: check to see if a change was made on the device
returned: always
type: boolean
sample: true
'''
import copy
import re
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.ce import ce_argument_spec, load_config, get_nc_config, set_nc_config
CE_NC_GET_NTP_AUTH_CONFIG = """
<filter type="subtree">
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpAuthKeyCfgs>
<ntpAuthKeyCfg>
<keyId>%s</keyId>
<mode></mode>
<keyVal></keyVal>
<isReliable></isReliable>
</ntpAuthKeyCfg>
</ntpAuthKeyCfgs>
</ntp>
</filter>
"""
CE_NC_GET_ALL_NTP_AUTH_CONFIG = """
<filter type="subtree">
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpAuthKeyCfgs>
<ntpAuthKeyCfg>
<keyId></keyId>
<mode></mode>
<keyVal></keyVal>
<isReliable></isReliable>
</ntpAuthKeyCfg>
</ntpAuthKeyCfgs>
</ntp>
</filter>
"""
CE_NC_GET_NTP_AUTH_ENABLE = """
<filter type="subtree">
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpSystemCfg>
<isAuthEnable></isAuthEnable>
</ntpSystemCfg>
</ntp>
</filter>
"""
CE_NC_MERGE_NTP_AUTH_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpAuthKeyCfgs>
<ntpAuthKeyCfg operation="merge">
<keyId>%s</keyId>
<mode>%s</mode>
<keyVal>%s</keyVal>
<isReliable>%s</isReliable>
</ntpAuthKeyCfg>
</ntpAuthKeyCfgs>
</ntp>
</config>
"""
CE_NC_MERGE_NTP_AUTH_ENABLE = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpSystemCfg operation="merge">
<isAuthEnable>%s</isAuthEnable>
</ntpSystemCfg>
</ntp>
</config>
"""
CE_NC_DELETE_NTP_AUTH_CONFIG = """
<config>
<ntp xmlns="http://www.huawei.com/netconf/vrp" content-version="1.0" format-version="1.0">
<ntpAuthKeyCfgs>
<ntpAuthKeyCfg operation="delete">
<keyId>%s</keyId>
</ntpAuthKeyCfg>
</ntpAuthKeyCfgs>
</ntp>
</config>
"""
class NtpAuth(object):
    """Manage NTP authentication on HUAWEI CloudEngine switches.

    Keys are read and written over NETCONF; cleartext keys fall back to the
    CLI because the NETCONF schema only accepts encrypted key values.
    """

    def __init__(self, argument_spec):
        self.spec = argument_spec
        self.module = None
        self.init_module()

        # ntp_auth configuration info
        self.key_id = self.module.params['key_id']
        self.password = self.module.params['auth_pwd'] or None
        self.auth_mode = self.module.params['auth_mode'] or None
        self.auth_type = self.module.params['auth_type']
        self.trusted_key = self.module.params['trusted_key']
        self.authentication = self.module.params['authentication'] or None
        self.state = self.module.params['state']
        self.check_params()

        # Device-side facts, filled by get_ntp_auth_exist_config().
        self.ntp_auth_conf = dict()
        self.key_id_exist = False
        self.cur_trusted_key = 'disable'

        # state
        self.changed = False
        self.updates_cmd = list()
        self.results = dict()
        self.proposed = dict()
        # Fixed: these hold dicts (deep copies of ntp_auth_conf), so start
        # them as dicts instead of lists.
        self.existing = dict()
        self.end_state = dict()

        self.get_ntp_auth_exist_config()

    def check_params(self):
        """Check all input params"""
        if not self.key_id.isdigit():
            self.module.fail_json(
                msg='Error: key_id is not digit.')

        if (int(self.key_id) < 1) or (int(self.key_id) > 4294967295):
            # Fixed message: this validates the key id's value range, not a
            # string length.
            self.module.fail_json(
                msg='Error: The key_id is not in the range from 1 to 4294967295.')

        if self.state == "present":
            # auth_pwd/auth_mode presence is enforced by required_if in
            # init_module(), so len(self.password) is safe here.
            if (self.auth_type == 'encrypt') and\
                    ((len(self.password) < 20) or (len(self.password) > 392)):
                self.module.fail_json(
                    msg='Error: The length of encrypted password is between 20 and 392.')
            elif (self.auth_type == 'text') and\
                    ((len(self.password) < 1) or (len(self.password) > 255)):
                self.module.fail_json(
                    msg='Error: The length of text password is between 1 and 255.')

    def init_module(self):
        """Init module object"""
        # Fixed: the required_if entry referenced a non-existent parameter
        # name 'password'; the argument spec declares it as 'auth_pwd', so
        # the requirement was never enforced and check_params() could hit
        # len(None).
        required_if = [("state", "present", ("auth_pwd", "auth_mode"))]
        self.module = AnsibleModule(
            argument_spec=self.spec,
            required_if=required_if,
            supports_check_mode=True
        )

    def check_response(self, xml_str, xml_name):
        """Check if response message is already succeed."""
        if "<ok/>" not in xml_str:
            self.module.fail_json(msg='Error: %s failed.' % xml_name)

    def get_ntp_auth_enable(self):
        """Get ntp authentication enable state"""
        xml_str = CE_NC_GET_NTP_AUTH_ENABLE
        con_obj = get_nc_config(self.module, xml_str)
        if "<data/>" in con_obj:
            return

        # get ntp authentication enable
        auth_en = re.findall(
            r'.*<isAuthEnable>(.*)</isAuthEnable>.*', con_obj)
        if auth_en:
            if auth_en[0] == 'true':
                self.ntp_auth_conf['authentication'] = 'enable'
            else:
                self.ntp_auth_conf['authentication'] = 'disable'

    def get_ntp_all_auth_keyid(self):
        """Get all authentication keyid info"""
        ntp_auth_conf = list()

        xml_str = CE_NC_GET_ALL_NTP_AUTH_CONFIG
        con_obj = get_nc_config(self.module, xml_str)
        if "<data/>" in con_obj:
            self.ntp_auth_conf["authentication-keyid"] = "None"
            return ntp_auth_conf

        # get ntp authentication config
        ntp_auth = re.findall(
            r'.*<keyId>(.*)</keyId>.*\s*<mode>(.*)</mode>.*\s*'
            r'<keyVal>(.*)</keyVal>.*\s*<isReliable>(.*)</isReliable>.*', con_obj)

        for ntp_auth_num in ntp_auth:
            if ntp_auth_num[0] == self.key_id:
                self.key_id_exist = True
                if ntp_auth_num[3] == 'true':
                    self.cur_trusted_key = 'enable'
                else:
                    self.cur_trusted_key = 'disable'

            if ntp_auth_num[3] == 'true':
                trusted_key = 'enable'
            else:
                trusted_key = 'disable'
            ntp_auth_conf.append(dict(key_id=ntp_auth_num[0],
                                      auth_mode=ntp_auth_num[1].lower(),
                                      trusted_key=trusted_key))
        self.ntp_auth_conf["authentication-keyid"] = ntp_auth_conf
        return ntp_auth_conf

    def get_ntp_auth_exist_config(self):
        """Get ntp authentication existed configure"""
        self.get_ntp_auth_enable()
        self.get_ntp_all_auth_keyid()

    def config_ntp_auth_keyid(self):
        """Config ntp authentication keyid via NETCONF (encrypted keys)"""
        if self.trusted_key == 'enable':
            trusted_key = 'true'
        else:
            trusted_key = 'false'
        xml_str = CE_NC_MERGE_NTP_AUTH_CONFIG % (
            self.key_id, self.auth_mode.upper(), self.password, trusted_key)
        ret_xml = set_nc_config(self.module, xml_str)
        self.check_response(ret_xml, "NTP_AUTH_KEYID_CONFIG")

    def config_ntp_auth_enable(self):
        """Config ntp authentication enable"""
        if self.ntp_auth_conf['authentication'] != self.authentication:
            if self.authentication == 'enable':
                state = 'true'
            else:
                state = 'false'
            xml_str = CE_NC_MERGE_NTP_AUTH_ENABLE % state
            ret_xml = set_nc_config(self.module, xml_str)
            self.check_response(ret_xml, "NTP_AUTH_ENABLE")

    def undo_config_ntp_auth_keyid(self):
        """Undo ntp authentication key-id"""
        xml_str = CE_NC_DELETE_NTP_AUTH_CONFIG % self.key_id
        ret_xml = set_nc_config(self.module, xml_str)
        self.check_response(ret_xml, "UNDO_NTP_AUTH_KEYID_CONFIG")

    def cli_load_config(self, commands):
        """Load config by cli (no-op in check mode)"""
        if not self.module.check_mode:
            load_config(self.module, commands)

    def config_ntp_auth_keyid_by_cli(self):
        """Config ntp authentication keyid by the way of CLI (cleartext keys)"""
        commands = list()
        config_cli = "ntp authentication-keyid %s authentication-mode %s %s" % (
            self.key_id, self.auth_mode, self.password)
        commands.append(config_cli)
        self.cli_load_config(commands)

    def config_ntp_auth(self):
        """Config ntp authentication"""
        if self.state == "present":
            # NETCONF only accepts encrypted key values; cleartext goes
            # through the CLI so the device encrypts it itself.
            if self.auth_type == 'encrypt':
                self.config_ntp_auth_keyid()
            else:
                self.config_ntp_auth_keyid_by_cli()
        else:
            if not self.key_id_exist:
                self.module.fail_json(
                    msg='Error: The Authentication-keyid does not exist.')
            self.undo_config_ntp_auth_keyid()

        if self.authentication:
            self.config_ntp_auth_enable()

        self.changed = True

    def get_existing(self):
        """Get existing info"""
        self.existing = copy.deepcopy(self.ntp_auth_conf)

    def get_proposed(self):
        """Get proposed result"""
        auth_type = self.auth_type
        trusted_key = self.trusted_key
        if self.state == 'absent':
            auth_type = None
            trusted_key = None
        self.proposed = dict(key_id=self.key_id, auth_pwd=self.password,
                             auth_mode=self.auth_mode, auth_type=auth_type,
                             trusted_key=trusted_key, authentication=self.authentication,
                             state=self.state)

    def get_update_cmd(self):
        """Build the list of equivalent CLI commands for the 'updates' output"""
        cli_str = ""
        if self.state == "present":
            cli_str = "ntp authentication-keyid %s authentication-mode %s " % (
                self.key_id, self.auth_mode)
            if self.auth_type == 'encrypt':
                cli_str = "%s cipher %s" % (cli_str, self.password)
            else:
                cli_str = "%s %s" % (cli_str, self.password)
        else:
            cli_str = "undo ntp authentication-keyid %s" % self.key_id
        self.updates_cmd.append(cli_str)

        if self.authentication:
            cli_str = ""
            if self.ntp_auth_conf['authentication'] != self.authentication:
                if self.authentication == 'enable':
                    cli_str = "ntp authentication enable"
                else:
                    cli_str = "undo ntp authentication enable"
            if cli_str != "":
                self.updates_cmd.append(cli_str)

        cli_str = ""
        if self.state == "present":
            # Only emit a trusted-key command when the flag actually changes.
            if self.trusted_key != self.cur_trusted_key:
                if self.trusted_key == 'enable':
                    cli_str = "ntp trusted authentication-keyid %s" % self.key_id
                else:
                    cli_str = "undo ntp trusted authentication-keyid %s" % self.key_id
        else:
            cli_str = "undo ntp trusted authentication-keyid %s" % self.key_id
        if cli_str != "":
            self.updates_cmd.append(cli_str)

    def get_end_state(self):
        """Get end state info (re-reads the device after configuration)"""
        self.ntp_auth_conf = dict()
        self.get_ntp_auth_exist_config()
        self.end_state = copy.deepcopy(self.ntp_auth_conf)

    def show_result(self):
        """Show result"""
        self.results['changed'] = self.changed
        self.results['proposed'] = self.proposed
        self.results['existing'] = self.existing
        self.results['end_state'] = self.end_state
        if self.changed:
            self.results['updates'] = self.updates_cmd
        else:
            self.results['updates'] = list()

        self.module.exit_json(**self.results)

    def work(self):
        """Execute task"""
        self.get_existing()
        self.get_proposed()
        self.get_update_cmd()

        self.config_ntp_auth()

        self.get_end_state()
        self.show_result()
def main():
    """Build the argument spec, run the NtpAuth workflow and exit."""
    spec = dict(
        key_id=dict(required=True, type='str'),
        auth_pwd=dict(type='str', no_log=True),
        auth_mode=dict(choices=['md5', 'hmac-sha256'], type='str'),
        auth_type=dict(choices=['text', 'encrypt'], default='encrypt'),
        trusted_key=dict(choices=['enable', 'disable'], default='disable'),
        authentication=dict(choices=['enable', 'disable']),
        state=dict(choices=['absent', 'present'], default='present'),
    )
    # Merge in the common CloudEngine connection arguments.
    spec.update(ce_argument_spec)
    NtpAuth(spec).work()


if __name__ == '__main__':
    main()
| gpl-3.0 |
Facetracker-project/facetracker-core | lib/youtube-dl/youtube_dl/extractor/nba.py | 31 | 1666 | from __future__ import unicode_literals
from .common import InfoExtractor
from ..utils import (
remove_end,
parse_duration,
)
class NBAIE(InfoExtractor):
    """Extractor for video pages on nba.com."""

    _VALID_URL = r'https?://(?:watch\.|www\.)?nba\.com/(?:nba/)?video(?P<id>/[^?]*?)/?(?:/index\.html)?(?:\?.*)?$'
    _TESTS = [{
        'url': 'http://www.nba.com/video/games/nets/2012/12/04/0021200253-okc-bkn-recap.nba/index.html',
        'md5': 'c0edcfc37607344e2ff8f13c378c88a4',
        'info_dict': {
            'id': '0021200253-okc-bkn-recap.nba',
            'ext': 'mp4',
            'title': 'Thunder vs. Nets',
            'description': 'Kevin Durant scores 32 points and dishes out six assists as the Thunder beat the Nets in Brooklyn.',
            'duration': 181,
        },
    }, {
        'url': 'http://www.nba.com/video/games/hornets/2014/12/05/0021400276-nyk-cha-play5.nba/',
        'only_matching': True,
    }]

    def _real_extract(self, url):
        # Path-style id from the URL, e.g. "/games/.../0021200253-okc-bkn-recap.nba".
        path_id = self._match_id(url)
        page = self._download_webpage(url, path_id)

        # The last path component doubles as the short display id.
        short_id = path_id.rpartition('/')[2]

        info = {
            'id': short_id,
            # The CDN URL can be derived directly from the path id.
            'url': 'http://ht-mobile.cdn.turner.com/nba/big' + path_id + '_nba_1280x720.mp4',
        }
        og_title = self._og_search_title(page, default=short_id)
        info['title'] = remove_end(og_title, ' : NBA.com')
        info['description'] = self._og_search_description(page)
        meta_duration = self._html_search_meta('duration', page, 'duration')
        info['duration'] = parse_duration(meta_duration)
        return info
| gpl-2.0 |
hlmnrmr/liveblog | server/app.py | 1 | 3362 | #!/usr/bin/env python
# -*- coding: utf-8; -*-
#
# This file is part of Superdesk.
#
# Copyright 2013, 2014, 2015 Sourcefabric z.u. and contributors.
#
# For the full copyright and license information, please see the
# AUTHORS and LICENSE files distributed with this source code, or
# at https://www.sourcefabric.org/superdesk/license
import os
import jinja2
import flask_s3
import settings
from flask_cache import Cache
from liveblog.blogs import bloglist_assets_blueprint, bloglist_blueprint
from liveblog.blogs.embeds import embed_blueprint
from liveblog.common import BlogCache
from liveblog.syndication.producer import producers_blueprint
from liveblog.syndication.consumer import consumers_blueprint
from liveblog.syndication.syndication import syndication_blueprint
from liveblog.syndication.blogs import blogs_blueprint as syndication_blogs_blueprint
from liveblog.marketplace.marketer import marketers_blueprint
from liveblog.analytics.analytics import analytics_blueprint
from liveblog.items.items import drag_and_drop_blueprint
from liveblog.client_modules.client_modules import blog_posts_blueprint
from liveblog.advertisements.advertisements import advertisements_blueprint
from superdesk.factory import get_app as superdesk_app
def get_app(config=None):
    """App factory.

    :param config: configuration that can override config from `settings.py`
    :return: a new SuperdeskEve app instance
    """
    if config is None:
        config = {}
    else:
        # Work on a copy so the caller's dict is not mutated by the
        # defaults and overrides applied below.
        config = dict(config)
    config['APP_ABSPATH'] = os.path.abspath(os.path.dirname(__file__))

    # Fill in anything not overridden from the UPPER_CASE names in settings.py.
    for key in dir(settings):
        if key.isupper():
            config.setdefault(key, getattr(settings, key))

    # Amazon S3 media storage is only used when a container is configured;
    # the import is deferred so the S3 stack is not required otherwise.
    media_storage = None
    if config.get('AMAZON_CONTAINER_NAME'):
        from superdesk.storage.amazon_media_storage import AmazonMediaStorage
        media_storage = AmazonMediaStorage

    # Superdesk expects DOMAIN to exist; resources register themselves later.
    config['DOMAIN'] = {}

    # Create superdesk app instance.
    app = superdesk_app(config, media_storage)

    # Add custom jinja2 template loader (local templates take precedence).
    app.jinja_loader = jinja2.ChoiceLoader([
        jinja2.FileSystemLoader('superdesk/templates'),
        app.jinja_loader
    ])

    # Caching.
    app.cache = Cache(app, config={'CACHE_TYPE': 'simple'})
    app.blog_cache = BlogCache(cache=app.cache)

    # Amazon S3 support.
    flask_s3.FlaskS3().init_app(app)

    # Feature blueprints, registered in the original order.
    blueprints = (
        embed_blueprint,              # embed feature
        bloglist_assets_blueprint,    # embed bloglist assets
        bloglist_blueprint,           # embed bloglist
        analytics_blueprint,          # analytics
        advertisements_blueprint,     # advertisements
        producers_blueprint,          # syndication: producers
        consumers_blueprint,          # syndication: consumers
        syndication_blueprint,        # syndication
        syndication_blogs_blueprint,  # syndication: blogs
        marketers_blueprint,          # marketplace
        drag_and_drop_blueprint,      # drag and drop
        blog_posts_blueprint,         # new posts endpoint
    )
    for blueprint in blueprints:
        app.register_blueprint(blueprint)

    return app
if __name__ == '__main__':
    # Development entry point; production deployments should run the app
    # through a WSGI server instead.
    debug = True
    host = '0.0.0.0'
    # Port can be overridden through the environment (e.g. by a PaaS).
    port = int(os.environ.get('PORT', '5000'))
    app = get_app()
    app.run(host=host, port=port, debug=debug, use_reloader=debug)
| agpl-3.0 |
Kelina/TensorEmbeddings | 6_seg/FINAL_MorphoDist_6Seg.py | 1 | 15605 | # PYTHON3
# This uses word embeddings made from the last word, current word and next word (each in 3 segments), concatenated.
# Therefore, each word vector has 1800 dimensions.
import os
import datetime
import sys
import general_global_variables as gl
import pickle as cPickle
import numpy
from scipy import linalg, mat, dot
from scipy.spatial.distance import cdist
import collections
import random
#savedir = "/mounts/data/proj/wordgraph/segem20151005/"
savedir = "/mounts/data/proj/kann/SegEm/results/test_20151005/" # for earlier results
savedir_now = "/mounts/data/proj/kann/SegEm/results/20151006_max_frequ_nn_10/"
def normalize(v):
    """Return *v* scaled to unit (L2) length; a zero vector is returned unchanged."""
    length = numpy.linalg.norm(v)
    if length == 0:
        return v
    return v / length
def loadEmbeddingFile(embeddingFile, useRandomWordEmbeddings):
    """Load trigram embeddings from a text file.

    Each line is "w1 w2 w3 v1 v2 ..."; returns a dict mapping the
    space-joined trigram to its L2-normalized vector. When
    useRandomWordEmbeddings is true, uniform random values replace the
    file's vector components (same dimensionality).

    NOTE: relies on the module-global ``fast_version`` (set in __main__)
    to cap loading at 1000 lines.
    """
    embeddingDict = {}
    counter = 0
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(embeddingFile) as emb_file:
        for line in emb_file:
            counter += 1
            if counter == 1001 and fast_version:
                break
            # Drop the trailing two characters (separator + newline) before
            # splitting -- assumes every line ends that way; TODO confirm.
            tokens = line[:len(line) - 2].split(' ')
            values = []
            for i in range(3, len(tokens)):
                if useRandomWordEmbeddings:
                    # produce random values for the segment embeddings
                    values.append(float(random.uniform(-1.0, 1.0)))
                else:
                    values.append(float(tokens[i]))
            # Fixed: normalize() divides its argument by a float, which
            # raises TypeError for a plain list -- convert to ndarray first.
            vector = numpy.array(values)
            embeddingDict[tokens[0] + ' ' + tokens[1] + ' ' + tokens[2]] = normalize(vector)
    return embeddingDict
def HammingDist(v1, v2):
    """Count the positions where the two sequences differ.

    Indexes over ``v1``, so a shorter ``v2`` raises IndexError
    (same behaviour as the original loop).
    """
    return sum(1 for i in range(len(v1)) if v1[i] != v2[i])
def getMorphoDist(emb_dict, morph_dict, voc_list, word_frequ_dict, seg_dict):
    """Evaluate trigram embeddings by morphological agreement of nearest neighbours.

    Samples up to 1000 trigrams (or reloads a previously used set), finds
    the k=5 nearest neighbours of each by cosine similarity, and counts how
    many morphological tags differ between each word and its neighbours.
    Returns (wrongAnswers, avg different tags per pair, number of pairs).

    NOTE(review): depends on module globals set in __main__
    (store_experiment_words, allWords, max_frequency_nn, savedir_now,
    log_name) and on gl.experimentWords -- confirm they are initialised
    before calling. detailed_log is never closed.
    """
    detailed_log = open(savedir_now + 'FINAL_' + log_name + '_detailed', 'a')
    morphoDist = 0 # average tags per word that are different
    wrongAnswers = 0
    successWords = 0
    k = 5
    numberWords = 1000
    wordsForEvaluation = []
    # Get $numberWords random words:
    pred_matrix=[] # The Matrix for the embeddings we want to find neighbours for.
    gold_word_list=[]
    random.shuffle(voc_list)
    # When repeating an experiment, replace the vocabulary with the words
    # stored from the earlier run.
    if not store_experiment_words:
        voc_list = []
        for transfer_word in gl.experimentWords:
            voc_list.append(transfer_word)
        gl.experimentWords = []
    # Sample fresh evaluation trigrams: only words occurring once, used as
    # the middle word of a punctuation-free trigram.
    for word in voc_list:
        if not store_experiment_words:
            break
        if word_frequ_dict[word] > 1:
            continue
        gl.experimentWords.append(word)
        # Find a set of 3 words where the current word is the 2nd one. For this, shuffle first.
        emb_dict_keys = list(emb_dict.keys()) # Python 3; use keys = d.keys() in Python 2
        random.shuffle(emb_dict_keys)
        for key in emb_dict_keys:
            key_value = emb_dict[key]
            key_tokens = key.split(' ')
            if word == key_tokens[1]:
                if not containsPunctuation(key):
                    value = key_value
                    successWords += 1
                    wordsForEvaluation.append(key)
                    pred_matrix.append(key_value)
                    gold_word_list.append(key)
                    break
        if successWords >= numberWords:
            break
        #continue
    print('experiment words found')
    # Load the words from before, because we said we want to repeat with the same words.
    if not store_experiment_words:
        counter = 0
        pred_matrix = []
        gold_word_list = []
        wordsForEvaluation = []
        for word in allWords:
            counter += 1
            if counter == 1001:
                break
            wordsForEvaluation.append(word)
            pred_matrix.append(emb_dict[word])
            gold_word_list.append(word)
    # Obtain now the closest neighbors.
    word_list = []
    emb_matrix = []
    for key, value in emb_dict.items():
        # Don't consider trigrams with punctuation.
        if containsPunctuation(key + " "):
            continue
        all_keys = key.split(' ')
        # Frequency cut-off for neighbour candidates; -1 disables the search
        # entirely (every candidate is skipped).
        if word_frequ_dict[all_keys[1]] > max_frequency_nn or max_frequency_nn == -1:
            continue
        word_list.append(key)
        emb_matrix.append(value)
    pred_matrix=mat(pred_matrix)
    emb_matrix = mat(emb_matrix)
    # NOTE(review): rows and overallDistance are computed/initialised but
    # never used below.
    rows=len(wordsForEvaluation)
    overallDistance = 0
    print('Calculating simi_matrix...')
    print(pred_matrix.shape)
    print(emb_matrix.shape)
    # Cosine similarity between every evaluation trigram and every candidate.
    simi_matrix=1-cdist(pred_matrix, emb_matrix, 'cosine')
    # k+1 highest-similarity columns (the word itself may be among them).
    max_index_matrix=simi_matrix.argsort()[:,-k-1:]
    pred_word_matrix=[]
    for row in range(max_index_matrix.shape[0]):
        pred_list=[word_list[i] for i in max_index_matrix[row]]
        pred_word_matrix.append(pred_list)
    # Get the tags and calculate the number of wrong ones.
    for i in range(numberWords):
        segmentRep = getSegmentsofTrigram(gold_word_list[i], seg_dict)
        secondWord = gold_word_list[i].split(' ')[1]
        detailed_log.write('The original word: ' + gold_word_list[i] + ' (' + segmentRep + ') (number of occurrences: ' + str(word_frequ_dict[secondWord]) + ')\n')
        detailed_log.write('The possible morphological tags: ')
        if gold_word_list[i] in morph_dict:
            for tags in morph_dict[gold_word_list[i]]:
                detailed_log.write(tags)
        else:
            print('There is some problem with ' + gold_word_list[i])
            exit(0)
            continue
        detailed_log.write('\n')
        # Walk the neighbours from most to least similar, skipping the word
        # itself when it appears among its own neighbours.
        for j in [5,4,3,2,1,0]:
            if pred_word_matrix[i][j] == gold_word_list[i]:
                continue
            segmentRepPred = getSegmentsofTrigram(pred_word_matrix[i][j], seg_dict)
            the_words = pred_word_matrix[i][j].split(' ')
            detailed_log.write('\npredicted word: ' + pred_word_matrix[i][j] + ' (' + segmentRepPred + ') (number of occurrences: ' + str(word_frequ_dict[the_words[1]]) + ')\n')
            differentTags, final1, final2 = minimumMorphDist(morph_dict[gold_word_list[i]], morph_dict[pred_word_matrix[i][j]])
            # log the tags for the closest neighbors
            detailed_log.write(final2)
            if differentTags > 0:
                wrongAnswers += 1
            morphoDist += differentTags
            detailed_log.write('\nNumber of different tags: ' + str(differentTags))
            detailed_log.write('\n\n\n')
    # NOTE(review): this writes a fraction, not a percentage, despite the '%'.
    detailed_log.write('Wrong answers: ' + str(wrongAnswers / (numberWords * k)) + "%")
    detailed_log.write('\nAv. number of wrong tags per word: ' + str(morphoDist / (numberWords * k)))
    return wrongAnswers, morphoDist / (numberWords * k), k*numberWords
def getSegmentsofTrigram(trigram, seg_dict):
    """Render each word of the space-separated trigram as "seg0-seg1".

    seg_dict maps a word to its list of segments; only the first two
    segments are used. Words are joined with single spaces.
    """
    pieces = []
    for token in trigram.split(' '):
        segments = seg_dict[token]
        pieces.append(segments[0] + "-" + segments[1])
    return " ".join(pieces)
# True if any punctuation mark appears directly before or after a space in
# the text (i.e. as a stand-alone token boundary), else False.
def containsPunctuation(text):
    marks = '.,;:()?!\"{}[]-_/`\''
    return any((" " + m) in text or (m + " ") in text for m in marks)
# Normalizes all keys of a morphology dictionary: lower-cases them and maps
# every digit to '9' (values are kept as-is; colliding normalized keys keep
# the last value seen, as before).
def convertMorphDict(morphDict):
    return {takeOutNumbers(key.lower()): value
            for key, value in morphDict.items()}
# To be used in getMorphoDistFromTagsInWords.
# Getting the smallest number of different tags for two words. The input are the set of possible representations for word 1 and word 2 (format: set of Strings like "case=acc|number=sg|gender=masc").
def minimumMorphDist(tagset1, tagset2):
distance = 1000
entered = False
for rep1 in tagset1:
for rep2 in tagset2:
entered = True
newDistance = getMorphoDistFromTagsInWords(rep1, rep2)
if newDistance < distance:
final1 = rep1
final2 = rep2
distance = newDistance
'''
print(tagset1)
print(tagset2)
print('Final result:')
print(final1)
print(final2)
print(distance)
sys.exit(0)
'''
if entered == False:
print('NOT ENTERED')
print(tagset1)
print(tagset2)
# sys.exit(0)
return distance, final1, final2
def _parse_morph_tags(tag):
    """Parse 'k=v|k2=v2|...' into a dict.

    A bare '_' token is skipped; a token without '=' maps to the value
    'none'; for tokens with several '=' only the first value is kept
    (matching the original behaviour).
    """
    parsed = {}
    for token in tag.split('|'):
        if token == "_":
            continue
        parts = token.split('=')
        if len(parts) == 1:
            parsed[parts[0]] = "none"
        else:
            parsed[parts[0]] = parts[1]
    return parsed


# TODO: make * ok answer
def getMorphoDistFromTagsInWords(tag1, tag2):
    """Count differing morphological attributes between two tag strings.

    An attribute counts as different when it is present in only one tag or
    has different values. Special case: two plural forms whose genders
    differ get one difference discounted.
    """
    dict1 = _parse_morph_tags(tag1)
    dict2 = _parse_morph_tags(tag2)

    different = 0
    for key, value in dict1.items():
        if key not in dict2:
            different += 1
        elif value != dict2[key]:
            different += 1
    for key in dict2:
        if key not in dict1:
            different += 1

    # Count 1 error less if both are plural and their gender is not the same.
    if ("number" in dict1) and ("number" in dict2) and ("gender" in dict1) and ("gender" in dict2):
        if dict1["number"] == "pl" and dict2["number"] == "pl" and dict1["gender"] != dict2["gender"]:
            different -= 1

    return different
def takeOutNumbers(word):
    """Return *word* with every decimal digit replaced by '9'."""
    return ''.join('9' if ch in '0123456789' else ch for ch in word)
def readTagMeanings(tagFile):
    """Parse tab-separated 'tag<TAB>meaning' lines from an open file/iterable.

    Kept as defaultdict(int) to preserve the original lookup behaviour
    (missing tags silently yield 0). Lines with fewer than two fields
    raise IndexError, as before.
    """
    tagMeanings_dict = collections.defaultdict(int)
    for line in tagFile:
        # fixed: the original had a redundant double assignment
        # ('tokens = tokens=...')
        tokens = line.strip().split('\t')
        tagMeanings_dict[tokens[0]] = tokens[1]
    return tagMeanings_dict
def getSameWords(aFile):
    """Collect the trigrams from a detailed log produced by getMorphoDist.

    For every line containing "The original word", extract the text between
    ": " and the first " (" and return those trigrams in file order.
    """
    words_from_before = []
    # 'with' guarantees the handle is closed (the original leaked it).
    with open(aFile) as log_file:
        for line in log_file:
            if "The original word" in line:
                tokens_1 = line.split(": ")
                tokens_2 = tokens_1[1].split(" (")
                words_from_before.append(tokens_2[0])
    return words_from_before
if __name__ == '__main__':
    # Refuse to overwrite the results of an earlier run.
    if not os.path.exists(savedir_now):
        os.makedirs(savedir_now)
    else:
        print('Directory exists, so existing files would be substituted by new ones.\nStop execution.')
        sys.exit(1)
    # Defines if all words of the embedding set should be loaded:
    if sys.argv[1] == "True":
        fast_version = True
    else:
        fast_version = False
    # Defines if we want to use an earlier set of words to repeat an experiment.
    # If False, the set for this number of executions will be overwritten.
    if sys.argv[2] == "True":
        store_experiment_words = True
    else:
        store_experiment_words = False
    # NOTE(review): the command-line values above are immediately overridden
    # here -- presumably left over from debugging; confirm before removing.
    fast_version = False
    store_experiment_words = True
    max_frequency_nn = -1
    log_name = 'log_file'
    name_compare_file = savedir + 'FINAL_log_file_detailed'  # use the words used for this experiment
    logFile = open(log_name, 'a')
    useRandomSegVectors = False
    numberOfExecutions = 1
    if not store_experiment_words:
        numberOfExecutions = 1  # only one execution if we repeat experiment... because of how we load the words
    # Load the trigrams for the experiment.
    # If you want to do this, check first that the file for the right number of executions exists.
    if not store_experiment_words:
        allWords = getSameWords(name_compare_file)
    # Load the segmentation dictionary in order to show the segments for each word.
    segDictFile = open(savedir + 'seg_dictionary_3_012_new', 'rb')
    seg_dict = cPickle.load(segDictFile)
    segDictFile.close()
    # Load the dictionary with the word frequency.
    wfFile = open(savedir + 'wordFrequency', 'rb')
    word_frequ_dict = cPickle.load(wfFile)
    wfFile.close()
    # Load the morphDict. This is a mapping from words to a (binary) vector with their morphological tags.
    wordDataFile = open(savedir + 'dict3WordsToTags', 'rb')
    morphDict = cPickle.load(wordDataFile)
    wordDataFile.close()
    morph_dict = convertMorphDict(morphDict)
    # Load the vocabulary list (all words of the vocabulary stored in a list).
    vocListFile = open(savedir + 'vocabularyList_Tiger', 'rb')
    vocabularyList = cPickle.load(vocListFile)
    vocListFile.close()
    # Normalize the vocabulary and de-duplicate while keeping order.
    # (A set tracks membership; the original list lookup was O(n) per word.)
    newVocList = []
    seenWords = set()
    for word in vocabularyList:
        word = takeOutNumbers(word.lower())
        if word not in seenWords:
            seenWords.add(word)
            newVocList.append(word)
    # Load the word embeddings.
    if useRandomSegVectors:
        embDict = loadEmbeddingFile('/mounts/data/proj/kann/FINAL_RESCAL_RESULTS/3_segEmb_randomSegEmb.txt', False)  # TODO: substitute with sth useful
        logMessage = 'Using random segment vectors.'
    else:
        embDict = loadEmbeddingFile(savedir + '6_seg_emb.txt', False)
        logMessage = 'Using segment vectors from corpus.'
    print(logMessage)
    logFile.write(logMessage + '\n')
    logFile.close()  # fixed: 'logFile.close' without parentheses was a no-op
    avResult = 0
    for i in range(numberOfExecutions):
        logFile = open('log_file', 'a')
        logFile.write(str(i) + '\n')
        logFile.close()  # fixed: was a bare attribute access
        finalNumberWrongAnswers, avDifferentTagsPerWordPair, totalNumberWords = getMorphoDist(
            embDict, morph_dict, newVocList, word_frequ_dict, seg_dict)
        print('Wrong answers: ' + str(finalNumberWrongAnswers) + ' out of ' + str(totalNumberWords))
        print('The average number of different tags per word are: ' + str(avDifferentTagsPerWordPair))
        avResult += avDifferentTagsPerWordPair
    print("The average of different tags per word pair: " + str(avResult / numberOfExecutions))
    if store_experiment_words:
        print('Store the words used for the experiment to binary file...')
        save_file = open(savedir_now + 'experimentWords_' + str(numberOfExecutions) + 'runs', 'wb')
        # NOTE(review): this pickles gl.vocabularyList although the words
        # actually used are collected in gl.experimentWords -- verify which
        # list was intended before changing.
        cPickle.dump(gl.vocabularyList, save_file, -1)
        save_file.close()
        print('Done.\n')
| apache-2.0 |
mozilla/zamboni | mkt/comm/migrations/0002_auto_20150727_1017.py | 13 | 1816 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
from django.conf import settings
class Migration(migrations.Migration):
    """Second ``comm`` migration: adds the foreign keys that tie
    communication threads/notes/attachments to webapps, versions and users,
    and enforces one thread per (webapp, version) pair."""
    dependencies = [
        ('comm', '0001_initial'),
        # Resolves to whatever user model the project configured (AUTH_USER_MODEL),
        # rather than hard-coding auth.User.
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('versions', '0001_initial'),
        ('webapps', '0001_initial'),
    ]
    operations = [
        # Threads point at a Webapp; the column keeps the legacy name
        # ``addon_id`` (bytes literal: this file targets Python 2).
        migrations.AddField(
            model_name='communicationthread',
            name='_addon',
            field=models.ForeignKey(related_name='threads', db_column=b'addon_id', to='webapps.Webapp'),
            preserve_default=True,
        ),
        # Optional link to a specific version (nullable).
        migrations.AddField(
            model_name='communicationthread',
            name='_version',
            field=models.ForeignKey(related_name='threads', db_column=b'version_id', to='versions.Version', null=True),
            preserve_default=True,
        ),
        # At most one thread per (webapp, version) combination.
        migrations.AlterUniqueTogether(
            name='communicationthread',
            unique_together=set([('_addon', '_version')]),
        ),
        # Notes may be authorless (system notes), hence blank/null.
        migrations.AddField(
            model_name='communicationnote',
            name='author',
            field=models.ForeignKey(related_name='comm_notes', blank=True, to=settings.AUTH_USER_MODEL, null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='communicationnote',
            name='thread',
            field=models.ForeignKey(related_name='notes', to='comm.CommunicationThread'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='commattachment',
            name='note',
            field=models.ForeignKey(related_name='attachments', to='comm.CommunicationNote'),
            preserve_default=True,
        ),
    ]
| bsd-3-clause |
rds0751/colinkers | env/Lib/site-packages/django/template/loader.py | 126 | 2401 | from django.utils import six
from django.utils.deprecation import (
DeprecationInstanceCheck, RemovedInDjango20Warning,
)
from . import engines
from .base import Origin
from .exceptions import TemplateDoesNotExist
def get_template(template_name, using=None):
    """
    Load and return the template named ``template_name``.

    Each configured engine is tried in turn and the first successful load
    wins. If every engine fails, TemplateDoesNotExist is raised with the
    per-engine failures attached as ``chain``.
    """
    failures = []
    for engine in _engine_list(using):
        try:
            return engine.get_template(template_name)
        except TemplateDoesNotExist as exc:
            failures.append(exc)
    raise TemplateDoesNotExist(template_name, chain=failures)
def select_template(template_name_list, using=None):
    """
    Load and return the first template that can be found among the names.

    Names are tried in order, each against every configured engine.
    Raises TypeError when a bare string is passed (use get_template for
    that), and TemplateDoesNotExist when nothing could be loaded.
    """
    if isinstance(template_name_list, six.string_types):
        raise TypeError(
            'select_template() takes an iterable of template names but got a '
            'string: %r. Use get_template() if you want to load a single '
            'template by name.' % template_name_list
        )
    failures = []
    candidate_engines = _engine_list(using)
    for name in template_name_list:
        for engine in candidate_engines:
            try:
                return engine.get_template(name)
            except TemplateDoesNotExist as exc:
                failures.append(exc)
    if not template_name_list:
        raise TemplateDoesNotExist("No template names provided")
    raise TemplateDoesNotExist(', '.join(template_name_list), chain=failures)
def render_to_string(template_name, context=None, request=None, using=None):
    """
    Load a template and render it with ``context``; return the result string.

    ``template_name`` may be a single name or a list/tuple of candidate
    names (first one found wins).
    """
    if isinstance(template_name, (list, tuple)):
        loader = select_template
    else:
        loader = get_template
    template = loader(template_name, using=using)
    return template.render(context, request)
def _engine_list(using=None):
    """Return all configured engines, or only the one named by ``using``."""
    if using is None:
        return engines.all()
    return [engines[using]]
class LoaderOrigin(six.with_metaclass(DeprecationInstanceCheck, Origin)):
    """Deprecated alias of ``django.template.Origin``.

    The DeprecationInstanceCheck metaclass makes instantiation/isinstance
    checks emit RemovedInDjango20Warning, pointing users at ``alternative``.
    """
    alternative = 'django.template.Origin'
    deprecation_warning = RemovedInDjango20Warning
| agpl-3.0 |
ArionMiles/MIS-Bot | mis_bot/misbot/until_func.py | 1 | 2364 | import textwrap
from misbot.decorators import signed_up, premium
from misbot.mis_utils import until_x
@signed_up
@premium(tier=1)
def until_eighty(bot, update):
    """Calculate number of lectures you must consecutively attend before you attendance is 80%
    If :py:func:`misbot.mis_utils.until_x` returns a negative number, attendance is already over 80%
    :param bot: Telegram Bot object
    :type bot: telegram.bot.Bot
    :param update: Telegram Update object
    :type update: telegram.update.Update
    """
    # Show a "typing..." indicator while until_x computes.
    bot.send_chat_action(chat_id=update.message.chat_id, action='typing')
    no_of_lectures = int(until_x(update.message.chat_id, 80))
    # Negative result means the 80% target is already met.
    if no_of_lectures < 0:
        bot.sendMessage(chat_id=update.message.chat_id, text="Your attendance is already over 80%. Relax.")
    else:
        messageContent = "No. of lectures to attend: {}".format(no_of_lectures)
        bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)
@signed_up
@premium(tier=1)
def until(bot, update, args):
    """Like :py:func:`until_eighty` but the user supplies the target percentage.

    :param bot: Telegram Bot object
    :type bot: telegram.bot.Bot
    :param update: Telegram Update object
    :type update: telegram.update.Update
    :param args: User supplied arguments
    :type args: tuple
    :return: None
    :rtype: None
    """
    if len(args) == 0:
        messageContent = textwrap.dedent("""
        You must specify a number after the command to use this feature.
        E.g: `/until 75`
        """)
        bot.sendMessage(chat_id=update.message.chat_id, text=messageContent, parse_mode='markdown')
        return
    try:
        figure = float(args[0])
    except ValueError:
        # args[0] always exists after the length check above, so only a
        # non-numeric argument can land here (old IndexError handler was dead).
        bot.sendMessage(chat_id=update.message.chat_id, text="You must send a number between 1-99.")
        return
    # Bug fix: previously only values above 99 were rejected, so zero and
    # negative targets slipped through despite the "between 1-99" message.
    if not 1 <= figure <= 99:
        bot.sendMessage(chat_id=update.message.chat_id, text="You must send a number between 1-99.")
        return
    no_of_lectures = int(until_x(update.message.chat_id, figure))
    if no_of_lectures < 0:
        bot.sendMessage(chat_id=update.message.chat_id, text="Your attendance is already over {}%. Relax.".format(figure))
    else:
        messageContent = "No. of lectures to attend: {}".format(no_of_lectures)
        bot.sendMessage(chat_id=update.message.chat_id, text=messageContent)
| mit |
Dhivyap/ansible | lib/ansible/modules/network/fortios/fortios_router_bfd.py | 14 | 8927 | #!/usr/bin/python
from __future__ import (absolute_import, division, print_function)
# Copyright 2019 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fortios_router_bfd
short_description: Configure BFD in Fortinet's FortiOS and FortiGate.
description:
- This module is able to configure a FortiGate or FortiOS (FOS) device by allowing the
user to set and modify router feature and bfd category.
Examples include all parameters and values need to be adjusted to datasources before usage.
Tested with FOS v6.0.5
version_added: "2.8"
author:
- Miguel Angel Munoz (@mamunozgonzalez)
- Nicolas Thomas (@thomnico)
notes:
- Requires fortiosapi library developed by Fortinet
- Run as a local_action in your playbook
requirements:
- fortiosapi>=0.9.8
options:
host:
description:
- FortiOS or FortiGate IP address.
type: str
required: false
username:
description:
- FortiOS or FortiGate username.
type: str
required: false
password:
description:
- FortiOS or FortiGate password.
type: str
default: ""
vdom:
description:
- Virtual domain, among those defined previously. A vdom is a
virtual instance of the FortiGate that can be configured and
used as a different unit.
type: str
default: root
https:
description:
- Indicates if the requests towards FortiGate must use HTTPS protocol.
type: bool
default: true
ssl_verify:
description:
- Ensures FortiGate certificate must be verified by a proper CA.
type: bool
default: true
version_added: 2.9
router_bfd:
description:
- Configure BFD.
default: null
type: dict
suboptions:
neighbor:
description:
- neighbor
type: list
suboptions:
interface:
description:
- Interface name. Source system.interface.name.
type: str
ip:
description:
- IPv4 address of the BFD neighbor.
required: true
type: str
'''
EXAMPLES = '''
- hosts: localhost
vars:
host: "192.168.122.40"
username: "admin"
password: ""
vdom: "root"
ssl_verify: "False"
tasks:
- name: Configure BFD.
fortios_router_bfd:
host: "{{ host }}"
username: "{{ username }}"
password: "{{ password }}"
vdom: "{{ vdom }}"
https: "False"
router_bfd:
neighbor:
-
interface: "<your_own_value> (source system.interface.name)"
ip: "<your_own_value>"
'''
RETURN = '''
build:
description: Build number of the fortigate image
returned: always
type: str
sample: '1547'
http_method:
description: Last method used to provision the content into FortiGate
returned: always
type: str
sample: 'PUT'
http_status:
description: Last result given by FortiGate on last operation applied
returned: always
type: str
sample: "200"
mkey:
description: Master key (id) used in the last call to FortiGate
returned: success
type: str
sample: "id"
name:
description: Name of the table used to fulfill the request
returned: always
type: str
sample: "urlfilter"
path:
description: Path of the table used to fulfill the request
returned: always
type: str
sample: "webfilter"
revision:
description: Internal revision number
returned: always
type: str
sample: "17.0.2.10658"
serial:
description: Serial number of the unit
returned: always
type: str
sample: "FGVMEVYYQT3AB5352"
status:
description: Indication of the operation's result
returned: always
type: str
sample: "success"
vdom:
description: Virtual domain used
returned: always
type: str
sample: "root"
version:
description: Version of the FortiGate
returned: always
type: str
sample: "v5.6.3"
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible.module_utils.network.fortios.fortios import FortiOSHandler
from ansible.module_utils.network.fortimanager.common import FAIL_SOCKET_MSG
def login(data, fos):
    """Open a session on the FortiGate described by ``data`` via handler ``fos``.

    ``data`` must carry host, username, password and ssl_verify; the
    optional ``https`` flag selects the transport (HTTPS by default).
    """
    fos.debug('on')
    # Only fall back to plain HTTP when https is present and explicitly false.
    use_https = data.get('https', True)
    fos.https('on' if use_https else 'off')
    fos.login(data['host'], data['username'], data['password'],
              verify=data['ssl_verify'])
def filter_router_bfd_data(json):
    """Return a dict containing only the router-bfd options set in ``json``.

    Keys absent from the module's option list, or whose value is None,
    are dropped.
    """
    option_list = ['neighbor']
    return {option: json[option]
            for option in option_list
            if option in json and json[option] is not None}
def underscore_to_hyphen(data):
    """Recursively rewrite underscores to hyphens in every dict key of ``data``.

    Handles dicts and lists (including dicts nested inside lists); any
    other value is returned unchanged.
    """
    if isinstance(data, list):
        # Bug fix: the converted element must be written back into the list.
        # The old loop did ``elem = underscore_to_hyphen(elem)``, which
        # discarded the result, so dicts inside lists kept underscore keys.
        for i, elem in enumerate(data):
            data[i] = underscore_to_hyphen(elem)
    elif isinstance(data, dict):
        new_data = {}
        for k, v in data.items():
            new_data[k.replace('_', '-')] = underscore_to_hyphen(v)
        data = new_data
    return data
def router_bfd(data, fos):
    """Push the router-bfd configuration from ``data`` to the device.

    Filters out unset options, converts key underscores to the hyphenated
    form the FortiOS API expects, and issues the set() call in the
    requested vdom.
    """
    payload = underscore_to_hyphen(filter_router_bfd_data(data['router_bfd']))
    return fos.set('router',
                   'bfd',
                   data=payload,
                   vdom=data['vdom'])
def is_successful_status(status):
    """Whether a FortiGate reply succeeded.

    Success means the status field says so, or a DELETE came back 404
    (the object was already gone, which is the desired end state).
    """
    if status['status'] == "success":
        return True
    return status['http_method'] == "DELETE" and status['http_status'] == 404
def fortios_router(data, fos):
    """Dispatch the router-bfd update and normalise the device reply.

    Returns the (is_error, changed, response) triple that main() feeds to
    exit_json/fail_json.

    NOTE(review): if data['router_bfd'] is falsy, ``resp`` is never bound
    and the return statement raises UnboundLocalError — presumably main()
    always supplies the router_bfd dict; confirm before relying on it.
    """
    if data['router_bfd']:
        resp = router_bfd(data, fos)
    return not is_successful_status(resp), \
        resp['status'] == "success", \
        resp
def main():
    """Module entry point: declare the argument spec and apply the config.

    Two transports are supported: the legacy fortiosapi client (used when
    host/username/password are given) and the newer HTTPAPI connection
    plugin (used otherwise, via the persistent connection socket).
    """
    fields = {
        "host": {"required": False, "type": "str"},
        "username": {"required": False, "type": "str"},
        "password": {"required": False, "type": "str", "default": "", "no_log": True},
        "vdom": {"required": False, "type": "str", "default": "root"},
        "https": {"required": False, "type": "bool", "default": True},
        "ssl_verify": {"required": False, "type": "bool", "default": True},
        "router_bfd": {
            "required": False, "type": "dict", "default": None,
            "options": {
                "neighbor": {"required": False, "type": "list",
                             "options": {
                                 "interface": {"required": False, "type": "str"},
                                 "ip": {"required": True, "type": "str"}
                             }}
            }
        }
    }
    module = AnsibleModule(argument_spec=fields,
                           supports_check_mode=False)
    # legacy_mode refers to using fortiosapi instead of HTTPAPI
    legacy_mode = 'host' in module.params and module.params['host'] is not None and \
                  'username' in module.params and module.params['username'] is not None and \
                  'password' in module.params and module.params['password'] is not None
    if not legacy_mode:
        # HTTPAPI path: requires a persistent connection socket from Ansible.
        if module._socket_path:
            connection = Connection(module._socket_path)
            fos = FortiOSHandler(connection)
            is_error, has_changed, result = fortios_router(module.params, fos)
        else:
            module.fail_json(**FAIL_SOCKET_MSG)
    else:
        # Legacy path: import lazily so the module loads without fortiosapi
        # when HTTPAPI is used.
        try:
            from fortiosapi import FortiOSAPI
        except ImportError:
            module.fail_json(msg="fortiosapi module is required")
        fos = FortiOSAPI()
        login(module.params, fos)
        is_error, has_changed, result = fortios_router(module.params, fos)
        fos.logout()
    if not is_error:
        module.exit_json(changed=has_changed, meta=result)
    else:
        module.fail_json(msg="Error in repo", meta=result)
if __name__ == '__main__':
    main()
| gpl-3.0 |
Chiroptera/QCThesis | MyML/utils/sorting.py | 3 | 1629 | import numpy as np
from numba import jit
#
# jitted version was 110 times faster than unjitted for 1e6 array
# ported and adapted to arg-k-select from:
# http://blog.teamleadnet.com/2012/07/quick-select-algorithm-find-kth-element.html
@jit(nopython=True)
def arg_k_select(ary, k, out):
    """Write the indices of the k smallest elements of ``ary`` into ``out``.

    In-place arg-quickselect: an index array is partitioned around pivot
    values until the first ``k`` slots hold the (unordered) indices of the
    k smallest values. ``out`` must have room for at least ``k`` int32
    entries.

    Ported and adapted to arg-k-select from:
    http://blog.teamleadnet.com/2012/07/quick-select-algorithm-find-kth-element.html
    """
    args = np.empty(ary.size, dtype=np.int32)
    for i in range(args.size):
        args[i] = i
    fro = 0
    to = ary.size - 1
    while fro < to:
        r = fro
        w = to
        # Floor division: plain ``/`` yields a float here, which is an
        # invalid index under Python 3 and numba's nopython mode.
        mid_arg = args[(r + w) // 2]
        mid = ary[mid_arg]
        # Hoare-style partition of the index array around the pivot value.
        while r < w:
            r_arg = args[r]
            if ary[r_arg] >= mid:
                tmp = args[w]
                args[w] = args[r]
                args[r] = tmp
                w -= 1
            else:
                r += 1
        r_arg = args[r]
        if ary[r_arg] > mid:
            r -= 1
        # Recurse (iteratively) into the side that still contains rank k.
        if k <= r:
            to = r
        else:
            fro = r + 1
    for i in range(k):
        out[i] = args[i]
def quicksort(array):
    """Sort ``array`` in place via recursive Hoare-style partitioning."""
    _quicksort(array, 0, len(array) - 1)


def _quicksort(array, start, stop):
    """Sort the slice array[start:stop + 1] in place."""
    if stop <= start:
        return
    pivot = array[start]
    lo, hi = start, stop
    while lo <= hi:
        # Advance both cursors past elements already on the correct side.
        while array[lo] < pivot:
            lo += 1
        while array[hi] > pivot:
            hi -= 1
        if lo <= hi:
            array[lo], array[hi] = array[hi], array[lo]
            lo += 1
            hi -= 1
    _quicksort(array, start, hi)
    _quicksort(array, lo, stop)
| mit |
reubano/changanya | changanya/nilsimsa.py | 1 | 5127 | # -*- coding: utf-8 -*-
# vim: sw=4:ts=4:expandtab
"""
Implementation of Nilsimsa hashes (signatures) in Python.
Most useful for filtering spam by creating signatures of documents to
find near-duplicates. Charikar similarity hashes can be used on any
datastream, whereas Nilsimsa is a digest ideal for documents (written
in any language) because it uses histograms of [rolling] trigraphs
instead of the usual bag-of-words model where order doesn't matter.
Related paper: http://spdp.dti.unimi.it/papers/pdcs04.pdf
Part of changanya by reubano. See README and LICENSE.
"""
from changanya.hashtype import Hashtype
_TRAN = (
"\x02\xD6\x9E\x6F\xF9\x1D\x04\xAB\xD0\x22\x16\x1F\xD8\x73\xA1\xAC"
"\x3B\x70\x62\x96\x1E\x6E\x8F\x39\x9D\x05\x14\x4A\xA6\xBE\xAE\x0E"
"\xCF\xB9\x9C\x9A\xC7\x68\x13\xE1\x2D\xA4\xEB\x51\x8D\x64\x6B\x50"
"\x23\x80\x03\x41\xEC\xBB\x71\xCC\x7A\x86\x7F\x98\xF2\x36\x5E\xEE"
"\x8E\xCE\x4F\xB8\x32\xB6\x5F\x59\xDC\x1B\x31\x4C\x7B\xF0\x63\x01"
"\x6C\xBA\x07\xE8\x12\x77\x49\x3C\xDA\x46\xFE\x2F\x79\x1C\x9B\x30"
"\xE3\x00\x06\x7E\x2E\x0F\x38\x33\x21\xAD\xA5\x54\xCA\xA7\x29\xFC"
"\x5A\x47\x69\x7D\xC5\x95\xB5\xF4\x0B\x90\xA3\x81\x6D\x25\x55\x35"
"\xF5\x75\x74\x0A\x26\xBF\x19\x5C\x1A\xC6\xFF\x99\x5D\x84\xAA\x66"
"\x3E\xAF\x78\xB3\x20\x43\xC1\xED\x24\xEA\xE6\x3F\x18\xF3\xA0\x42"
"\x57\x08\x53\x60\xC3\xC0\x83\x40\x82\xD7\x09\xBD\x44\x2A\x67\xA8"
"\x93\xE0\xC2\x56\x9F\xD9\xDD\x85\x15\xB4\x8A\x27\x28\x92\x76\xDE"
"\xEF\xF8\xB2\xB7\xC9\x3D\x45\x94\x4B\x11\x0D\x65\xD5\x34\x8B\x91"
"\x0C\xFA\x87\xE9\x7C\x5B\xB1\x4D\xE5\xD4\xCB\x10\xA2\x17\x89\xBC"
"\xDB\xB0\xE2\x97\x88\x52\xF7\x48\xD3\x61\x2C\x3A\x2B\xD1\x8C\xFB"
"\xF1\xCD\xE4\x6A\xE7\xA9\xFD\xC4\x37\xC8\xD2\xF6\xDF\x58\x72\x4E")
TRAN = [ord(x) for x in _TRAN]
DEF_HASHBITS = 256
class Nilsimsa(Hashtype):
    """Nilsimsa locality-sensitive digest built from rolling trigraph counts.

    Every character contributes up to 8 trigraphs formed with a window of
    the previous four characters; each trigraph bumps one accumulator, and
    digest bits are set where the accumulator exceeds the mean count.
    """
    def __init__(self, data='', hashbits=DEF_HASHBITS):
        self.hashtype = Nilsimsa
        # Base class is expected to store hashbits; confirmed only by usage below.
        super(Nilsimsa, self).__init__(hashbits)
        self.count = 0 # num characters seen
        self.acc = [0] * self.hashbits # accumulators for computing digest
        self.last = [-1] * 4 # last four seen chars (-1 until set)
        self.hash = self.create_hash(data)
    def _tran3(self, a, b, c, n):
        """Get accumulator for a transition n between chars a, b, c."""
        # NOTE: * binds tighter than ^, so the middle term is
        # TRAN[index] ^ (TRAN[b] * multiple).
        multiple = (n + n + 1)
        index = (a + n) & (self.hashbits - 1)
        acc = (TRAN[index] ^ TRAN[b] * multiple) + TRAN[(c) ^ TRAN[n]]
        return acc & (self.hashbits - 1)
    def _digest(self):
        """Get digest of data seen thus far as a list of bytes."""
        total = 0 # number of triplets seen
        if self.count == 3: # 3 chars = 1 triplet
            total = 1
        elif self.count == 4: # 4 chars = 4 triplets
            total = 4
        elif self.count > 4: # otherwise 8 triplets/char less
            total = 8 * self.count - 28 # 28 'missed' during 'ramp-up'
        # NOTE(review): written for Python 2 where / is integer division;
        # under Python 3 the threshold becomes a float, which can change
        # which accumulators pass the > comparison — confirm before porting.
        threshold = total / self.hashbits # threshold for accumulators
        code = [0] * self.hashbits # start with all zero bits
        for i in range(self.hashbits): # for all accumulators
            if self.acc[i] > threshold: # if it meets the threshold
                code[i >> 3] += 1 << (i & 7) # set corresponding digest bit
        code = code[::-1] # reverse the byte order
        out = 0
        for i in range(self.hashbits): # turn bit list into real bits
            if code[i]:
                out += 1 << i
        return out
    def create_hash(self, data):
        """Calculates a Nilsimsa signature with appropriate bitlength.
        Input must be a string. Returns nothing.
        Reference: http://ixazon.dynip.com/~cmeclax/nilsimsa.html
        """
        if type(data) != str:
            raise Exception('Nilsimsa hashes can only be created on strings')
        """Add data to running digest, increasing the accumulators for 0-8
        triplets formed by this char and the previous 0-3 chars."""
        for character in data:
            ch = ord(character)
            self.count += 1
            # incr accumulators for triplets; guards skip windows not yet
            # filled during the first four characters ('ramp-up').
            if self.last[1] > -1:
                self.acc[self._tran3(ch, self.last[0], self.last[1], 0)] += 1
            if self.last[2] > -1:
                self.acc[self._tran3(ch, self.last[0], self.last[2], 1)] += 1
                self.acc[self._tran3(ch, self.last[1], self.last[2], 2)] += 1
            if self.last[3] > -1:
                self.acc[self._tran3(ch, self.last[0], self.last[3], 3)] += 1
                self.acc[self._tran3(ch, self.last[1], self.last[3], 4)] += 1
                self.acc[self._tran3(ch, self.last[2], self.last[3], 5)] += 1
                self.acc[self._tran3(self.last[3], self.last[0], ch, 6)] += 1
                self.acc[self._tran3(self.last[3], self.last[2], ch, 7)] += 1
            # adjust last seen chars (newest first, window of four)
            self.last = [ch] + self.last[:3]
        return self._digest()
| mit |
mchristopher/PokemonGo-DesktopMap | app/pywin/Lib/base64.py | 23 | 11807 | #! /usr/bin/env python
"""RFC 3548: Base16, Base32, Base64 Data Encodings"""
# Modified 04-Oct-1995 by Jack Jansen to use binascii module
# Modified 30-Dec-2003 by Barry Warsaw to add full RFC 3548 support
import re
import struct
import string
import binascii
__all__ = [
# Legacy interface exports traditional RFC 1521 Base64 encodings
'encode', 'decode', 'encodestring', 'decodestring',
# Generalized interface for other encodings
'b64encode', 'b64decode', 'b32encode', 'b32decode',
'b16encode', 'b16decode',
# Standard Base64 encoding
'standard_b64encode', 'standard_b64decode',
# Some common Base64 alternatives. As referenced by RFC 3458, see thread
# starting at:
#
# http://zgp.org/pipermail/p2p-hackers/2001-September/000316.html
'urlsafe_b64encode', 'urlsafe_b64decode',
]
_translation = [chr(_x) for _x in range(256)]
EMPTYSTRING = ''
def _translate(s, altchars):
    """Translate ``s`` through an identity table patched with ``altchars``.

    ``altchars`` maps single characters to their replacements; every other
    character passes through unchanged.
    """
    table = _translation[:]
    for original, replacement in altchars.items():
        table[ord(original)] = replacement
    return s.translate(''.join(table))
# Base64 encoding/decoding uses binascii
def b64encode(s, altchars=None):
    """Encode a string using Base64.

    s is the string to encode. Optional altchars must be a string of at
    least length 2 (additional characters are ignored) which specifies an
    alternative alphabet for the '+' and '/' characters. This allows an
    application to e.g. generate url or filesystem safe Base64 strings.

    The encoded string is returned.
    """
    # b2a_base64 always appends a newline; strip it off.
    encoded = binascii.b2a_base64(s)[:-1]
    if altchars is None:
        return encoded
    return encoded.translate(string.maketrans(b'+/', altchars[:2]))
def b64decode(s, altchars=None):
    """Decode a Base64 encoded string.

    s is the string to decode. Optional altchars must be a string of at least
    length 2 (additional characters are ignored) which specifies the
    alternative alphabet used instead of the '+' and '/' characters.

    The decoded string is returned. A TypeError is raised if s is
    incorrectly padded. Characters that are neither in the normal base-64
    alphabet nor the alternative alphabet are discarded prior to the padding
    check.
    """
    if altchars is not None:
        s = s.translate(string.maketrans(altchars[:2], '+/'))
    try:
        return binascii.a2b_base64(s)
    # Fix: "except E, name" is Python-2-only syntax; the "as" form is valid
    # on Python 2.6+ and Python 3 alike.
    except binascii.Error as msg:
        # Transform this exception for consistency
        raise TypeError(msg)
def standard_b64encode(s):
    """Encode ``s`` with the standard Base64 alphabet; return the result."""
    return b64encode(s)
def standard_b64decode(s):
    """Decode ``s`` from the standard Base64 alphabet.

    Raises TypeError on incorrect padding; characters outside the standard
    alphabet are discarded before the padding check.
    """
    return b64decode(s)
_urlsafe_encode_translation = string.maketrans(b'+/', b'-_')
_urlsafe_decode_translation = string.maketrans(b'-_', b'+/')
def urlsafe_b64encode(s):
    """Encode ``s`` with the URL- and filesystem-safe Base64 alphabet.

    Identical to standard Base64 except that '-' replaces '+' and '_'
    replaces '/'.
    """
    encoded = b64encode(s)
    return encoded.translate(_urlsafe_encode_translation)
def urlsafe_b64decode(s):
    """Decode ``s`` from the URL- and filesystem-safe Base64 alphabet
    ('-' instead of '+', '_' instead of '/').

    Raises TypeError on incorrect padding; other non-alphabet characters
    are discarded before the padding check.
    """
    normalized = s.translate(_urlsafe_decode_translation)
    return b64decode(normalized)
# Base32 encoding/decoding must be done in Python
_b32alphabet = {
0: 'A', 9: 'J', 18: 'S', 27: '3',
1: 'B', 10: 'K', 19: 'T', 28: '4',
2: 'C', 11: 'L', 20: 'U', 29: '5',
3: 'D', 12: 'M', 21: 'V', 30: '6',
4: 'E', 13: 'N', 22: 'W', 31: '7',
5: 'F', 14: 'O', 23: 'X',
6: 'G', 15: 'P', 24: 'Y',
7: 'H', 16: 'Q', 25: 'Z',
8: 'I', 17: 'R', 26: '2',
}
_b32tab = _b32alphabet.items()
_b32tab.sort()
_b32tab = [v for k, v in _b32tab]
_b32rev = dict([(v, long(k)) for k, v in _b32alphabet.items()])
def b32encode(s):
    """Encode a string using Base32.
    s is the string to encode. The encoded string is returned.
    """
    parts = []
    quanta, leftover = divmod(len(s), 5)
    # Pad the last quantum with zero bits if necessary
    if leftover:
        s += ('\0' * (5 - leftover))
        quanta += 1
    for i in range(quanta):
        # c1 and c2 are 16 bits wide, c3 is 8 bits wide. The intent of this
        # code is to process the 40 bits in units of 5 bits. So we take the 1
        # leftover bit of c1 and tack it onto c2. Then we take the 2 leftover
        # bits of c2 and tack them onto c3. The shifts and masks are intended
        # to give us values of exactly 5 bits in width.
        c1, c2, c3 = struct.unpack('!HHB', s[i*5:(i+1)*5])
        c2 += (c1 & 1) << 16 # 17 bits wide
        c3 += (c2 & 3) << 8 # 10 bits wide
        parts.extend([_b32tab[c1 >> 11], # bits 1 - 5
                      _b32tab[(c1 >> 6) & 0x1f], # bits 6 - 10
                      _b32tab[(c1 >> 1) & 0x1f], # bits 11 - 15
                      _b32tab[c2 >> 12], # bits 16 - 20 (1 - 5)
                      _b32tab[(c2 >> 7) & 0x1f], # bits 21 - 25 (6 - 10)
                      _b32tab[(c2 >> 2) & 0x1f], # bits 26 - 30 (11 - 15)
                      _b32tab[c3 >> 5], # bits 31 - 35 (1 - 5)
                      _b32tab[c3 & 0x1f], # bits 36 - 40 (1 - 5)
                      ])
    encoded = EMPTYSTRING.join(parts)
    # Adjust for any leftover partial quanta
    # A trailing group of 1/2/3/4 input bytes carries 2/4/5/7 significant
    # symbols, so the final 6/4/3/1 symbols become '=' padding.
    if leftover == 1:
        return encoded[:-6] + '======'
    elif leftover == 2:
        return encoded[:-4] + '===='
    elif leftover == 3:
        return encoded[:-3] + '==='
    elif leftover == 4:
        return encoded[:-1] + '='
    return encoded
def b32decode(s, casefold=False, map01=None):
    """Decode a Base32 encoded string.
    s is the string to decode. Optional casefold is a flag specifying whether
    a lowercase alphabet is acceptable as input. For security purposes, the
    default is False.
    RFC 3548 allows for optional mapping of the digit 0 (zero) to the letter O
    (oh), and for optional mapping of the digit 1 (one) to either the letter I
    (eye) or letter L (el). The optional argument map01 when not None,
    specifies which letter the digit 1 should be mapped to (when map01 is not
    None, the digit 0 is always mapped to the letter O). For security
    purposes the default is None, so that 0 and 1 are not allowed in the
    input.
    The decoded string is returned. A TypeError is raised if s were
    incorrectly padded or if there are non-alphabet characters present in the
    string.
    """
    # Base32 input always comes in whole 8-symbol quanta.
    quanta, leftover = divmod(len(s), 8)
    if leftover:
        raise TypeError('Incorrect padding')
    # Handle section 2.4 zero and one mapping. The flag map01 will be either
    # False, or the character to map the digit 1 (one) to. It should be
    # either L (el) or I (eye).
    if map01:
        s = s.translate(string.maketrans(b'01', b'O' + map01))
    if casefold:
        s = s.upper()
    # Strip off pad characters from the right. We need to count the pad
    # characters because this will tell us how many null bytes to remove from
    # the end of the decoded string.
    padchars = 0
    mo = re.search('(?P<pad>[=]*)$', s)
    if mo:
        padchars = len(mo.group('pad'))
    if padchars > 0:
        s = s[:-padchars]
    # Now decode the full quanta
    # Accumulate 8 symbols (5 bits each = 40 bits) then emit 5 bytes.
    parts = []
    acc = 0
    shift = 35
    for c in s:
        val = _b32rev.get(c)
        if val is None:
            raise TypeError('Non-base32 digit found')
        acc += _b32rev[c] << shift
        shift -= 5
        if shift < 0:
            parts.append(binascii.unhexlify('%010x' % acc))
            acc = 0
            shift = 35
    # Process the last, partial quanta
    # The pad count maps to how many of the final 5 bytes are real:
    # 1/3/4/6 '=' chars leave 4/3/2/1 data bytes; any other count is invalid.
    last = binascii.unhexlify('%010x' % acc)
    if padchars == 0:
        last = '' # No characters
    elif padchars == 1:
        last = last[:-1]
    elif padchars == 3:
        last = last[:-2]
    elif padchars == 4:
        last = last[:-3]
    elif padchars == 6:
        last = last[:-4]
    else:
        raise TypeError('Incorrect padding')
    parts.append(last)
    return EMPTYSTRING.join(parts)
# RFC 3548, Base 16 Alphabet specifies uppercase, but hexlify() returns
# lowercase. The RFC also recommends against accepting input case
# insensitively.
def b16encode(s):
    """Encode ``s`` using Base16 (uppercase hexadecimal); return the result."""
    hex_digits = binascii.hexlify(s)
    return hex_digits.upper()
def b16decode(s, casefold=False):
    """Decode the Base16 encoded string ``s``.

    When ``casefold`` is true, lowercase input is accepted (it is folded to
    uppercase first); otherwise, per RFC 3548's recommendation, lowercase
    digits — like any other non-alphabet character — raise TypeError.
    """
    candidate = s.upper() if casefold else s
    if re.search('[^0-9A-F]', candidate):
        raise TypeError('Non-base16 digit found')
    return binascii.unhexlify(candidate)
# Legacy interface. This code could be cleaned up since I don't believe
# binascii has any line length limitations. It just doesn't seem worth it
# though.
MAXLINESIZE = 76 # Excluding the CRLF
MAXBINSIZE = (MAXLINESIZE//4)*3


def encode(input, output):
    """Encode the binary file ``input`` onto ``output``, one full
    MAXLINESIZE-character base-64 line at a time."""
    while True:
        chunk = input.read(MAXBINSIZE)
        if not chunk:
            break
        # Top up short reads so every emitted line (except possibly the
        # last) encodes a full quantum of input.
        while len(chunk) < MAXBINSIZE:
            more = input.read(MAXBINSIZE - len(chunk))
            if not more:
                break
            chunk += more
        output.write(binascii.b2a_base64(chunk))
def decode(input, output):
    """Decode base-64 lines from file ``input``, writing binary to ``output``."""
    while True:
        encoded_line = input.readline()
        if not encoded_line:
            break
        output.write(binascii.a2b_base64(encoded_line))
def encodestring(s):
    """Encode a string into multiple newline-terminated lines of base-64 data."""
    pieces = [binascii.b2a_base64(s[pos:pos + MAXBINSIZE])
              for pos in range(0, len(s), MAXBINSIZE)]
    return "".join(pieces)
def decodestring(s):
    """Decode a base-64 encoded string."""
    return binascii.a2b_base64(s)
# Useable as a script...
def test():
    """Small test program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        # Bad flags: print usage to stderr and exit with a usage error code.
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'"""%sys.argv[0]
        sys.exit(2)
    func = encode
    # Last flag wins; -t runs the round-trip self-test and returns.
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout)
    else:
        # No file argument (or '-'): filter stdin to stdout.
        func(sys.stdin, sys.stdout)
def test1():
    """Round-trip self-test: encode then decode a known string, print all three."""
    s0 = "Aladdin:open sesame"
    s1 = encodestring(s0)
    s2 = decodestring(s1)
    print s0, repr(s1), s2
if __name__ == '__main__':
    test()
| mit |
Morisset/PyNeb_devel | pyneb/sample_scripts/Choroni_School/ex7_3.py | 1 | 2278 | # Analysis of a simple two-component model, meant to illustrate the bias arising from assuming
# that the region is homogeneous in density
# First, an emission region made up of two different subregions is modelled,
# each with a different mass and density. The resulting overall emissivity is computed
# Second, the region is analyzed as if it were a homogeneous region
import pyneb as pn
import matplotlib.pyplot as plt
from pyneb.utils.misc import parseAtom
def plot_2comp(tem1=1e4, tem2=1e4, dens1=3e2, dens2=5e5, mass1=1, mass2=5e-4):
    """Model a two-density-component emission region and plot its diagnostics.

    Line intensities are synthesised for each sub-region (temperature
    ``tem*``, density ``dens*``, relative mass ``mass*``) and for their sum,
    then diagnostic diagrams are drawn for each component and for the
    combined observation analyzed as if it were homogeneous.
    """
    # List of diagnostics used to analyze the region
    diags = pn.Diagnostics()
    for diag in pn.diags_dict:
        if diag[0:7] != '[FeIII]':
            diags.addDiag(diag)
    diags.addClabel('[SIII] 6312/9069', '[SIII]A')
    diags.addClabel('[OIII] 4363/5007', '[OIII]A')
    # Define all the ions that are involved in the diagnostics
    all_atoms = diags.atomDict
    pn.log_.message('Atoms built')
    obs = pn.Observation(corrected = True)
    for atom in all_atoms:
        # Computes all the intensities of all the lines of all the ions considered
        for wavelength in all_atoms[atom].lineList:
            elem, spec = parseAtom(atom)
            # Emissivity * density * mass gives each component's contribution;
            # the third "observation" is the blend of both components.
            intens1 = all_atoms[atom].getEmissivity(tem1, dens1, wave = wavelength) * dens1 * mass1
            intens2 = all_atoms[atom].getEmissivity(tem2, dens2, wave = wavelength) * dens2 * mass2
            obs.addLine(pn.EmissionLine(elem, spec, wavelength,
                                        obsIntens=[intens1, intens2, intens1+intens2],
                                        obsError=[0.0, 0.0, 0.0]))
    pn.log_.message('Virtual observations computed')
    emisgrids = pn.getEmisGridDict(atomDict = all_atoms)
    pn.log_.message('EmisGrids available')
    # Produce a diagnostic plot for each of the two regions and another one for the (misanalyzed) overall region
    plt.subplot(2,2,1)
    diags.plot(emisgrids, obs, i_obs=0)
    plt.subplot(2,2,2)
    diags.plot(emisgrids, obs, i_obs=1)
    plt.subplot(2,1,2)
    pn.log_.level=3
    diags.plot(emisgrids, obs, i_obs=2)
if __name__ == '__main__':
    plot_2comp(tem1=1e4, tem2=1e4, dens1=3e2, dens2=5e5, mass1=1, mass2=5e-4)
    plt.show()
| gpl-3.0 |
drawks/ansible | hacking/build_library/build_ansible/command_plugins/porting_guide.py | 9 | 3245 | # coding: utf-8
# Copyright: (c) 2019, Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import argparse
import os.path
import sys
from jinja2 import Environment, DictLoader
# Pylint doesn't understand Python3 namespace modules.
from ..commands import Command # pylint: disable=relative-beyond-top-level
PORTING_GUIDE_TEMPLATE = """
.. _porting_{{ ver }}_guide:
*************************
Ansible {{ ver }} Porting Guide
*************************
This section discusses the behavioral changes between Ansible {{ prev_ver }} and Ansible {{ ver }}.
It is intended to assist in updating your playbooks, plugins and other parts of your Ansible infrastructure so they will work with this version of Ansible.
We suggest you read this page along with `Ansible Changelog for {{ ver }} <https://github.com/ansible/ansible/blob/devel/changelogs/CHANGELOG-v{{ ver }}.rst>`_ to understand what updates you may need to make.
This document is part of a collection on porting. The complete list of porting guides can be found at :ref:`porting guides <porting_guides>`.
.. contents:: Topics
Playbook
========
No notable changes
Command Line
============
No notable changes
Deprecated
==========
No notable changes
Modules
=======
No notable changes
Modules removed
---------------
The following modules no longer exist:
* No notable changes
Deprecation notices
-------------------
No notable changes
Noteworthy module changes
-------------------------
No notable changes
Plugins
=======
No notable changes
Porting custom scripts
======================
No notable changes
Networking
==========
No notable changes
""" # noqa for E501 (line length).
# jinja2 is horrid about getting rid of extra newlines so we have to have a single line per
# paragraph for proper wrapping to occur
# Shared template environment; trim_blocks/lstrip_blocks keep the rendered
# guide free of stray blank lines introduced by Jinja block tags.
JINJA_ENV = Environment(
    loader=DictLoader({'porting_guide': PORTING_GUIDE_TEMPLATE,
                       }),
    extensions=['jinja2.ext.i18n'],
    trim_blocks=True,
    lstrip_blocks=True,
)
def generate_porting_guide(version):
    """Render the porting guide template for *version*.

    The previous release is derived by decrementing the last component of
    the dotted version string (e.g. ``'2.8'`` -> ``'2.7'``).

    :arg version: dotted version string of the release the guide targets.
    :returns: the rendered guide as a string.
    :raises ValueError: if the last version component is not a positive
        integer -- previously a ``.0`` release silently produced a nonsense
        previous version such as ``'2.-1'``.
    """
    template = JINJA_ENV.get_template('porting_guide')

    version_list = version.split('.')
    # Guard the ".0" edge case: naive decrement would yield "-1".
    last_component = int(version_list[-1])
    if last_component < 1:
        raise ValueError('Cannot derive the previous version of %s' % version)
    version_list[-1] = str(last_component - 1)
    previous_version = '.'.join(version_list)

    content = template.render(ver=version, prev_ver=previous_version)
    return content
def write_guide(version, guide_content):
    """Write *guide_content* to ``porting_guide_<version>.rst`` in the cwd."""
    out_path = 'porting_guide_{0}.rst'.format(version)
    with open(out_path, 'w') as handle:
        handle.write(guide_content)
class PortingGuideCommand(Command):
    """Subcommand that emits a fresh porting guide skeleton for a release."""
    name = 'porting-guide'

    @classmethod
    def init_parser(cls, add_parser):
        # Register the subcommand and its single, mandatory --version option.
        parser = add_parser(cls.name,
                            description="Generate a fresh porting guide template")
        parser.add_argument("--version", dest="version", type=str, required=True,
                            action='store',
                            help="Version of Ansible to write the porting guide for")

    @staticmethod
    def main(args):
        # Render and write in one go; 0 signals success to the CLI driver.
        write_guide(args.version, generate_porting_guide(args.version))
        return 0
| gpl-3.0 |
skycucumber/xuemc | python/venv/lib/python2.7/site-packages/whoosh/analysis/__init__.py | 96 | 3288 | # Copyright 2007 Matt Chaput. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY MATT CHAPUT ``AS IS'' AND ANY EXPRESS OR
# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO
# EVENT SHALL MATT CHAPUT OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA,
# OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
# EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# The views and conclusions contained in the software and documentation are
# those of the authors and should not be interpreted as representing official
# policies, either expressed or implied, of Matt Chaput.
"""Classes and functions for turning a piece of text into an indexable stream
of "tokens" (usually equivalent to words). There are three general classes
involved in analysis:
* Tokenizers are always at the start of the text processing pipeline. They take
a string and yield Token objects (actually, the same token object over and
over, for performance reasons) corresponding to the tokens (words) in the
text.
Every tokenizer is a callable that takes a string and returns an iterator of
tokens.
* Filters take the tokens from the tokenizer and perform various
transformations on them. For example, the LowercaseFilter converts all tokens
to lowercase, which is usually necessary when indexing regular English text.
Every filter is a callable that takes a token generator and returns a token
generator.
* Analyzers are convenience functions/classes that "package up" a tokenizer and
zero or more filters into a single unit. For example, the StandardAnalyzer
combines a RegexTokenizer, LowercaseFilter, and StopFilter.
Every analyzer is a callable that takes a string and returns a token
iterator. (So Tokenizers can be used as Analyzers if you don't need any
filtering).
You can compose tokenizers and filters together using the ``|`` character::
my_analyzer = RegexTokenizer() | LowercaseFilter() | StopFilter()
The first item must be a tokenizer and the rest must be filters (you can't put
a filter first or a tokenizer after the first item).
"""
from whoosh.analysis.acore import *
from whoosh.analysis.tokenizers import *
from whoosh.analysis.filters import *
from whoosh.analysis.morph import *
from whoosh.analysis.intraword import *
from whoosh.analysis.ngrams import *
from whoosh.analysis.analyzers import *
| gpl-2.0 |
wanglongqi/sympy | sympy/physics/mechanics/tests/test_lagrange2.py | 51 | 1411 | from sympy import symbols
from sympy.physics.mechanics import dynamicsymbols
from sympy.physics.mechanics import ReferenceFrame, Point, Particle
from sympy.physics.mechanics import LagrangesMethod, Lagrangian
### This test asserts that a system with more than one external force
### is accurately formed with the Lagrange method (see issue #8626)
def test_lagrange_2forces():
    """Two damped springs in series, each with its own friction force."""
    # Generalized coordinates and their first time derivatives.
    q1, q2 = dynamicsymbols('q1, q2')
    q1d, q2d = dynamicsymbols('q1, q2', 1)
    # Mass, spring strength, friction coefficient.
    m, k, nu = symbols('m, k, nu')

    N = ReferenceFrame('N')
    O = Point('O')

    # Two mass points constrained to move along N.x.
    P1 = O.locatenew('P1', q1 * N.x)
    P1.set_vel(N, q1d * N.x)
    P2 = O.locatenew('P1', q2 * N.x)
    P2.set_vel(N, q2d * N.x)

    pP1 = Particle('pP1', P1, m)
    pP1.potential_energy = k * q1**2 / 2
    pP2 = Particle('pP2', P2, m)
    pP2.potential_energy = k * (q1 - q2)**2 / 2

    # Viscous friction acting on each point separately.
    forcelist = [(P1, -nu * q1d * N.x), (P2, -nu * q2d * N.x)]

    lag = Lagrangian(N, pP1, pP2)
    l_method = LagrangesMethod(lag, (q1, q2), forcelist=forcelist, frame=N)
    l_method.form_lagranges_equations()

    # Each equation of motion must carry its own damping contribution.
    assert l_method.eom[0].diff(q1d) == nu
    assert l_method.eom[1].diff(q2d) == nu
| bsd-3-clause |
twiest/openshift-tools | openshift/installer/vendored/openshift-ansible-3.5.127/roles/lib_openshift/src/test/unit/test_oc_process.py | 82 | 21084 | '''
Unit tests for oc process
'''
import os
import six
import sys
import unittest
import mock
# Removing invalid variable names for tests so that I can
# keep them brief
# pylint: disable=invalid-name,no-name-in-module
# Disable import-error b/c our libraries aren't loaded in jenkins
# pylint: disable=import-error
# place class in our python path
module_path = os.path.join('/'.join(os.path.realpath(__file__).split('/')[:-4]), 'library') # noqa: E501
sys.path.insert(0, module_path)
from oc_process import OCProcess, locate_oc_binary # noqa: E402
# pylint: disable=too-many-public-methods
class OCProcessTest(unittest.TestCase):
'''
Test class for OCProcess
'''
mysql = '''{
"kind": "Template",
"apiVersion": "v1",
"metadata": {
"name": "mysql-ephemeral",
"namespace": "openshift",
"selfLink": "/oapi/v1/namespaces/openshift/templates/mysql-ephemeral",
"uid": "fb8b5f04-e3d3-11e6-a982-0e84250fc302",
"resourceVersion": "480",
"creationTimestamp": "2017-01-26T14:30:27Z",
"annotations": {
"iconClass": "icon-mysql-database",
"openshift.io/display-name": "MySQL (Ephemeral)",
"tags": "database,mysql"
}
},
"objects": [
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"creationTimestamp": null,
"name": "${DATABASE_SERVICE_NAME}"
},
"spec": {
"ports": [
{
"name": "mysql",
"nodePort": 0,
"port": 3306,
"protocol": "TCP",
"targetPort": 3306
}
],
"selector": {
"name": "${DATABASE_SERVICE_NAME}"
},
"sessionAffinity": "None",
"type": "ClusterIP"
},
"status": {
"loadBalancer": {}
}
},
{
"apiVersion": "v1",
"kind": "DeploymentConfig",
"metadata": {
"creationTimestamp": null,
"name": "${DATABASE_SERVICE_NAME}"
},
"spec": {
"replicas": 1,
"selector": {
"name": "${DATABASE_SERVICE_NAME}"
},
"strategy": {
"type": "Recreate"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"name": "${DATABASE_SERVICE_NAME}"
}
},
"spec": {
"containers": [
{
"capabilities": {},
"env": [
{
"name": "MYSQL_USER",
"value": "${MYSQL_USER}"
},
{
"name": "MYSQL_PASSWORD",
"value": "${MYSQL_PASSWORD}"
},
{
"name": "MYSQL_DATABASE",
"value": "${MYSQL_DATABASE}"
}
],
"image": " ",
"imagePullPolicy": "IfNotPresent",
"livenessProbe": {
"initialDelaySeconds": 30,
"tcpSocket": {
"port": 3306
},
"timeoutSeconds": 1
},
"name": "mysql",
"ports": [
{
"containerPort": 3306,
"protocol": "TCP"
}
],
"readinessProbe": {
"exec": {
"command": [
"/bin/sh",
"-i",
"-c",
"MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"
]
},
"initialDelaySeconds": 5,
"timeoutSeconds": 1
},
"resources": {
"limits": {
"memory": "${MEMORY_LIMIT}"
}
},
"securityContext": {
"capabilities": {},
"privileged": false
},
"terminationMessagePath": "/dev/termination-log",
"volumeMounts": [
{
"mountPath": "/var/lib/mysql/data",
"name": "${DATABASE_SERVICE_NAME}-data"
}
]
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"volumes": [
{
"emptyDir": {
"medium": ""
},
"name": "${DATABASE_SERVICE_NAME}-data"
}
]
}
},
"triggers": [
{
"imageChangeParams": {
"automatic": true,
"containerNames": [
"mysql"
],
"from": {
"kind": "ImageStreamTag",
"name": "mysql:${MYSQL_VERSION}",
"namespace": "${NAMESPACE}"
},
"lastTriggeredImage": ""
},
"type": "ImageChange"
},
{
"type": "ConfigChange"
}
]
},
"status": {}
}
],
"parameters": [
{
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
"value": "512Mi"
},
{
"name": "NAMESPACE",
"displayName": "Namespace",
"description": "The OpenShift Namespace where the ImageStream resides.",
"value": "openshift"
},
{
"name": "DATABASE_SERVICE_NAME",
"displayName": "Database Service Name",
"description": "The name of the OpenShift Service exposed for the database.",
"value": "mysql",
"required": true
},
{
"name": "MYSQL_USER",
"displayName": "MySQL Connection Username",
"description": "Username for MySQL user that will be used for accessing the database.",
"generate": "expression",
"from": "user[A-Z0-9]{3}",
"required": true
},
{
"name": "MYSQL_PASSWORD",
"displayName": "MySQL Connection Password",
"description": "Password for the MySQL connection user.",
"generate": "expression",
"from": "[a-zA-Z0-9]{16}",
"required": true
},
{
"name": "MYSQL_DATABASE",
"displayName": "MySQL Database Name",
"description": "Name of the MySQL database accessed.",
"value": "sampledb",
"required": true
},
{
"name": "MYSQL_VERSION",
"displayName": "Version of MySQL Image",
"description": "Version of MySQL image to be used (5.5, 5.6 or latest).",
"value": "5.6",
"required": true
}
],
"labels": {
"template": "mysql-ephemeral-template"
}
}'''
    @mock.patch('oc_process.Utils.create_tmpfile_copy')
    @mock.patch('oc_process.OCProcess._run')
    def test_state_list(self, mock_cmd, mock_tmpfile_copy):
        ''' Testing a get '''
        # Module parameters as Ansible would pass them for state=list.
        params = {'template_name': 'mysql-ephermeral',
                  'namespace': 'test',
                  'content': None,
                  'state': 'list',
                  'reconcile': False,
                  'create': False,
                  'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'},
                  'kubeconfig': '/etc/origin/master/admin.kubeconfig',
                  'debug': False}
        # Single `oc` invocation returns the raw template JSON fixture.
        mock_cmd.side_effect = [
            (0, OCProcessTest.mysql, '')
        ]
        # Avoid touching the real kubeconfig on disk.
        mock_tmpfile_copy.side_effect = [
            '/tmp/mock_kubeconfig',
        ]
        results = OCProcess.run_ansible(params, False)
        # Listing is read-only: no change, and the template comes back as-is.
        self.assertFalse(results['changed'])
        self.assertEqual(results['results']['results'][0]['metadata']['name'], 'mysql-ephemeral')
@mock.patch('oc_process.Utils.create_tmpfile_copy')
@mock.patch('oc_process.OCProcess._run')
def test_process_no_create(self, mock_cmd, mock_tmpfile_copy):
''' Testing a process with no create '''
params = {'template_name': 'mysql-ephermeral',
'namespace': 'test',
'content': None,
'state': 'present',
'reconcile': False,
'create': False,
'params': {'NAMESPACE': 'test', 'DATABASE_SERVICE_NAME': 'testdb'},
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
mysqlproc = '''{
"kind": "List",
"apiVersion": "v1",
"metadata": {},
"items": [
{
"apiVersion": "v1",
"kind": "Service",
"metadata": {
"creationTimestamp": null,
"labels": {
"template": "mysql-ephemeral-template"
},
"name": "testdb"
},
"spec": {
"ports": [
{
"name": "mysql",
"nodePort": 0,
"port": 3306,
"protocol": "TCP",
"targetPort": 3306
}
],
"selector": {
"name": "testdb"
},
"sessionAffinity": "None",
"type": "ClusterIP"
},
"status": {
"loadBalancer": {}
}
},
{
"apiVersion": "v1",
"kind": "DeploymentConfig",
"metadata": {
"creationTimestamp": null,
"labels": {
"template": "mysql-ephemeral-template"
},
"name": "testdb"
},
"spec": {
"replicas": 1,
"selector": {
"name": "testdb"
},
"strategy": {
"type": "Recreate"
},
"template": {
"metadata": {
"creationTimestamp": null,
"labels": {
"name": "testdb"
}
},
"spec": {
"containers": [
{
"capabilities": {},
"env": [
{
"name": "MYSQL_USER",
"value": "userHJJ"
},
{
"name": "MYSQL_PASSWORD",
"value": "GITOAduAMaV6k688"
},
{
"name": "MYSQL_DATABASE",
"value": "sampledb"
}
],
"image": " ",
"imagePullPolicy": "IfNotPresent",
"livenessProbe": {
"initialDelaySeconds": 30,
"tcpSocket": {
"port": 3306
},
"timeoutSeconds": 1
},
"name": "mysql",
"ports": [
{
"containerPort": 3306,
"protocol": "TCP"
}
],
"readinessProbe": {
"exec": {
"command": [
"/bin/sh",
"-i",
"-c",
"MYSQL_PWD=$MYSQL_PASSWORD mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'"
]
},
"initialDelaySeconds": 5,
"timeoutSeconds": 1
},
"resources": {
"limits": {
"memory": "512Mi"
}
},
"securityContext": {
"capabilities": {},
"privileged": false
},
"terminationMessagePath": "/dev/termination-log",
"volumeMounts": [
{
"mountPath": "/var/lib/mysql/data",
"name": "testdb-data"
}
]
}
],
"dnsPolicy": "ClusterFirst",
"restartPolicy": "Always",
"volumes": [
{
"emptyDir": {
"medium": ""
},
"name": "testdb-data"
}
]
}
},
"triggers": [
{
"imageChangeParams": {
"automatic": true,
"containerNames": [
"mysql"
],
"from": {
"kind": "ImageStreamTag",
"name": "mysql:5.6",
"namespace": "test"
},
"lastTriggeredImage": ""
},
"type": "ImageChange"
},
{
"type": "ConfigChange"
}
]
}
}
]
}'''
mock_cmd.side_effect = [
(0, OCProcessTest.mysql, ''),
(0, OCProcessTest.mysql, ''),
(0, mysqlproc, ''),
]
mock_tmpfile_copy.side_effect = [
'/tmp/mock_kubeconfig',
]
results = OCProcess.run_ansible(params, False)
self.assertFalse(results['changed'])
self.assertEqual(results['results']['results']['items'][0]['metadata']['name'], 'testdb')
    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_fallback(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup fallback '''
        # Empty PATH and no file found anywhere: fall back to the bare name.
        mock_env_get.side_effect = lambda _v, _d: ''
        mock_path_exists.side_effect = lambda _: False
        self.assertEqual(locate_oc_binary(), 'oc')
    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_path(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup in path '''
        oc_bin = '/usr/bin/oc'
        # PATH lists /bin:/usr/bin; only /usr/bin/oc "exists".
        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_path_exists.side_effect = lambda f: f == oc_bin
        self.assertEqual(locate_oc_binary(), oc_bin)
    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_usr_local(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup in /usr/local/bin '''
        oc_bin = '/usr/local/bin/oc'
        # /usr/local/bin is not on PATH, yet the binary there must be found.
        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_path_exists.side_effect = lambda f: f == oc_bin
        self.assertEqual(locate_oc_binary(), oc_bin)
    @unittest.skipIf(six.PY3, 'py2 test only')
    @mock.patch('os.path.exists')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_home(self, mock_env_get, mock_path_exists):
        ''' Testing binary lookup in ~/bin '''
        oc_bin = os.path.expanduser('~/bin/oc')
        # ~/bin is not on PATH, yet the binary there must be found.
        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_path_exists.side_effect = lambda f: f == oc_bin
        self.assertEqual(locate_oc_binary(), oc_bin)
    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_fallback_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup fallback '''
        # Py3 path uses shutil.which; nothing found -> bare name fallback.
        mock_env_get.side_effect = lambda _v, _d: ''
        mock_shutil_which.side_effect = lambda _f, path=None: None
        self.assertEqual(locate_oc_binary(), 'oc')
    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_path_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup in path '''
        oc_bin = '/usr/bin/oc'
        # shutil.which resolves the binary from PATH.
        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
        self.assertEqual(locate_oc_binary(), oc_bin)
    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_usr_local_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup in /usr/local/bin '''
        oc_bin = '/usr/local/bin/oc'
        # Binary outside PATH but in /usr/local/bin must still be located.
        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
        self.assertEqual(locate_oc_binary(), oc_bin)
    @unittest.skipIf(six.PY2, 'py3 test only')
    @mock.patch('shutil.which')
    @mock.patch('os.environ.get')
    def test_binary_lookup_in_home_py3(self, mock_env_get, mock_shutil_which):
        ''' Testing binary lookup in ~/bin '''
        oc_bin = os.path.expanduser('~/bin/oc')
        # Binary outside PATH but in ~/bin must still be located.
        mock_env_get.side_effect = lambda _v, _d: '/bin:/usr/bin'
        mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
        self.assertEqual(locate_oc_binary(), oc_bin)
| apache-2.0 |
andersardo/gedMerge | errRelationUtils.py | 1 | 19125 | # -*- coding: utf-8 -*-
# This Python file uses the following encoding: utf-8
import sys, os
from collections import defaultdict
import codecs, locale
from dbUtils import getFamilyFromId
from matchUtils import nodeSim
from mergeUtils import mergeEvent, mergeFam, mergePers
from graphUtils import eventDisp, printNode, mapPersId
from luceneDB import luceneDB
"""
#Family SVM default
from svmutil import svm_load_model, svm_predict
#famSVMfeatures = getattr(importlib.import_module('featureSet'), config['famfeatureSet'])
from featureSet import famExtended as famSVMfeatures
svmFamModel = svm_load_model('conf/famExtended.model')
"""
#limits used in comparisons
# chSameLimit: minimum fraction of shared children for two families to be
#              considered the same; simLimit: minimum nodeSim score for two
#              person records to be considered similar.
chSameLimit = 0.5
simLimit = 0.1
def sanity(personDB, familyDB, relationDB, do=['child', 'family', 'relation']):
    #SANITY CHECKS
    # Returns a tuple (childErr, famErr, relErr):
    #   childErr - list of (person, [familyIds]) for persons who are a child
    #              in more than one family
    #   famErr   - list of (familyId, [persons]) for families with more than
    #              one husb or more than one wife
    #   relErr   - set of person/family ids that occur in no relation at all
    # NOTE(review): the mutable default for `do` is shared between calls;
    # harmless as long as callers never mutate it.
    childErr = []
    famErr = []
    relErr = set()
    if 'child' in do:
        #can only be child in 1 family
        # Aggregation: group 'child' relations per person, keep count > 1.
        aggrPipe = [
            {'$match': {'relTyp': 'child'}},
            {'$project': {'persId': '$persId', 'count': {'$concat': ['1']}}},
            {'$group': {'_id': '$persId', 'count': {'$sum': 1}}},
            {'$match': {'count': {'$gt': 1}}}
        ]
        for multiChild in relationDB.aggregate(aggrPipe):
            pers = personDB.find_one({'_id': multiChild['_id']})
            chFams = []
            for fams in relationDB.find({'relTyp': 'child', 'persId': pers['_id']}):
                famId = fams['famId']
                chFams.append(famId)
            #print 'Relation ERROR child in many families', pers['_id'], pers['name'], chFams
            childErr.append((pers, chFams))
    if 'family' in do:
        #1 husb/wife per family
        for partner in ('husb', 'wife'):
            # Families where the same role (husb/wife) occurs more than once.
            aggrPipe = [
                {'$match': {'relTyp': partner}},
                {'$project': {'famId': '$famId', 'count': {'$concat': ['1']}}},
                {'$group': {'_id': '$famId', 'count': {'$sum': 1}}},
                {'$match': {'count': {'$gt': 1}}}]
            for multiPartner in relationDB.aggregate(aggrPipe):
                pers = []
                for r in relationDB.find({'famId': multiPartner['_id'], 'relTyp': partner}):
                    p = personDB.find_one({'_id': r['persId']})
                    pers.append(p)
                #print 'Relation ERROR Family', multiPartner['_id'], 'have', multiPartner['count'], partner, pers[0]['_id'], pers[0]['name'], pers[1]['_id'], pers[1]['name']
                famErr.append((multiPartner['_id'], pers))
    if 'relation' in do:
        #Persons without relations
        """
        Slow
        for pers in personDB.find():
            rel = relationDB.find_one({'persId': pers['_id']})
            if not rel:
                #print 'Relation WARNING Person without relations:', pers['_id'], pers['name']
                relErr.append(pers)
        """
        # Set differences over distinct ids are far cheaper than the
        # per-person lookups sketched in the dead code above.
        pers = set(personDB.find({}, {'_id': 1}).distinct('_id'))
        persRel = set(relationDB.find({}, {'_id': 0, 'persId': 1}).distinct('persId'))
        #families
        fam = set(familyDB.find({}, {'_id': 1}).distinct('_id'))
        famRel = set(relationDB.find({}, {'_id': 0, 'famId': 1}).distinct('famId'))
        relErr = pers.difference(persRel).union(fam.difference(famRel))
    return (childErr, famErr, relErr)
"""
def mergeFam(fId1, fId2, personDB, familyDB, relationDB, origDB):
# print ' Merging families', fId2, 'into', fId1
origDB.update_one({'recordId': fId1, 'type': 'family'},
{'$push': {'map': fId2}})
#Test fId1:husb/wife == fId2:husb/wife -- evt merge persons?
for r in relationDB.find({'famId': fId2}):
relationDB.delete_one(r)
del(r['_id'])
r['famId'] = fId1
relationDB.replace_one(r, r, upsert=True)
#merge marriage events
marrEvents = []
for fid in (fId1, fId2):
marr = familyDB.find_one({'_id': fid}, {'_id': False, 'marriage': True})
if marr: marrEvents.append(marr['marriage'])
if marrEvents:
#print 'marrEvents', marrEvents
familyDB.update_one({'_id': fId1}, {'$set':
{'marriage': mergeEvent(marrEvents)}})
familyDB.delete_one({'_id': fId2}) #delete family fId2
# FIX Need name of DB:
searchDB = luceneDB(personDB.full_name.split('.')[0])
searchDB.deleteRec(fId2)
return
def mergePers(pId1, pId2, personDB, familyDB, relationDB, origDB):
#print ' Merging persons', pId2, 'into', pId1
origDB.update_one({'recordId': pId1, 'type': 'person'},
{'$push': {'map': pId2}})
for r in relationDB.find({'persId': pId2}):
relationDB.delete_one(r)
del(r['_id'])
r['persId'] = pId1
relationDB.replace_one(r, r, upsert=True)
#merge birth/death
for ev in ('birth', 'death'):
Events = []
for pid in (pId1, pId2):
event = personDB.find_one({'_id': pid}, {'_id': False, ev: True})
if event: Events.append(event[ev])
if Events:
personDB.update_one({'_id': pId1}, {'$set':
{ev: mergeEvent(Events)}})
personDB.delete_one({'_id': pId2}) #delete person pId2
# FIX Need name of DB:
searchDB = luceneDB(personDB.full_name.split('.')[0])
searchDB.deleteRec(pId2)
#Evt check if pId1 barn i två familjer och inga problem => delete den familjen
return
"""
def repairChild(childErr, personDB, familyDB, relationDB, origDB):
notFixed = []
for (pers, chFams) in childErr:
merged = []
for i in range(len(chFams)):
if chFams[i] in merged: continue
for j in range(i+1, len(chFams)):
work = getFamilyFromId(chFams[i], familyDB, relationDB)
match = getFamilyFromId(chFams[j], familyDB, relationDB)
if not work or not match: continue
minCh = float(min(len(work['children']), len(match['children'])))
if minCh > 0.0:
sameChildren = set(work['children']).intersection(set(match['children']))
chSame = len(sameChildren)/minCh
else:
chSame=1
husbSame = (work['husb'] == match['husb'])
wifeSame = (work['wife'] == match['wife'])
husbSimilarity = nodeSim(personDB.find_one({'_id': work['husb']}),
personDB.find_one({'_id': match['husb']}))
wifeSimilarity = nodeSim(personDB.find_one({'_id': work['wife']}),
personDB.find_one({'_id': match['wife']}))
if husbSame: otherSimilarity = wifeSimilarity
else: otherSimilarity = husbSimilarity
##
#p1 majority of children same
# 1 partner same or Null
# other partner similar
parentsCond = ( (husbSame or wifeSame) )
if parentsCond and otherSimilarity>simLimit and chSame>=chSameLimit:
mergeFam( chFams[i], chFams[j], personDB, familyDB,
relationDB, origDB, updateLucene=True)
merged.append(chFams[j])
print 'p1 merged', chFams[i], chFams[j]
continue
#
#p1a majority of children same
# 1 partner against Null
# other partner similar
parentsCond = ( (((work['husb'] and not match['husb']) or
(not work['husb'] and match['husb'])) and
wifeSimilarity > simLimit )
or
(((work['wife'] and not match['wife']) or
(not work['wife'] and match['wife'])) and
husbSimilarity > simLimit )
)
if parentsCond and chSame>=chSameLimit:
mergeFam( chFams[i], chFams[j], personDB, familyDB,
relationDB, origDB, updateLucene=True)
merged.append(chFams[j])
print 'p1a merged', chFams[i], chFams[j]
continue
#
#p2 majority of children same
# both partners against Null
parentsCond = ( ((work['husb'] and not match['husb']) or
(not work['husb'] and match['husb']))
and
((work['wife'] and not match['wife']) or
(not work['wife'] and match['wife']))
)
if parentsCond and chSame>=chSameLimit:
mergeFam( chFams[i], chFams[j], personDB, familyDB,
relationDB, origDB, updateLucene=True)
merged.append(chFams[j])
print 'p2 merged', chFams[i], chFams[j]
continue
#
#p3 majority of children same
# both partners same
#p4 children same or against Null
# 1 partner same or Null
# other partner similar
##
#p5: 1 family without parents
if (((work['husb'] is None) and (work['wife'] is None)) or
((match['husb'] is None) and (match['wife'] is None))):
mergeFam( chFams[i], chFams[j], personDB, familyDB,
relationDB, origDB, updateLucene=True)
merged.append(chFams[j])
print 'p5 merged', chFams[i], chFams[j]
continue
"""
#p6: wife or husb same
pat = False
for partner in ('husb', 'wife'):
if work[partner]==match[partner] and work[partner] is not None:
#What if other partner not equal? MergePers? ??
pat = True
break
if pat:
mergeFam( chFams[i], chFams[j], personDB, familyDB, relationDB, origDB)
merged.append(chFams[j])
print 'p6 merged', chFams[i], chFams[j]
continue
#
!!!!!!!!!!! Uppdaterar matches, fam_matches
config = {'persons': personDB, 'match_persons': personDB,
'families': familyDB, 'match_families': familyDB,
'relations': relationDB, 'match_relations': relationDB,
#dummies
'matches': personDB, 'fam_matches': personDB}
v = famSVMfeatures(work, match, config) ##FIX import default famExtended
p_labels, p_acc, p_vals = svm_predict([0],[v],svmFamModel,options="-b 1")
svmstat = p_vals[0][0]
#print ' SVM=', svmstat, ';', chFams[i], chFams[j]
if svmstat > 0.66:
#print 'Pattern 3: (SVM) found', pers['_id'], pers['name'], chFams[i], chFams[j]
mergeFam(chFams[i], chFams[j], personDB, familyDB, relationDB, origDB)
merged.append(chFams[j])
continue
"""
print 'Not repaired', pers['_id'], pers['name'], chFams[i], chFams[j]
#print ' ',
#for x in (minCh, chSame, husbSame, wifeSame, husbSimilarity, wifeSimilarity, otherSimilarity): print x,
#print
notFixed.append((pers, chFams))
return notFixed
def repairFam(famErr, personDB, familyDB, relationDB, origDB):
    #1 husb/wife per family
    # For each family with duplicate husb/wife entries, merge the two
    # candidate persons when at least one of them has no other relations and
    # their node similarity exceeds simLimit.  Returns the entries that
    # could not be repaired.
    notFixed = []
    for (famId, persList) in famErr:
        pStr = ''
        for p in persList:
            pStr += p['_id']+' '+p['name']+';'
        print pStr
        ##
        #p1 one partner the same or None
        #   other partner has no other relations
        #p2 one of the duplicated partners has no other relations
        p1Rel = relationDB.find({'persId': persList[0]['_id']}).count()
        p2Rel = relationDB.find({'persId': persList[1]['_id']}).count()
        #pSimilarity = nodeSim(personDB.find_one({'_id': persList[0]}),
        #                      personDB.find_one({'_id': persList[1]}))
        pSimilarity = nodeSim(persList[0], persList[1])
        # NOTE(review): only the first two duplicates are considered; a
        # family with three or more same-role partners is partially handled.
        if (p1Rel==1 or p2Rel==1) and pSimilarity>simLimit:
            mergePers(persList[0]['_id'], persList[1]['_id'], personDB, familyDB, relationDB, origDB)
            print 'p2 merged', pStr
            continue
        #p3 one of the duplicated partners without other relations and in a
        #   family without other relations (not implemented)
        ##
        print ' Not repaired', pStr, p1Rel, p2Rel, pSimilarity
        notFixed.append((famId, persList))
    return notFixed
def repairRel(relErr, personDB, familyDB, relationDB, origDB):
    #Persons/families without relations
    # Families (ids prefixed 'F_') without relations are deleted; orphan
    # person ids are kept for manual review.  Returns the remaining ids.
    rErr = relErr.copy()
    for id in relErr:
        if id.startswith('F_'):
            #delete families without relations
            print 'Deleting family without relations', id
            familyDB.delete_one({'_id': id})
            rErr.remove(id)
    return rErr
def printFams(famList, centerPersonId, centerFamId, gvFil, personDB, familyDB, relationDB):
global mapPersId
mapPersId.clear()
mapFamc = {}
persList = set()
for famId in famList:
fam = getFamilyFromId(famId, familyDB, relationDB)
for famrel in relationDB.find({'famId': famId}):
if famrel['relTyp']=='child': continue
if famrel['persId'] == centerPersonId:
printNode(famrel['persId'], 'shape="tab", style=filled, fillcolor="aquamarine"', personDB, gvFil)
else:
printNode(famrel['persId'], 'shape="tab", style=filled, fillcolor="lightyellow"', personDB, gvFil)
"""
for partner in ('husb', 'wife'):
if partner in fam:
if fam[partner] == centerPersonId:
printNode(fam[partner], 'shape="tab", style=filled, fillcolor="aquamarine"', personDB, gvFil)
else:
printNode(fam[partner], 'shape="folder", style=filled, fillcolor="lightyellow"', personDB, gvFil)
"""
prev = None
for ch in fam['children']:
mapFamc[ch] = fam['_id']
if ch == centerPersonId:
printNode(ch, 'shape="tab", style=filled, fillcolor="aquamarine"', personDB, gvFil)
else:
printNode(ch, 'shape="box", style=filled, fillcolor="whitesmoke"', personDB, gvFil)
if prev:
gvFil.write(mapPersId[prev]+' -> '+mapPersId[ch]+' [style=invis, label="", len=0.02];'+"\n")
prev = ch
gvFil.write('{rank=same; ')
for ch in fam['children']:
#print ch, mapPersId
gvFil.write(mapPersId[ch] + '; ')
gvFil.write("}\n")
for famId in famList:
fam = getFamilyFromId(famId, familyDB, relationDB)
txt = '<FONT POINT-SIZE="8.0">' + fam['refId'] + '<br/>'
if 'marriage' in fam:
txt += 'Marriage:'+eventDisp(fam['marriage']).replace('<br>',', ')
txt += '</FONT>'
if famId == centerFamId:
gvFil.write(fam['_id'] + '[label=<'+txt+'>, style=filled, fillcolor="aquamarine", shape="note"];')
gvFil.write("\n")
else:
gvFil.write(fam['_id'] + '[label=<'+txt+'>, style=filled, shape="note"];')
gvFil.write("\n")
for ch in fam['children']:
gvFil.write(fam['_id'] + '->' + mapPersId[ch])
gvFil.write("\n")
for famrel in relationDB.find({'famId': famId}):
if famrel['relTyp']=='child': continue
gvFil.write(mapPersId[famrel['persId']] + '->' + fam['_id'])
gvFil.write("\n")
"""
if 'wife' in fam and fam['wife']:
gvFil.write(mapPersId[fam['wife']] + '->' + fam['_id'])
gvFil.write("\n")
if 'husb' in fam and fam['husb']:
gvFil.write(mapPersId[fam['husb']] + '->' + fam['_id'])
gvFil.write("\n")
"""
def genGraphFam(persId, famId1, famId2, personDB, familyDB, relationDB):
    #global mapPersId
    #mapPersId = {}
    # Render a Graphviz SVG of the conflicting families famId1/famId2 around
    # person persId (plus the families persId is a child of) and return the
    # SVG document as a byte string.  Writes <persId>.gv and <persId>.gv.svg
    # into the current directory.
    filnamn = persId+'.gv'
    title = 'Error relations'
    gvFil = open(filnamn, 'wb')
    gvFil = codecs.getwriter('UTF-8')(gvFil)
    gvFil.write('digraph G {charset=utf8; overlap=false; rankdir = LR; ratio = compress; ranksep = 0.25; nodesep = 0.03;fontname=Helvetica; fontsize=16; fontcolor=black; label="'+title+'"; labelloc=t;')
    gvFil.write("\n")
    famList = set()
    for fam in (famId1, famId2):
        famList.add(fam)
        # NOTE(review): fam is a family *id* string here, so `partner in fam`
        # is a substring test and `fam[partner]` would raise if it ever
        # matched -- this looks like it was written for family dicts; confirm.
        for partner in ('husb', 'wife'):
            if partner in fam:
                for r in relationDB.find({'persId': fam[partner]}):
                    famList.add(r['famId'])
    for r in relationDB.find({'persId': persId, 'relTyp': 'child'}): famList.add(r['famId'])
    printFams(famList, persId, '', gvFil, personDB, familyDB, relationDB)
    gvFil.write( "}\n" )
    gvFil.close()
    # NOTE(review): persId is interpolated into a shell command; safe only if
    # ids never contain shell metacharacters.
    os.system('dot -Tsvg -O '+filnamn)
    #print ' Img', filnamn
    fil = open(filnamn+'.svg' , 'rb')
    graph = fil.read()
    fil.close()
    return graph
def genGraphPers(persId1, persId2, famId, personDB, familyDB, relationDB):
filnamn = persId1+'.gv'
title = 'Error relations'
gvFil = open(filnamn, 'wb')
gvFil = codecs.getwriter('UTF-8')(gvFil)
gvFil.write('digraph G {charset=utf8; overlap=false; rankdir = LR; ratio = compress; ranksep = 0.25; nodesep = 0.03;fontname=Helvetica; fontsize=16; fontcolor=black; label="'+title+'"; labelloc=t;')
gvFil.write("\n")
famList = set()
for persId in (persId1, persId2):
for r in relationDB.find({'persId': persId}):
famList.add(r['famId'])
printFams(famList, '', famId, gvFil, personDB, familyDB, relationDB)
gvFil.write( "}\n" )
gvFil.close()
os.system('dot -Tsvg -O '+filnamn)
#print ' Img', filnamn
fil = open(filnamn+'.svg' , 'rb')
graph = fil.read()
fil.close()
return graph
if __name__=="__main__":
    # Script entry point: connect to the named genealogy database, run the
    # sanity checks and attempt automatic repair of the inconsistencies found.
    import codecs, locale
    locale.setlocale(locale.LC_ALL, 'en_US.UTF-8') #sorting??
    sys.stdout = codecs.getwriter('UTF-8')(sys.stdout)
    import common
    DB = 'anders_DavidEkedahlMaster'
    conf = common.init(DB, matchDBName = DB, indexes=True)
    # Collections used throughout: persons, families, relations plus the
    # original (pre-import) data used as a repair reference.
    personDB = conf['persons']
    familyDB = conf['families']
    relationDB = conf['relations']
    origDB = conf['originalData']
    # sanity() reports three categories of problems; each repair* helper
    # tries to fix one category using the original source data.
    (childErr, famErr, relErr) = sanity(personDB, familyDB, relationDB)
    print 'Only child in one family', len(childErr)
    notFixed = repairChild(childErr, personDB, familyDB, relationDB, origDB)
    print 'Not fixed', len(notFixed)
    print 'Multi husb/wife in one family'
    repairFam(famErr, personDB, familyDB, relationDB, origDB)
    print 'Rel err'
    repairRel(relErr, personDB, familyDB, relationDB, origDB)
| mit |
exic/spade2 | xmppd/modules/db_fake.py | 1 | 17617 | # -*- coding: utf-8 -*-
from xmpp import *
"""try:
from xml.marshal.generic import *
marshal = Marshaller()
unmarshal = Unmarshaller()
except:
import marshal
import marshal as unmarshal
"""
import marshal
import marshal as unmarshal
import copy
db={}
def build_database(server_instance):
    """Populate the in-memory user database for every registered server name.

    For each name in ``server_instance.servernames`` that is not yet present
    in the module-level ``db``, create the domain entry together with one
    default '__ir__' account.  Existing domain entries are left untouched.

    Improvement over the original: ~115 lines of dead, commented-out sample
    accounts (a bare triple-quoted string that was built and discarded on
    every call) were removed, and the Python-2-only ``has_key`` test was
    replaced with ``in``.
    """
    global db
    for a_registered_server in server_instance.servernames:
        server_instance.DEBUG('server','DB: Building database tree for %s'%a_registered_server,'info')
        if a_registered_server not in db:
            db[a_registered_server]={}
            db[a_registered_server]['__ir__'] = {}
            # NOTE(review): the 'up' karma is 307200 repeated three times
            # (307200307200307200) while other karma fields use plain 307200;
            # confirm this huge value is intentional.
            db[a_registered_server]['__ir__']['storage'] = {'karma':{'down':307200,'up':307200307200307200,'last_time':0.0,'tot_down':0,'tot_up':0}}
            db[a_registered_server]['__ir__']['password'] = 'test'
            # anon_allow tells the privacy subsystem whether contacts without
            # any subscription may reach this account.
            db[a_registered_server]['__ir__']['anon_allow'] = 'yes'
            db[a_registered_server]['__ir__']['roster'] = {}
            db[a_registered_server]['__ir__']['groups'] = {}
class AUTH(PlugIn):
    """Authentication backend answering password and user-existence queries
    against the module-level in-memory user database."""
    NS=''

    def getpassword(self, node, domain):
        """Return the stored password for node@domain, or None if unknown."""
        try:
            return db[domain][node]['password']
        except KeyError:
            return None

    def isuser(self, node, domain):
        """Return True when node@domain is a registered user, or when the
        domain itself is a registered component; False otherwise."""
        try:
            if node in db[str(domain)]:
                return True
        except:
            # Unknown domain (or malformed db entry): fall through.
            pass
        if str(domain) in self._owner.components.keys():
            return True
        return False
class DB(PlugIn):
    """In-memory user/roster database plugin.

    All account data lives in the module-level ``db`` dict, laid out as
    ``db[domain][node][key]`` with per-user keys such as ``password``,
    ``roster``, ``groups`` and ``storage``.  Every mutating method persists
    the whole database to disk via :meth:`save_database` (Python ``marshal``
    format).  Methods taking ``(domain, node)`` also accept the node packed
    into the domain as ``"node.domain"`` with an empty ``node`` and split
    the pair themselves.
    """
    NS = ''

    def plugin(self, server):
        """Plugin entry point: load the saved database, then make sure every
        served domain has its default entries."""
        self.DEBUG('Building Database tree!', 'info')
        self.load_database()
        build_database(server)  # add default per-domain accounts

    def store(self, domain, node, stanza, id='next_unique_id'):
        """Store *stanza* in the user's offline storage under *id*.

        Returns True on success, False when the user does not exist.
        """
        if not node:
            node, domain = domain.split(".", 1)
        try:
            self.DEBUG("Storing to database:\n%s:%s::%s:%s" % (domain, node, id, stanza), 'info')
            db[domain][node]['storage'][id] = stanza
            self.save_database()
            return True
        except KeyError:
            self.DEBUG("Could not store in database:\n%s:%s::%s:%s" % (domain, node, id, stanza), 'error')
            return False

    def get_store(self, domain, node, id):
        """Return the stored stanza *id* for the user, or False if missing."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            return db[domain][node]['storage'][id]
        except KeyError:
            return False

    def get_storage(self, domain, node):
        """Return the user's whole offline storage as a list and clear it."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            stored = copy.copy(db[domain][node]['storage'].values())
            db[domain][node]['storage'].clear()
            return stored
        except KeyError:
            return []
        except:
            # Deliberate best effort: any other failure means "nothing stored".
            return []

    def save(self, domain, node, stanza, id='next_unique_id'):
        """Save *stanza* directly under the user's top-level *id* key
        (unlike :meth:`store`, which uses the 'storage' sub-dict)."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            self.DEBUG("Saving to database:\n%s:%s::%s:%s" % (domain, node, id, stanza), 'info')
            db[domain][node][id] = stanza
            self.save_database()
            return True
        except KeyError:
            self.DEBUG("DB ERR: Could not save to database:\n%s:%s::%s:%s" % (domain, node, id, stanza), 'warn')
            return False

    def save_to_roster(self, domain, node, jid, info, add_only_if_already=False):
        """Merge *info* into node@domain's roster entry for *jid*.

        When the entry exists and ``add_only_if_already`` is False the entry
        is updated in place; otherwise it is replaced wholesale.
        """
        if not node:
            node, domain = domain.split(".", 1)
        self.DEBUG("Saving roster info to database %s-->(%s) [%s]:\n" % (jid, node + '@' + domain, str(info)), 'info')
        if jid in db[domain][node]['roster'] and add_only_if_already == False:
            db[domain][node]['roster'][jid].update(info)
        else:
            db[domain][node]['roster'][jid] = info
        self.save_database()

    def pull_roster(self, domain, node, jid):
        """Return the roster entry of node@domain for *jid* (defaulting the
        'subscription' key to 'none'), or None when absent."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            data = db[domain][node]['roster'][jid]
            if 'subscription' not in data:
                data.update({'subscription': 'none'})
            return data
        except KeyError:
            self.DEBUG('DB ERR: Could not retrieve %s@%s roster for %s' % (node, domain, jid), 'warn')
            return None

    def del_from_roster(self, domain, node, jid):
        """Delete *jid* from node@domain's roster.  Returns True/False."""
        if not node:
            node, domain = domain.split(".", 1)
        self.DEBUG("Deleting roster info from database %s--X(%s):\n" % (jid, node + '@' + domain), 'info')
        try:
            del(db[domain][node]['roster'][jid])
            self.save_database()
            return True
        except KeyError:
            self.DEBUG('DB ERR: A Client tried to remove a contact that wasn\'t even added! (%s::%s::%s)' % (domain, node, jid), 'warn')
            return False

    def del_from_roster_jid(self, domain, node, jid, what):
        """Delete the single attribute *what* from the roster entry for *jid*.
        Returns True/False."""
        if not node:
            node, domain = domain.split(".", 1)
        self.DEBUG("Deleting roster info from database %s--X(%s):\n" % (jid, node + '@' + domain), 'info')
        try:
            del(db[domain][node]['roster'][jid][what])
            self.save_database()
            return True
        except KeyError:
            self.DEBUG('DB ERR: A Client tried to remove a contact attr that wasn\'t even added! (%s::%s::%s)' % (domain, node, jid), 'warn')
            return False

    def save_groupie(self, domain, node, jid, groups):
        """Make *jid* a member of exactly the roster groups named in *groups*
        for node@domain: added to the listed groups, removed from the rest."""
        if not node:
            node, domain = domain.split(".", 1)
        # Normalise group names to UTF-8 encoded byte strings.
        # BUGFIX: the original rebound ``group_list`` to the *last* group name
        # inside the loop, so the membership tests below were substring checks
        # against a single name instead of list membership.  (It also had an
        # unreachable duplicate ``elif type(x)==type(u'')`` branch, removed.)
        group_list = []
        for x in groups:
            if type(x) == type(u''):
                x = x.encode('utf-8')
            group_list += [x]
        self.DEBUG("Saving groupie jid to database %s-->(%s) [%s]:\n" % (jid, node + '@' + domain, unicode(groups).encode('utf-8')), 'info')
        for gn, gm in db[domain][node]['groups'].items():
            if gn not in group_list and jid in db[domain][node]['groups'][gn]:
                db[domain][node]['groups'][gn].remove(jid)
            elif gn in group_list and jid not in db[domain][node]['groups'][gn]:
                db[domain][node]['groups'][gn] += [jid]
        self.save_database()

    def del_groupie(self, domain, node, jid):
        """Remove *jid* from every roster group of node@domain."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            self.DEBUG("Deleting groupie from database %s--X(%s):\n" % (jid, node + '@' + domain), 'info')
            for gn, gm in db[domain][node]['groups'].items():
                if jid in db[domain][node]['groups'][gn]:
                    db[domain][node]['groups'][gn].remove(jid)
        except Exception:
            self.DEBUG('DB ERR: A groupie went mad! %s::%s::%s' % (domain, node, jid), 'warn')
        self.save_database()

    def get(self, domain, node, what):
        """Return the user's top-level value *what*, or None when absent."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            return db[domain][node][what]
        except KeyError:
            self.DEBUG('DB ERR: Could not retrieve %s::%s::%s' % (domain, node, what), 'warn')
            return None

    def delete(self, domain, node, what):
        """Delete the user's top-level value *what*.  True on success,
        None when the key did not exist."""
        if not node:
            node, domain = domain.split(".", 1)
        try:
            del(db[domain][node][what])
            self.save_database()
            return True
        except KeyError:
            self.DEBUG('DB ERR: Could not delete %s::%s::%s' % (domain, node, what), 'warn')
            return None

    def getNumRegistered(self, server):
        """Return the number of accounts registered under *server*."""
        return len(db[server])

    def register_user(self, domain, node, password, name):
        """Create a fresh account node@domain.  Returns True/False.

        (The original assigned the empty roster twice; the duplicate was
        removed.)
        """
        try:
            db[domain][node] = {}
            db[domain][node]['password'] = password
            db[domain][node]['roster'] = {}
            db[domain][node]['storage'] = {}
            db[domain][node]['groups'] = {}
            db[domain][node]['name'] = name
            db[domain][node]['anon_allow'] = 'yes'
            self.DEBUG("Registered user %s in domain %s" % (node, domain), 'info')
            self.save_database()
            return True
        except:
            # Deliberate best effort: report and signal failure, never raise.
            self.DEBUG('Error registering username %s in domain %s' % (node, domain), 'error')
            return False

    def save_database(self, filename="user_db.xml"):
        """Marshal the whole database to *filename*.  Returns True/False.

        NOTE(review): despite the '.xml' default name the on-disk format is
        Python ``marshal``, not XML.
        """
        try:
            global db
            # BUGFIX: open in binary mode -- marshal data is not text, and the
            # original text-mode open corrupts the dump on some platforms.
            fh = open(filename, 'wb')
            marshal.dump(db, fh)
            fh.close()
            self.DEBUG('save_database: User database saved!', 'info')
            return True
        except:
            self.DEBUG('save_database: Could not save user database', 'error')
            return False

    def load_database(self, filename="user_db.xml"):
        """Load the entire database from disk.  Returns True/False."""
        try:
            global db
            fh = open(filename, 'rb')  # binary mode to match save_database
            db = unmarshal.load(fh)
            fh.close()
            self.DEBUG('load_database: User database loaded', 'info')
            return True
        except:
            self.DEBUG('load_database: Could not load user database', 'error')
            return False

    def __str__(self):
        return str(db)

    @property
    def db(self):
        # Expose the module-level database for introspection.
        return db

    def print_database(self):
        print(str(self))
| lgpl-2.1 |
AugurProject/augur-prototype | test/test_augur.py | 1 | 6992 | #!/usr/bin/env python
"""
augur unit tests.
"""
from __future__ import division
import sys
try:
import cdecimal
sys.modules["decimal"] = cdecimal
except:
pass
import os
import platform
from decimal import Decimal
if platform.python_version() < "2.7":
unittest = __import__("unittest2")
else:
import unittest
HERE = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.join(HERE, os.pardir))
sys.path.insert(0, os.path.join(HERE, os.pardir, "augur"))
from augur import app, socketio, Api
class TestAugur(unittest.TestCase):
    """Socket.IO round-trip tests for the augur app.

    Each test emits an event through a socketio test client and inspects the
    first event received back.  Tests whose bodies are entirely commented out
    are disabled placeholders (they appear to require a running node --
    TODO confirm).
    """

    def setUp(self):
        # Fresh API object and socketio test client for every test.
        self.api = Api()
        self.ns = "/socket.io/"
        self.brainwallet = "testmagicbrainwallet"
        self.settings = {
            "host": "localhost",
            "port": 8899,
            "core_path": os.path.join(HERE, "core"),
        }
        self.vote_id = 1
        self.decision_id = 1
        self.state = 0
        app.config["TESTING"] = True
        # NOTE(review): self.ns is assigned twice in this method (same value).
        self.ns = "/socket.io/"
        self.client = socketio.test_client(app, namespace=self.ns)
        # Drain any events queued during connection.
        self.client.get_received(self.ns)
        # self.socket_emit_receive({
        #     "emit-name": "start",
        # })

    def socket_emit_receive(self, intake):
        """Emit the event named by intake['emit-name'] (remaining keys as
        payload) and return (label, data) of the first reply, or
        (None, None) when nothing was received."""
        label, data = None, None
        emit_name = intake.pop("emit-name", None)
        if intake:
            self.client.emit(emit_name, intake, namespace=self.ns)
        else:
            self.client.emit(emit_name, namespace=self.ns)
        received = self.client.get_received(self.ns)
        if received:
            # Replies usually carry a one-element args list; fall back to the
            # raw args when unpacking the first element fails.
            # NOTE(review): bare except -- hides unrelated errors; consider
            # narrowing to (IndexError, KeyError, TypeError).
            try:
                label = received[0]["name"]
                data = received[0]["args"][0]
            except:
                label = received[0]["name"]
                data = received[0]["args"]
        return label, data

    def test_settings(self):
        # Round-trip the settings and check they are echoed back unchanged.
        label, data = self.socket_emit_receive({
            "emit-name": "settings",
            "host": self.settings["host"],
            "port": self.settings["port"],
            "core_path": self.settings["core_path"],
        })
        self.assertEqual(label, "settings")
        self.assertIn("host", data)
        self.assertIn("port", data)
        self.assertIn("core_path", data)
        self.assertEqual(data["host"], self.settings["host"])
        self.assertEqual(data["port"], self.settings["port"])
        self.assertEqual(data["core_path"], self.settings["core_path"])

    def test_ping(self):
        # Any (label, data) reply counts as a successful ping.
        label, data = self.socket_emit_receive({
            "emit-name": "ping",
        })
        self.assertIsNotNone(label)
        self.assertIsNotNone(data)

    def test_get_account(self):
        label, data = self.socket_emit_receive({
            "emit-name": "get-account",
        })
        self.assertEqual(label, "account")
        self.assertIn("address", data)
        self.assertIn("privkey", data)
        self.assertIn("cash", data)
        self.assertIn("shares", data)
        self.assertIn("branches", data)
        self.assertIn("decisions", data)

    def test_update_account(self):
        # Disabled placeholder.
        pass
        # label, data = self.socket_emit_receive({
        #     "emit-name": "update-account",
        # })
        # self.assertEqual(label, "account")
        # self.assertIn("cash", data)
        # self.assertIn("shares", data)
        # self.assertIn("branches", data)

    def test_get_block(self):
        label, data = self.socket_emit_receive({
            "emit-name": "get-block",
            "block_number": 1,
        })
        self.assertEqual(label, "block")
        self.assertIsNotNone(data)

    def test_peers(self):
        label, data = self.socket_emit_receive({
            "emit-name": "peers",
        })
        self.assertEqual(label, "peers")
        self.assertIsNotNone(data)

    def test_blockcount(self):
        label, data = self.socket_emit_receive({
            "emit-name": "blockcount",
        })
        self.assertEqual(label, "blockcount")
        self.assertIsNotNone(data)

    def test_report(self):
        # Fire-and-forget: no reply is asserted for "report".
        self.socket_emit_receive({
            "emit-name": "report",
            "vote_id": self.vote_id,
            "decision_id": self.decision_id,
            "state": self.state,
        })

    def test_explore_block(self):
        label, data = self.socket_emit_receive({
            "emit-name": "explore-block",
            "block_number": 1,
        })
        self.assertEqual(label, "show-block")
        self.assertIsNotNone(data)

    def test_start(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "start",
        #     "password": self.brainwallet,
        # })

    def test_stop(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "stop",
        # })

    def test_miner(self):
        # Disabled placeholder.
        pass
        # label, data = self.socket_emit_receive({
        #     "emit-name": "miner",
        #     "arg": "start",
        # })
        # self.assertEqual(label, "miner")
        # self.assertEqual(data, "on")

    def test_send_cash(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "send-cash",
        #     "amount": 1,
        #     "address": 0,
        # })

    def test_send_reps(self):
        # Disabled placeholder.
        # NOTE(review): the commented body emits "send-cash", not "send-reps".
        pass
        # self.socket_emit_receive({
        #     "emit-name": "send-cash",
        #     "amount": 1,
        #     "address": 0,
        #     "branch": "random",
        # })

    def test_create_branch(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "create-branch",
        #     "name": "potentpotables",
        # })

    def test_add_decision(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "add-decision",
        #     "branchId": "random",
        #     "decisionMaturation": 14000,
        #     "decisionId": "somerandomhex",
        #     "decisionText": "Is the cake a lie?",
        # })

    def test_add_market(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "add-market",
        #     "decisionId": "7928d7317255e8a7",
        #     "marketInv": 10,
        # })

    def test_update_market(self):
        # Disabled placeholder.
        pass
        # label, data = self.socket_emit_receive({
        #     "emit-name": "update-market",
        #     "id": "6d5ff28910cc0949" + ".market",
        # })
        # self.assertEqual(label, "market")
        # self.assertIsNotNone(data)

    def test_trade(self):
        # Disabled placeholder.
        pass
        # self.socket_emit_receive({
        #     "emit-name": "trade",
        #     "marketId": "1f56c5596200f0f9.market",
        #     "marketState": "0",
        #     "tradeAmount": "100",
        #     "tradeType": "buy",
        # })

    def tearDown(self):
        # self.socket_emit_receive({
        #     "emit-name": "stop",
        # })
        del self.api
        del self.client
if __name__ == "__main__":
    # Run the suite with verbose output when executed directly.
    suite = unittest.TestLoader().loadTestsFromTestCase(TestAugur)
    unittest.TextTestRunner(verbosity=2).run(suite)
| mit |
IIS-DIRL/Python-Tools | misc/img_resize.py | 2 | 2012 | import os
import sys
import glob
import logging
import argparse
from PIL import Image
def argparser():
    """Build the command-line parser for the image-resize tool.

    Options:
      -i/--image      explicit image files to process
      -r/--recursive  directories whose images are processed recursively
      --width/--height  target size (default 320x240)

    BUGFIX: the description string was copy-pasted from an unrelated tool
    ("interactive graph cut for moth image"); it now describes this script.
    """
    parser = argparse.ArgumentParser(description='resize images to a fixed width/height')
    parser.add_argument('-i', '--image', help='process input image',
                        nargs='+', default=[])
    parser.add_argument('-r', '--recursive', help='process all image in given directory',
                        nargs='+', default=[])
    parser.add_argument('--height', help='the height of resized image',
                        type=int, default=240)
    parser.add_argument('--width', help='the width of resized image',
                        type=int, default=320)
    return parser
def main(args):
    """Resize every requested image to args.width x args.height.

    Each resized copy is written to a '<width>x<height>' subdirectory next to
    the original file, keeping the original name.

    Improvements: portable path handling via os.path (the original split on
    '/', which breaks on Windows), exist_ok=True instead of an exists-check
    race, and the lambda-based flatten removed.
    """
    saved_path = '{}x{}'.format(args.width, args.height)
    imgs = [os.path.abspath(img) for img in args.image]
    for repo in args.recursive:
        for ext in ('jpg', 'jpeg', 'png'):
            pattern = os.path.join(os.path.abspath(repo), '**', '*.' + ext)
            imgs += glob.glob(pattern, recursive=True)
    for i, img in enumerate(imgs):
        # Mirror the source layout: <dir>/<WxH>/<name>.
        saved_dir = os.path.join(os.path.dirname(img), saved_path)
        saved_img_path = os.path.join(saved_dir, os.path.basename(img))
        os.makedirs(saved_dir, exist_ok=True)
        im = Image.open(img)
        im = im.resize((args.width, args.height), Image.BILINEAR)
        im.save(saved_img_path)
        logging.info('({}/{}) Saved {} with size ({})'.format(
            i+1, len(imgs), os.path.basename(img), im.size))
if __name__ == '__main__':
    # Log to stdout with timestamps at INFO level so each saved file is
    # reported as the script runs.
    logging.basicConfig(
        level=logging.INFO,
        format='%(asctime)s [ %(levelname)8s ] - %(message)s',
        datefmt='%Y-%m-%d %H:%M:%S',
        stream=sys.stdout
    )
    parser = argparser()
    main(parser.parse_args())
| apache-2.0 |
turon/openthread | third_party/mbedtls/repo/tests/scripts/test_generate_test_code.py | 9 | 54564 | #!/usr/bin/env python3
# Unit test for generate_test_code.py
#
# Copyright (C) 2018, Arm Limited, All Rights Reserved
# SPDX-License-Identifier: Apache-2.0
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# This file is part of Mbed TLS (https://tls.mbed.org)
"""
Unit tests for generate_test_code.py
"""
try:
# Python 2
from StringIO import StringIO
except ImportError:
# Python 3
from io import StringIO
from unittest import TestCase, main as unittest_main
try:
# Python 2
from mock import patch
except ImportError:
# Python 3
from unittest.mock import patch
from generate_test_code import gen_dependencies, gen_dependencies_one_line
from generate_test_code import gen_function_wrapper, gen_dispatch
from generate_test_code import parse_until_pattern, GeneratorInputError
from generate_test_code import parse_suite_dependencies
from generate_test_code import parse_function_dependencies
from generate_test_code import parse_function_arguments, parse_function_code
from generate_test_code import parse_functions, END_HEADER_REGEX
from generate_test_code import END_SUITE_HELPERS_REGEX, escaped_split
from generate_test_code import parse_test_data, gen_dep_check
from generate_test_code import gen_expression_check, write_dependencies
from generate_test_code import write_parameters, gen_suite_dep_checks
from generate_test_code import gen_from_test_data
class GenDep(TestCase):
    """
    Test suite for gen_dependencies().

    NOTE(review): class/method docstrings referred to a function gen_dep();
    the function actually exercised is gen_dependencies().
    """

    def test_dependencies_list(self):
        """
        Test guards generated for a list of enabled dependencies:
        one #if defined(...) per dependency, closed in reverse order.
        :return:
        """
        dependencies = ['DEP1', 'DEP2']
        dep_start, dep_end = gen_dependencies(dependencies)
        preprocessor1, preprocessor2 = dep_start.splitlines()
        endif1, endif2 = dep_end.splitlines()
        self.assertEqual(preprocessor1, '#if defined(DEP1)',
                         'Preprocessor generated incorrectly')
        self.assertEqual(preprocessor2, '#if defined(DEP2)',
                         'Preprocessor generated incorrectly')
        self.assertEqual(endif1, '#endif /* DEP2 */',
                         'Preprocessor generated incorrectly')
        self.assertEqual(endif2, '#endif /* DEP1 */',
                         'Preprocessor generated incorrectly')

    def test_disabled_dependencies_list(self):
        """
        Test guards generated for negated ('!'-prefixed) dependencies:
        each becomes #if !defined(...).
        :return:
        """
        dependencies = ['!DEP1', '!DEP2']
        dep_start, dep_end = gen_dependencies(dependencies)
        preprocessor1, preprocessor2 = dep_start.splitlines()
        endif1, endif2 = dep_end.splitlines()
        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
                         'Preprocessor generated incorrectly')
        self.assertEqual(preprocessor2, '#if !defined(DEP2)',
                         'Preprocessor generated incorrectly')
        self.assertEqual(endif1, '#endif /* !DEP2 */',
                         'Preprocessor generated incorrectly')
        self.assertEqual(endif2, '#endif /* !DEP1 */',
                         'Preprocessor generated incorrectly')

    def test_mixed_dependencies_list(self):
        """
        Test guards generated for a mix of enabled and negated dependencies.
        :return:
        """
        dependencies = ['!DEP1', 'DEP2']
        dep_start, dep_end = gen_dependencies(dependencies)
        preprocessor1, preprocessor2 = dep_start.splitlines()
        endif1, endif2 = dep_end.splitlines()
        self.assertEqual(preprocessor1, '#if !defined(DEP1)',
                         'Preprocessor generated incorrectly')
        self.assertEqual(preprocessor2, '#if defined(DEP2)',
                         'Preprocessor generated incorrectly')
        self.assertEqual(endif1, '#endif /* DEP2 */',
                         'Preprocessor generated incorrectly')
        self.assertEqual(endif2, '#endif /* !DEP1 */',
                         'Preprocessor generated incorrectly')

    def test_empty_dependencies_list(self):
        """
        Test that an empty dependency list produces empty guard strings.
        :return:
        """
        dependencies = []
        dep_start, dep_end = gen_dependencies(dependencies)
        self.assertEqual(dep_start, '', 'Preprocessor generated incorrectly')
        self.assertEqual(dep_end, '', 'Preprocessor generated incorrectly')

    def test_large_dependencies_list(self):
        """
        Test that N dependencies produce exactly N #if and N #endif lines.
        :return:
        """
        dependencies = []
        count = 10
        for i in range(count):
            dependencies.append('DEP%d' % i)
        dep_start, dep_end = gen_dependencies(dependencies)
        self.assertEqual(len(dep_start.splitlines()), count,
                         'Preprocessor generated incorrectly')
        self.assertEqual(len(dep_end.splitlines()), count,
                         'Preprocessor generated incorrectly')
class GenDepOneLine(TestCase):
    """
    Test Suite for testing gen_dependencies_one_line().
    """

    def test_dependencies_list(self):
        """
        Test that enabled dependencies are joined into a single
        '#if defined(...) && defined(...)' line.
        :return:
        """
        dependencies = ['DEP1', 'DEP2']
        dep_str = gen_dependencies_one_line(dependencies)
        self.assertEqual(dep_str, '#if defined(DEP1) && defined(DEP2)',
                         'Preprocessor generated incorrectly')

    def test_disabled_dependencies_list(self):
        """
        Test that negated ('!'-prefixed) dependencies produce !defined(...)
        terms on the single guard line.
        :return:
        """
        dependencies = ['!DEP1', '!DEP2']
        dep_str = gen_dependencies_one_line(dependencies)
        self.assertEqual(dep_str, '#if !defined(DEP1) && !defined(DEP2)',
                         'Preprocessor generated incorrectly')

    def test_mixed_dependencies_list(self):
        """
        Test a mix of enabled and negated dependencies on one guard line.
        :return:
        """
        dependencies = ['!DEP1', 'DEP2']
        dep_str = gen_dependencies_one_line(dependencies)
        self.assertEqual(dep_str, '#if !defined(DEP1) && defined(DEP2)',
                         'Preprocessor generated incorrectly')

    def test_empty_dependencies_list(self):
        """
        Test that an empty dependency list produces an empty string.
        :return:
        """
        dependencies = []
        dep_str = gen_dependencies_one_line(dependencies)
        self.assertEqual(dep_str, '', 'Preprocessor generated incorrectly')

    def test_large_dependencies_list(self):
        """
        Test that N dependencies are all joined with ' && ' on one line.
        :return:
        """
        dependencies = []
        count = 10
        for i in range(count):
            dependencies.append('DEP%d' % i)
        dep_str = gen_dependencies_one_line(dependencies)
        expected = '#if ' + ' && '.join(['defined(%s)' %
                                         x for x in dependencies])
        self.assertEqual(dep_str, expected,
                         'Preprocessor generated incorrectly')
class GenFunctionWrapper(TestCase):
    """
    Test Suite for testing gen_function_wrapper().

    NOTE(review): the test_local and test_empty_params docstrings were
    copy-pasted from test_params_unpack; corrected below.
    """

    def test_params_unpack(self):
        """
        Test that params are properly unpacked in the function call.
        :return:
        """
        code = gen_function_wrapper('test_a', '', ('a', 'b', 'c', 'd'))
        expected = '''
void test_a_wrapper( void ** params )
{
test_a( a, b, c, d );
}
'''
        self.assertEqual(code, expected)

    def test_local(self):
        """
        Test that local variable declarations are emitted before the call.
        :return:
        """
        code = gen_function_wrapper('test_a',
                                    'int x = 1;', ('x', 'b', 'c', 'd'))
        expected = '''
void test_a_wrapper( void ** params )
{
int x = 1;
test_a( x, b, c, d );
}
'''
        self.assertEqual(code, expected)

    def test_empty_params(self):
        """
        Test the wrapper generated for a function with no parameters:
        params must be cast to void and the call takes no arguments.
        :return:
        """
        code = gen_function_wrapper('test_a', '', ())
        expected = '''
void test_a_wrapper( void ** params )
{
(void)params;
test_a( );
}
'''
        self.assertEqual(code, expected)
class GenDispatch(TestCase):
    """
    Test suite for testing gen_dispatch()
    """

    def test_dispatch(self):
        """
        Test that a dispatch table entry is wrapped in the dependency
        guards, with NULL in the #else branch.
        :return:
        """
        code = gen_dispatch('test_a', ['DEP1', 'DEP2'])
        expected = '''
#if defined(DEP1) && defined(DEP2)
test_a_wrapper,
#else
NULL,
#endif
'''
        self.assertEqual(code, expected)

    def test_empty_dependencies(self):
        """
        Test that an empty dependency list yields an unguarded entry.
        :return:
        """
        code = gen_dispatch('test_a', [])
        expected = '''
test_a_wrapper,
'''
        self.assertEqual(code, expected)
class StringIOWrapper(StringIO, object):
    """
    File-like class used to mock source files in tests.

    Carries a file name and a current line number (line_no) that the code
    under test uses for error reporting and #line directives.
    """

    def __init__(self, file_name, data, line_no=0):
        """
        Init file handle.
        :param file_name: name reported for this pseudo file
        :param data: initial file content
        :param line_no: line number to start counting from
        """
        super(StringIOWrapper, self).__init__(data)
        self.line_no = line_no
        self.name = file_name

    def next(self):
        """
        Iterator method bridging Python 2 (next) and Python 3 (__next__)
        by delegating to whichever the base class provides.
        NOTE(review): unlike readline(), this path does NOT increment
        line_no; only reads through readline() are counted.
        :return: Line read from file.
        """
        parent = super(StringIOWrapper, self)
        if getattr(parent, 'next', None):
            # Python 2
            line = parent.next()
        else:
            # Python 3
            line = parent.__next__()
        return line

    # Python 3
    __next__ = next

    def readline(self, length=0):
        """
        Wrap the base class readline and count the lines read.
        NOTE(review): readline() returns '' (not None) at EOF, so line_no
        also advances on reads past the end of the data.
        :param length: ignored; kept for signature compatibility
        :return: the next line read
        """
        line = super(StringIOWrapper, self).readline()
        if line is not None:
            self.line_no += 1
        return line
class ParseUntilPattern(TestCase):
    """
    Test Suite for testing parse_until_pattern().
    """

    def test_suite_headers(self):
        """
        Test that suite headers are parsed correctly: everything up to
        the END_HEADER marker is returned, prefixed with a #line
        directive naming the source .function file.
        :return:
        """
        data = '''#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
/* END_HEADER */
'''
        expected = '''#line 1 "test_suite_ut.function"
#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
'''
        stream = StringIOWrapper('test_suite_ut.function', data, line_no=0)
        headers = parse_until_pattern(stream, END_HEADER_REGEX)
        self.assertEqual(headers, expected)

    def test_line_no(self):
        """
        Test that #line is set to correct line no. in source .function file.
        :return:
        """
        data = '''#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
/* END_HEADER */
'''
        offset_line_no = 5
        # The stream starts at offset_line_no, so the emitted #line
        # directive must point at the following line.
        expected = '''#line %d "test_suite_ut.function"
#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
''' % (offset_line_no + 1)
        stream = StringIOWrapper('test_suite_ut.function', data,
                                 offset_line_no)
        headers = parse_until_pattern(stream, END_HEADER_REGEX)
        self.assertEqual(headers, expected)

    def test_no_end_header_comment(self):
        """
        Test that GeneratorInputError is raised when the end header
        comment is missing.
        :return:
        """
        data = '''#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(GeneratorInputError, parse_until_pattern, stream,
                          END_HEADER_REGEX)
class ParseSuiteDependencies(TestCase):
    """
    Test Suite for testing parse_suite_dependencies().
    """

    def test_suite_dependencies(self):
        """
        Test that a single suite dependency is extracted from the
        BEGIN_DEPENDENCIES comment block.
        :return:
        """
        data = '''
 * depends_on:MBEDTLS_ECP_C
 * END_DEPENDENCIES
 */
'''
        expected = ['MBEDTLS_ECP_C']
        stream = StringIOWrapper('test_suite_ut.function', data)
        dependencies = parse_suite_dependencies(stream)
        self.assertEqual(dependencies, expected)

    def test_no_end_dep_comment(self):
        """
        Test that GeneratorInputError is raised when the end dep
        comment is missing.
        :return:
        """
        data = '''
* depends_on:MBEDTLS_ECP_C
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(GeneratorInputError, parse_suite_dependencies,
                          stream)

    def test_dependencies_split(self):
        """
        Test that the dependency list is split on ':' and each entry
        is stripped of surrounding whitespace.
        :return:
        """
        data = '''
 * depends_on:MBEDTLS_ECP_C:A:B: C : D :F : G: !H
 * END_DEPENDENCIES
 */
'''
        expected = ['MBEDTLS_ECP_C', 'A', 'B', 'C', 'D', 'F', 'G', '!H']
        stream = StringIOWrapper('test_suite_ut.function', data)
        dependencies = parse_suite_dependencies(stream)
        self.assertEqual(dependencies, expected)
class ParseFuncDependencies(TestCase):
    """
    Test Suite for testing parse_function_dependencies()
    """

    def test_function_dependencies(self):
        """
        Test that parse_function_dependencies() correctly parses function
        dependencies from a BEGIN_CASE comment.
        :return:
        """
        line = '/* BEGIN_CASE ' \
               'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */'
        expected = ['MBEDTLS_ENTROPY_NV_SEED', 'MBEDTLS_FS_IO']
        dependencies = parse_function_dependencies(line)
        self.assertEqual(dependencies, expected)

    def test_no_dependencies(self):
        """
        Test that a BEGIN_CASE comment without a depends_on clause
        yields an empty dependency list.
        :return:
        """
        line = '/* BEGIN_CASE */'
        dependencies = parse_function_dependencies(line)
        self.assertEqual(dependencies, [])

    def test_tolerance(self):
        """
        Test that stray whitespace around dependency names is
        tolerated and stripped.
        :return:
        """
        line = '/* BEGIN_CASE depends_on:MBEDTLS_FS_IO: A : !B:C : F*/'
        dependencies = parse_function_dependencies(line)
        self.assertEqual(dependencies, ['MBEDTLS_FS_IO', 'A', '!B', 'C', 'F'])
class ParseFuncSignature(TestCase):
    """
    Test Suite for parse_function_arguments().
    """

    def test_int_and_char_params(self):
        """
        Test int and char parameters parsing.  ints are dereferenced
        from the params array; char* is a plain cast.
        :return:
        """
        line = 'void entropy_threshold( char * a, int b, int result )'
        args, local, arg_dispatch = parse_function_arguments(line)
        self.assertEqual(args, ['char*', 'int', 'int'])
        self.assertEqual(local, '')
        self.assertEqual(arg_dispatch, ['(char *) params[0]',
                                        '*( (int *) params[1] )',
                                        '*( (int *) params[2] )'])

    def test_hex_params(self):
        """
        Test hex parameters parsing: a data_t argument consumes two
        params slots (pointer + length) via a generated local.
        :return:
        """
        line = 'void entropy_threshold( char * a, data_t * h, int result )'
        args, local, arg_dispatch = parse_function_arguments(line)
        self.assertEqual(args, ['char*', 'hex', 'int'])
        self.assertEqual(local,
                         '  data_t data1 = {(uint8_t *) params[1], '
                         '*( (uint32_t *) params[2] )};\n')
        self.assertEqual(arg_dispatch, ['(char *) params[0]',
                                        '&data1',
                                        '*( (int *) params[3] )'])

    def test_unsupported_arg(self):
        """
        Test unsupported arguments (not among int, char * and data_t)
        :return:
        """
        line = 'void entropy_threshold( char * a, data_t * h, char result )'
        self.assertRaises(ValueError, parse_function_arguments, line)

    def test_no_params(self):
        """
        Test no parameters.
        :return:
        """
        line = 'void entropy_threshold()'
        args, local, arg_dispatch = parse_function_arguments(line)
        self.assertEqual(args, [])
        self.assertEqual(local, '')
        self.assertEqual(arg_dispatch, [])
class ParseFunctionCode(TestCase):
    """
    Test suite for testing parse_function_code()
    """

    def assert_raises_regex(self, exp, regex, func, *args):
        """
        Python 2 & 3 portable wrapper of assertRaisesRegex(p)? function.
        :param exp: Exception type expected to be raised by cb.
        :param regex: Expected exception message
        :param func: callable object under test
        :param args: variable positional arguments
        """
        parent = super(ParseFunctionCode, self)
        # Pylint does not appreciate that the super method called
        # conditionally can be available in other Python version
        # then that of Pylint.
        # Workaround is to call the method via getattr.
        # Pylint ignores that the method got via getattr is
        # conditionally executed. Method has to be a callable.
        # Hence, using a dummy callable for getattr default.
        dummy = lambda *x: None
        # First Python 3 assertRaisesRegex is checked, since Python 2
        # assertRaisesRegexp is also available in Python 3 but is
        # marked deprecated.
        for name in ('assertRaisesRegex', 'assertRaisesRegexp'):
            method = getattr(parent, name, dummy)
            if method is not dummy:
                method(exp, regex, func, *args)
                break
        else:
            # for-else: neither spelling was found on the base class.
            raise AttributeError(" 'ParseFunctionCode' object has no attribute"
                                 " 'assertRaisesRegex' or 'assertRaisesRegexp'"
                                 )

    def test_no_function(self):
        """
        Test no test function found.
        :return:
        """
        data = '''
No
test
function
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        err_msg = 'file: test_suite_ut.function - Test functions not found!'
        self.assert_raises_regex(GeneratorInputError, err_msg,
                                 parse_function_code, stream, [], [])

    def test_no_end_case_comment(self):
        """
        Test missing end case.
        :return:
        """
        data = '''
void test_func()
{
}
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        err_msg = r'file: test_suite_ut.function - '\
                  'end case pattern .*? not found!'
        self.assert_raises_regex(GeneratorInputError, err_msg,
                                 parse_function_code, stream, [], [])

    @patch("generate_test_code.parse_function_arguments")
    def test_function_called(self,
                             parse_function_arguments_mock):
        """
        Test that parse_function_code() invokes the argument parser on
        the signature line (and still fails on the missing END_CASE).
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        data = '''
void test_func()
{
}
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(GeneratorInputError, parse_function_code,
                          stream, [], [])
        self.assertTrue(parse_function_arguments_mock.called)
        parse_function_arguments_mock.assert_called_with('void test_func()\n')

    # Note: @patch decorators apply bottom-up, so the mock parameters
    # below are listed in reverse decorator order.
    @patch("generate_test_code.gen_dispatch")
    @patch("generate_test_code.gen_dependencies")
    @patch("generate_test_code.gen_function_wrapper")
    @patch("generate_test_code.parse_function_arguments")
    def test_return(self, parse_function_arguments_mock,
                    gen_function_wrapper_mock,
                    gen_dependencies_mock,
                    gen_dispatch_mock):
        """
        Test generated code.
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        gen_function_wrapper_mock.return_value = ''
        # Forward the gen_* mocks to the real implementations so the
        # produced code/dispatch text can be checked end to end.
        gen_dependencies_mock.side_effect = gen_dependencies
        gen_dispatch_mock.side_effect = gen_dispatch
        data = '''
void func()
{
ba ba black sheep
have you any wool
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        name, arg, code, dispatch_code = parse_function_code(stream, [], [])

        self.assertTrue(parse_function_arguments_mock.called)
        parse_function_arguments_mock.assert_called_with('void func()\n')
        gen_function_wrapper_mock.assert_called_with('test_func', '', [])
        # 'func' gets the 'test_' prefix and an exit label is injected.
        self.assertEqual(name, 'test_func')
        self.assertEqual(arg, [])
        expected = '''#line 1 "test_suite_ut.function"
void test_func()
{
ba ba black sheep
have you any wool
exit:
;
}
'''
        self.assertEqual(code, expected)
        self.assertEqual(dispatch_code, "\n test_func_wrapper,\n")

    @patch("generate_test_code.gen_dispatch")
    @patch("generate_test_code.gen_dependencies")
    @patch("generate_test_code.gen_function_wrapper")
    @patch("generate_test_code.parse_function_arguments")
    def test_with_exit_label(self, parse_function_arguments_mock,
                             gen_function_wrapper_mock,
                             gen_dependencies_mock,
                             gen_dispatch_mock):
        """
        Test when exit label is present: no second label is injected.
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        gen_function_wrapper_mock.return_value = ''
        gen_dependencies_mock.side_effect = gen_dependencies
        gen_dispatch_mock.side_effect = gen_dispatch
        data = '''
void func()
{
ba ba black sheep
have you any wool
exit:
yes sir yes sir
3 bags full
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        _, _, code, _ = parse_function_code(stream, [], [])

        expected = '''#line 1 "test_suite_ut.function"
void test_func()
{
ba ba black sheep
have you any wool
exit:
yes sir yes sir
3 bags full
}
'''
        self.assertEqual(code, expected)

    def test_non_void_function(self):
        """
        Test invalid signature (non void).
        :return:
        """
        data = 'int entropy_threshold( char * a, data_t * h, int result )'
        err_msg = 'file: test_suite_ut.function - Test functions not found!'
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assert_raises_regex(GeneratorInputError, err_msg,
                                 parse_function_code, stream, [], [])

    # NOTE(review): method name has a typo ('functio'); kept as-is since
    # renaming is out of scope for a documentation-only change.
    @patch("generate_test_code.gen_dispatch")
    @patch("generate_test_code.gen_dependencies")
    @patch("generate_test_code.gen_function_wrapper")
    @patch("generate_test_code.parse_function_arguments")
    def test_functio_name_on_newline(self, parse_function_arguments_mock,
                                     gen_function_wrapper_mock,
                                     gen_dependencies_mock,
                                     gen_dispatch_mock):
        """
        Test parsing when the function name is on the line after the
        'void' return type.
        :return:
        """
        parse_function_arguments_mock.return_value = ([], '', [])
        gen_function_wrapper_mock.return_value = ''
        gen_dependencies_mock.side_effect = gen_dependencies
        gen_dispatch_mock.side_effect = gen_dispatch
        data = '''
void


func()
{
ba ba black sheep
have you any wool
exit:
yes sir yes sir
3 bags full
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        _, _, code, _ = parse_function_code(stream, [], [])

        expected = '''#line 1 "test_suite_ut.function"
void


test_func()
{
ba ba black sheep
have you any wool
exit:
yes sir yes sir
3 bags full
}
'''
        self.assertEqual(code, expected)
class ParseFunction(TestCase):
    """
    Test Suite for testing parse_functions()
    """

    @patch("generate_test_code.parse_until_pattern")
    def test_begin_header(self, parse_until_pattern_mock):
        """
        Test that begin header is checked and parse_until_pattern() is called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_until_pattern is called."""
            raise Exception
        parse_until_pattern_mock.side_effect = stop
        data = '''/* BEGIN_HEADER */
#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
/* END_HEADER */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        parse_until_pattern_mock.assert_called_with(stream, END_HEADER_REGEX)
        # Only the BEGIN_HEADER line was consumed before the mock fired.
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_until_pattern")
    def test_begin_helper(self, parse_until_pattern_mock):
        """
        Test that begin helper is checked and parse_until_pattern() is called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_until_pattern is called."""
            raise Exception
        parse_until_pattern_mock.side_effect = stop
        data = '''/* BEGIN_SUITE_HELPERS */
void print_hello_world()
{
printf("Hello World!\n");
}
/* END_SUITE_HELPERS */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        parse_until_pattern_mock.assert_called_with(stream,
                                                    END_SUITE_HELPERS_REGEX)
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_suite_dependencies")
    def test_begin_dep(self, parse_suite_dependencies_mock):
        """
        Test that begin dep is checked and parse_suite_dependencies() is
        called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_suite_dependencies is called."""
            raise Exception
        parse_suite_dependencies_mock.side_effect = stop
        data = '''/* BEGIN_DEPENDENCIES
 * depends_on:MBEDTLS_ECP_C
 * END_DEPENDENCIES
 */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        parse_suite_dependencies_mock.assert_called_with(stream)
        self.assertEqual(stream.line_no, 1)

    @patch("generate_test_code.parse_function_dependencies")
    def test_begin_function_dep(self, func_mock):
        """
        Test that begin dep is checked and parse_function_dependencies() is
        called.
        :return:
        """
        def stop(*_unused):
            """Stop when parse_function_dependencies is called."""
            raise Exception
        func_mock.side_effect = stop

        dependencies_str = '/* BEGIN_CASE ' \
            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
        data = '''%svoid test_func()
{
}
''' % dependencies_str
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(Exception, parse_functions, stream)
        func_mock.assert_called_with(dependencies_str)
        self.assertEqual(stream.line_no, 1)

    # @patch decorators apply bottom-up: func_mock1 is the
    # parse_function_dependencies mock, func_mock2 parse_function_code.
    @patch("generate_test_code.parse_function_code")
    @patch("generate_test_code.parse_function_dependencies")
    def test_return(self, func_mock1, func_mock2):
        """
        Test that begin case is checked and parse_function_code() is called.
        :return:
        """
        func_mock1.return_value = []
        in_func_code = '''void test_func()
{
}
'''
        func_dispatch = '''
test_func_wrapper,
'''
        func_mock2.return_value = 'test_func', [],\
            in_func_code, func_dispatch
        dependencies_str = '/* BEGIN_CASE ' \
            'depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */\n'
        data = '''%svoid test_func()
{
}
''' % dependencies_str
        stream = StringIOWrapper('test_suite_ut.function', data)
        suite_dependencies, dispatch_code, func_code, func_info = \
            parse_functions(stream)
        func_mock1.assert_called_with(dependencies_str)
        func_mock2.assert_called_with(stream, [], [])
        self.assertEqual(stream.line_no, 5)
        self.assertEqual(suite_dependencies, [])
        # A '/* Function Id: N */' banner is prepended per function.
        expected_dispatch_code = '''/* Function Id: 0 */

test_func_wrapper,
'''
        self.assertEqual(dispatch_code, expected_dispatch_code)
        self.assertEqual(func_code, in_func_code)
        self.assertEqual(func_info, {'test_func': (0, [])})

    def test_parsing(self):
        """
        Test case parsing end to end, with no mocks: headers, suite
        dependencies and two test cases.
        :return:
        """
        data = '''/* BEGIN_HEADER */
#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
/* END_HEADER */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func1()
{
}
/* END_CASE */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func2()
{
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        suite_dependencies, dispatch_code, func_code, func_info = \
            parse_functions(stream)
        self.assertEqual(stream.line_no, 23)
        self.assertEqual(suite_dependencies, ['MBEDTLS_ECP_C'])

        expected_dispatch_code = '''/* Function Id: 0 */

#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
test_func1_wrapper,
#else
NULL,
#endif
/* Function Id: 1 */

#if defined(MBEDTLS_ECP_C) && defined(MBEDTLS_ENTROPY_NV_SEED) && defined(MBEDTLS_FS_IO)
test_func2_wrapper,
#else
NULL,
#endif
'''
        self.assertEqual(dispatch_code, expected_dispatch_code)
        expected_func_code = '''#if defined(MBEDTLS_ECP_C)
#line 2 "test_suite_ut.function"
#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
#if defined(MBEDTLS_ENTROPY_NV_SEED)
#if defined(MBEDTLS_FS_IO)
#line 13 "test_suite_ut.function"
void test_func1()
{
exit:
;
}

void test_func1_wrapper( void ** params )
{
(void)params;

test_func1(  );
}
#endif /* MBEDTLS_FS_IO */
#endif /* MBEDTLS_ENTROPY_NV_SEED */
#if defined(MBEDTLS_ENTROPY_NV_SEED)
#if defined(MBEDTLS_FS_IO)
#line 19 "test_suite_ut.function"
void test_func2()
{
exit:
;
}

void test_func2_wrapper( void ** params )
{
(void)params;

test_func2(  );
}
#endif /* MBEDTLS_FS_IO */
#endif /* MBEDTLS_ENTROPY_NV_SEED */
#endif /* MBEDTLS_ECP_C */
'''
        self.assertEqual(func_code, expected_func_code)
        self.assertEqual(func_info, {'test_func1': (0, []),
                                     'test_func2': (1, [])})

    def test_same_function_name(self):
        """
        Test name conflict: two cases defining the same function must
        raise GeneratorInputError.
        :return:
        """
        data = '''/* BEGIN_HEADER */
#include "mbedtls/ecp.h"
#define ECP_PF_UNKNOWN -1
/* END_HEADER */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func()
{
}
/* END_CASE */

/* BEGIN_CASE depends_on:MBEDTLS_ENTROPY_NV_SEED:MBEDTLS_FS_IO */
void func()
{
}
/* END_CASE */
'''
        stream = StringIOWrapper('test_suite_ut.function', data)
        self.assertRaises(GeneratorInputError, parse_functions, stream)
class EscapedSplit(TestCase):
    """
    Test suite for testing escaped_split().
    Note: Since escaped_split() output is used to write back to the
    intermediate data file. Any escape characters in the input are
    retained in the output.
    """

    def test_invalid_input(self):
        """
        Test when input split character is not a character.
        :return:
        """
        self.assertRaises(ValueError, escaped_split, '', 'string')

    def test_empty_string(self):
        """
        Test empty string input.
        :return:
        """
        splits = escaped_split('', ':')
        self.assertEqual(splits, [])

    def test_no_escape(self):
        """
        Test with no escape character. The behaviour should be same as
        str.split()
        :return:
        """
        test_str = 'yahoo:google'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, test_str.split(':'))

    def test_escaped_input(self):
        """
        Test input that has an escaped delimiter; it must not split
        there and the backslash must be retained.
        :return:
        """
        test_str = r'yahoo\:google:facebook'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, [r'yahoo\:google', 'facebook'])

    def test_escaped_escape(self):
        """
        Test input where the escape character itself is escaped, so
        the following delimiter does split.
        :return:
        """
        test_str = r'yahoo\\:google:facebook'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, [r'yahoo\\', 'google', 'facebook'])

    def test_all_at_once(self):
        """
        Test a mix of escaped delimiters and escaped escapes.
        :return:
        """
        test_str = r'yahoo\\:google:facebook\:instagram\\:bbc\\:wikipedia'
        splits = escaped_split(test_str, ':')
        self.assertEqual(splits, [r'yahoo\\', r'google',
                                  r'facebook\:instagram\\',
                                  r'bbc\\', r'wikipedia'])
class ParseTestData(TestCase):
    """
    Test suite for parse test data.
    """

    def test_parser(self):
        """
        Test that tests are parsed correctly from data file.
        :return:
        """
        data = """
Diffie-Hellman full exchange #1
dhm_do_dhm:10:"23":10:"5"

Diffie-Hellman full exchange #2
dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"

Diffie-Hellman full exchange #3
dhm_do_dhm:10:"9345098382739712938719287391879381271":10:"9345098792137312973297123912791271"

Diffie-Hellman selftest
dhm_selftest:
"""
        stream = StringIOWrapper('test_suite_ut.function', data)
        # parse_test_data() is a generator; materialise all tests.
        tests = [(name, test_function, dependencies, args)
                 for name, test_function, dependencies, args in
                 parse_test_data(stream)]
        test1, test2, test3, test4 = tests
        self.assertEqual(test1[0], 'Diffie-Hellman full exchange #1')
        self.assertEqual(test1[1], 'dhm_do_dhm')
        self.assertEqual(test1[2], [])
        self.assertEqual(test1[3], ['10', '"23"', '10', '"5"'])

        self.assertEqual(test2[0], 'Diffie-Hellman full exchange #2')
        self.assertEqual(test2[1], 'dhm_do_dhm')
        self.assertEqual(test2[2], [])
        self.assertEqual(test2[3], ['10', '"93450983094850938450983409623"',
                                    '10', '"9345098304850938450983409622"'])

        self.assertEqual(test3[0], 'Diffie-Hellman full exchange #3')
        self.assertEqual(test3[1], 'dhm_do_dhm')
        self.assertEqual(test3[2], [])
        self.assertEqual(test3[3], ['10',
                                    '"9345098382739712938719287391879381271"',
                                    '10',
                                    '"9345098792137312973297123912791271"'])

        self.assertEqual(test4[0], 'Diffie-Hellman selftest')
        self.assertEqual(test4[1], 'dhm_selftest')
        self.assertEqual(test4[2], [])
        self.assertEqual(test4[3], [])

    def test_with_dependencies(self):
        """
        Test that tests with dependencies are parsed.
        :return:
        """
        data = """
Diffie-Hellman full exchange #1
depends_on:YAHOO
dhm_do_dhm:10:"23":10:"5"

Diffie-Hellman full exchange #2
dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"

"""
        stream = StringIOWrapper('test_suite_ut.function', data)
        tests = [(name, function_name, dependencies, args)
                 for name, function_name, dependencies, args in
                 parse_test_data(stream)]
        test1, test2 = tests
        self.assertEqual(test1[0], 'Diffie-Hellman full exchange #1')
        self.assertEqual(test1[1], 'dhm_do_dhm')
        self.assertEqual(test1[2], ['YAHOO'])
        self.assertEqual(test1[3], ['10', '"23"', '10', '"5"'])

        self.assertEqual(test2[0], 'Diffie-Hellman full exchange #2')
        self.assertEqual(test2[1], 'dhm_do_dhm')
        self.assertEqual(test2[2], [])
        self.assertEqual(test2[3], ['10', '"93450983094850938450983409623"',
                                    '10', '"9345098304850938450983409622"'])

    def test_no_args(self):
        """
        Test GeneratorInputError is raised when test function name and
        args line is missing.
        :return:
        """
        data = """
Diffie-Hellman full exchange #1
depends_on:YAHOO


Diffie-Hellman full exchange #2
dhm_do_dhm:10:"93450983094850938450983409623":10:"9345098304850938450983409622"

"""
        stream = StringIOWrapper('test_suite_ut.function', data)
        # Bug fix: the previous 'except ... as err' pattern only asserted
        # inside the except clause, so the test silently passed when no
        # exception was raised.  assertRaises makes the expectation explicit.
        with self.assertRaises(GeneratorInputError):
            for _, _, _, _ in parse_test_data(stream):
                pass

    def test_incomplete_data(self):
        """
        Test GeneratorInputError is raised when test function name
        and args line is missing.
        :return:
        """
        data = """
Diffie-Hellman full exchange #1
depends_on:YAHOO
"""
        stream = StringIOWrapper('test_suite_ut.function', data)
        # Same fix as test_no_args: require the exception to be raised.
        with self.assertRaises(GeneratorInputError):
            for _, _, _, _ in parse_test_data(stream):
                pass
class GenDepCheck(TestCase):
    """
    Test suite for gen_dep_check(). It is assumed this function is
    called with valid inputs.
    """

    def test_gen_dep_check(self):
        """
        Test that dependency check code generated correctly.
        :return:
        """
        expected = """
        case 5:
            {
#if defined(YAHOO)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;"""
        out = gen_dep_check(5, 'YAHOO')
        self.assertEqual(out, expected)

    def test_not_defined_dependency(self):
        """
        Test dependency with ! prefix: the generated guard is negated.
        :return:
        """
        expected = """
        case 5:
            {
#if !defined(YAHOO)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;"""
        out = gen_dep_check(5, '!YAHOO')
        self.assertEqual(out, expected)

    def test_empty_dependency(self):
        """
        Test invalid dependency input: bare '!' is rejected.
        :return:
        """
        self.assertRaises(GeneratorInputError, gen_dep_check, 5, '!')

    def test_negative_dep_id(self):
        """
        Test invalid dependency input: negative id is rejected.
        :return:
        """
        self.assertRaises(GeneratorInputError, gen_dep_check, -1, 'YAHOO')
class GenExpCheck(TestCase):
    """
    Test suite for gen_expression_check(). It is assumed this function
    is called with valid inputs.
    """

    def test_gen_exp_check(self):
        """
        Test that expression check code generated correctly.
        :return:
        """
        expected = """
        case 5:
            {
                *out_value = YAHOO;
            }
            break;"""
        out = gen_expression_check(5, 'YAHOO')
        self.assertEqual(out, expected)

    def test_invalid_expression(self):
        """
        Test invalid expression input: empty string is rejected.
        :return:
        """
        self.assertRaises(GeneratorInputError, gen_expression_check, 5, '')

    def test_negative_exp_id(self):
        """
        Test invalid expression id: negative id is rejected.
        :return:
        """
        self.assertRaises(GeneratorInputError, gen_expression_check,
                          -1, 'YAHOO')
class WriteDependencies(TestCase):
    """
    Test suite for testing write_dependencies.
    """

    def test_no_test_dependencies(self):
        """
        Test when test dependencies input is empty: nothing is written
        and no check code is produced.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_dependencies = []
        dep_check_code = write_dependencies(stream, [], unique_dependencies)
        self.assertEqual(dep_check_code, '')
        self.assertEqual(len(unique_dependencies), 0)
        self.assertEqual(stream.getvalue(), '')

    def test_unique_dep_ids(self):
        """
        Test that each new dependency gets a fresh numeric id and a
        matching check-code case, and ids are written to the stream.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_dependencies = []
        dep_check_code = write_dependencies(stream, ['DEP3', 'DEP2', 'DEP1'],
                                            unique_dependencies)
        expect_dep_check_code = '''
        case 0:
            {
#if defined(DEP3)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 1:
            {
#if defined(DEP2)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 2:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        self.assertEqual(dep_check_code, expect_dep_check_code)
        self.assertEqual(len(unique_dependencies), 3)
        self.assertEqual(stream.getvalue(), 'depends_on:0:1:2\n')

    def test_dep_id_repeat(self):
        """
        Test that repeated dependencies across calls reuse their
        existing ids and generate no duplicate check code.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_dependencies = []
        dep_check_code = ''
        dep_check_code += write_dependencies(stream, ['DEP3', 'DEP2'],
                                             unique_dependencies)
        dep_check_code += write_dependencies(stream, ['DEP2', 'DEP1'],
                                             unique_dependencies)
        dep_check_code += write_dependencies(stream, ['DEP1', 'DEP3'],
                                             unique_dependencies)
        expect_dep_check_code = '''
        case 0:
            {
#if defined(DEP3)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 1:
            {
#if defined(DEP2)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 2:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        self.assertEqual(dep_check_code, expect_dep_check_code)
        self.assertEqual(len(unique_dependencies), 3)
        self.assertEqual(stream.getvalue(),
                         'depends_on:0:1\ndepends_on:1:2\ndepends_on:2:0\n')
class WriteParams(TestCase):
    """
    Test Suite for testing write_parameters().
    """

    def test_no_params(self):
        """
        Test with empty test_args: only a newline is written and no
        expression code is produced.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream, [], [], unique_expressions)
        self.assertEqual(len(unique_expressions), 0)
        self.assertEqual(expression_code, '')
        self.assertEqual(stream.getvalue(), '\n')

    def test_no_exp_param(self):
        """
        Test when there is no macro or expression in the params:
        values are written with their type tags, no 'exp' entries.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream, ['"Yahoo"', '"abcdef00"',
                                                    '0'],
                                           ['char*', 'hex', 'int'],
                                           unique_expressions)
        self.assertEqual(len(unique_expressions), 0)
        self.assertEqual(expression_code, '')
        self.assertEqual(stream.getvalue(),
                         ':char*:"Yahoo":hex:"abcdef00":int:0\n')

    def test_hex_format_int_param(self):
        """
        Test that an int parameter in hex format is passed through as
        a literal, not treated as an expression.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream,
                                           ['"Yahoo"', '"abcdef00"', '0xAA'],
                                           ['char*', 'hex', 'int'],
                                           unique_expressions)
        self.assertEqual(len(unique_expressions), 0)
        self.assertEqual(expression_code, '')
        self.assertEqual(stream.getvalue(),
                         ':char*:"Yahoo":hex:"abcdef00":int:0xAA\n')

    def test_with_exp_param(self):
        """
        Test when there is macro or expression in the params: each one
        gets an 'exp' id and a matching *out_value case.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = write_parameters(stream,
                                           ['"Yahoo"', '"abcdef00"', '0',
                                            'MACRO1', 'MACRO2', 'MACRO3'],
                                           ['char*', 'hex', 'int',
                                            'int', 'int', 'int'],
                                           unique_expressions)
        self.assertEqual(len(unique_expressions), 3)
        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
        expected_expression_code = '''
        case 0:
            {
                *out_value = MACRO1;
            }
            break;
        case 1:
            {
                *out_value = MACRO2;
            }
            break;
        case 2:
            {
                *out_value = MACRO3;
            }
            break;'''
        self.assertEqual(expression_code, expected_expression_code)
        self.assertEqual(stream.getvalue(),
                         ':char*:"Yahoo":hex:"abcdef00":int:0:exp:0:exp:1'
                         ':exp:2\n')

    def test_with_repeat_calls(self):
        """
        Test when write_parameter() is called with same macro or
        expression: ids are reused and no duplicate cases emitted.
        :return:
        """
        stream = StringIOWrapper('test_suite_ut.data', '')
        unique_expressions = []
        expression_code = ''
        expression_code += write_parameters(stream,
                                            ['"Yahoo"', 'MACRO1', 'MACRO2'],
                                            ['char*', 'int', 'int'],
                                            unique_expressions)
        expression_code += write_parameters(stream,
                                            ['"abcdef00"', 'MACRO2', 'MACRO3'],
                                            ['hex', 'int', 'int'],
                                            unique_expressions)
        expression_code += write_parameters(stream,
                                            ['0', 'MACRO3', 'MACRO1'],
                                            ['int', 'int', 'int'],
                                            unique_expressions)
        self.assertEqual(len(unique_expressions), 3)
        self.assertEqual(unique_expressions, ['MACRO1', 'MACRO2', 'MACRO3'])
        expected_expression_code = '''
        case 0:
            {
                *out_value = MACRO1;
            }
            break;
        case 1:
            {
                *out_value = MACRO2;
            }
            break;
        case 2:
            {
                *out_value = MACRO3;
            }
            break;'''
        self.assertEqual(expression_code, expected_expression_code)
        expected_data_file = ''':char*:"Yahoo":exp:0:exp:1
:hex:"abcdef00":exp:1:exp:2
:int:0:exp:2:exp:0
'''
        self.assertEqual(stream.getvalue(), expected_data_file)
class GenTestSuiteDependenciesChecks(TestCase):
    """
    Test suite for testing gen_suite_dep_checks()
    """

    def test_empty_suite_dependencies(self):
        """
        Test with empty suite_dependencies list: code passes through
        unwrapped.
        :return:
        """
        dep_check_code, expression_code = \
            gen_suite_dep_checks([], 'DEP_CHECK_CODE', 'EXPRESSION_CODE')
        self.assertEqual(dep_check_code, 'DEP_CHECK_CODE')
        self.assertEqual(expression_code, 'EXPRESSION_CODE')

    def test_suite_dependencies(self):
        """
        Test with suite_dependencies list: both code fragments are
        wrapped in the suite-level #if guard.
        :return:
        """
        dep_check_code, expression_code = \
            gen_suite_dep_checks(['SUITE_DEP'], 'DEP_CHECK_CODE',
                                 'EXPRESSION_CODE')
        expected_dep_check_code = '''
#if defined(SUITE_DEP)
DEP_CHECK_CODE
#endif
'''
        expected_expression_code = '''
#if defined(SUITE_DEP)
EXPRESSION_CODE
#endif
'''
        self.assertEqual(dep_check_code, expected_dep_check_code)
        self.assertEqual(expression_code, expected_expression_code)

    def test_no_dep_no_exp(self):
        """
        Test when there are no dependency and expression code.
        :return:
        """
        dep_check_code, expression_code = gen_suite_dep_checks([], '', '')
        self.assertEqual(dep_check_code, '')
        self.assertEqual(expression_code, '')
class GenFromTestData(TestCase):
    """
    Test suite for gen_from_test_data()
    """

    # @patch decorators apply bottom-up, so func_mock1 receives the
    # gen_suite_dep_checks mock (bottom decorator), then the others in
    # reverse decorator order.
    @staticmethod
    @patch("generate_test_code.write_dependencies")
    @patch("generate_test_code.write_parameters")
    @patch("generate_test_code.gen_suite_dep_checks")
    def test_intermediate_data_file(func_mock1,
                                    write_parameters_mock,
                                    write_dependencies_mock):
        """
        Test that intermediate data file is written with expected data.
        :return:
        """
        data = '''
My test
depends_on:DEP1
func1:0
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func1': (1, ('int',))}
        suite_dependencies = []
        # Forward mocks to the real implementations so call arguments
        # can be asserted while behaviour stays real.
        write_parameters_mock.side_effect = write_parameters
        write_dependencies_mock.side_effect = write_dependencies
        func_mock1.side_effect = gen_suite_dep_checks
        gen_from_test_data(data_f, out_data_f, func_info, suite_dependencies)
        write_dependencies_mock.assert_called_with(out_data_f,
                                                   ['DEP1'], ['DEP1'])
        write_parameters_mock.assert_called_with(out_data_f, ['0'],
                                                 ('int',), [])
        expected_dep_check_code = '''
        case 0:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        func_mock1.assert_called_with(
            suite_dependencies, expected_dep_check_code, '')

    def test_function_not_found(self):
        """
        Test that GeneratorInputError is raised when function info is
        not found.
        :return:
        """
        data = '''
My test
depends_on:DEP1
func1:0
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func2': (1, ('int',))}
        suite_dependencies = []
        self.assertRaises(GeneratorInputError, gen_from_test_data,
                          data_f, out_data_f, func_info, suite_dependencies)

    def test_different_func_args(self):
        """
        Test that GeneratorInputError is raised when no. of parameters
        and function args differ.
        :return:
        """
        data = '''
My test
depends_on:DEP1
func1:0
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func2': (1, ('int', 'hex'))}
        suite_dependencies = []
        self.assertRaises(GeneratorInputError, gen_from_test_data, data_f,
                          out_data_f, func_info, suite_dependencies)

    def test_output(self):
        """
        Test that intermediate data file is written with expected data.
        :return:
        """
        data = '''
My test 1
depends_on:DEP1
func1:0:0xfa:MACRO1:MACRO2

My test 2
depends_on:DEP1:DEP2
func2:"yahoo":88:MACRO1
'''
        data_f = StringIOWrapper('test_suite_ut.data', data)
        out_data_f = StringIOWrapper('test_suite_ut.datax', '')
        func_info = {'test_func1': (0, ('int', 'int', 'int', 'int')),
                     'test_func2': (1, ('char*', 'int', 'int'))}
        suite_dependencies = []
        dep_check_code, expression_code = \
            gen_from_test_data(data_f, out_data_f, func_info,
                               suite_dependencies)
        expected_dep_check_code = '''
        case 0:
            {
#if defined(DEP1)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;
        case 1:
            {
#if defined(DEP2)
                ret = DEPENDENCY_SUPPORTED;
#else
                ret = DEPENDENCY_NOT_SUPPORTED;
#endif
            }
            break;'''
        expected_data = '''My test 1
depends_on:0
0:int:0:int:0xfa:exp:0:exp:1

My test 2
depends_on:0:1
1:char*:"yahoo":int:88:exp:0

'''
        expected_expression_code = '''
        case 0:
            {
                *out_value = MACRO1;
            }
            break;
        case 1:
            {
                *out_value = MACRO2;
            }
            break;'''
        self.assertEqual(dep_check_code, expected_dep_check_code)
        self.assertEqual(out_data_f.getvalue(), expected_data)
        self.assertEqual(expression_code, expected_expression_code)
if __name__ == '__main__':
    # Run the whole unit-test suite when executed as a script.
    unittest_main()
| bsd-3-clause |
yoava333/servo | tests/wpt/web-platform-tests/tools/html5lib/html5lib/tests/performance/concatenation.py | 451 | 1145 | from __future__ import absolute_import, division, unicode_literals
def f1():
    """Concatenation benchmark variant: augmented assignment (x += y + z)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x += y + z
def f2():
    """Concatenation benchmark variant: plain rebinding (x = x + y + z)."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = x + y + z
def f3():
    """Concatenation benchmark variant: str.join over a tuple."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "".join((x, y, z))
def f4():
    """Concatenation benchmark variant: %-style string formatting."""
    x = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    y = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    z = "ABCDEFGHIJKLMNOPQRSTUVWXYZABCDEFGHIJKLMNOPQRSTUVWXYZ"
    x = "%s%s%s" % (x, y, z)
import timeit

# Time each concatenation strategy (f1..f4): 3 repeats of 1,000,000 calls,
# printing all repeat times plus the best (min) one.
#
# Fix: the original statement was just "f1".."f4", which only benchmarks a
# global name lookup and never calls the function, so all four variants
# timed the same thing. Add the call parentheses (and import the bare name
# in the setup, since "from __main__ import f1()" would be a SyntaxError).
for x in range(4):
    name = "f%s" % (x + 1)
    t = timeit.Timer(name + "()", "from __main__ import " + name)
    r = t.repeat(3, 1000000)
    print(r, min(r))
| mpl-2.0 |
pierreroth64/robotframework-template | scripts/run_acceptancetest.py | 1 | 1743 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Runner script for acceptance testing"""
import os
import re
import sys
import string
from subprocess import call
def rework_args(args):
    """Rework args to be able to exclude list of tags (pybot hack).

    pybot only accepts one tag per ``--exclude`` flag, so a single
    ``--exclude tagAANDtagB`` occurrence is expanded into repeated
    ``--exclude tag`` pairs, keeping the surrounding arguments in place.
    """
    # Single pass classifying tokens relative to the --exclude marker.
    leading = []
    trailing = []
    awaiting_tags = False      # just saw "--exclude", next token is the tags
    tags_captured = False      # a tags string has been captured already
    tags_string = None
    for token in args:
        if token == "--exclude":
            awaiting_tags = True
        elif awaiting_tags:
            tags_string = token
            awaiting_tags = False
            tags_captured = True
        elif tags_captured:
            trailing.append(token)
        else:
            leading.append(token)
    # Rebuild: leading args, then one --exclude per tag, then the rest.
    reworked = leading
    if tags_string is not None:
        for tag in tags_string.split("AND"):
            reworked.append("--exclude")
            reworked.append(tag)
    reworked += trailing
    return reworked
def run_tests(args):
    """Run pybot command with given args.

    Prepends house-keeping options, expands the --exclude hack via
    rework_args(), then invokes pybot and returns its exit code.
    (Python 2 only: uses the print statement and string.join.)
    """
    extra_args = ['--removekeywords', 'wuks']  # remove keywords
    extra_args += ['--suitestatlevel', '1']  # limit stat size
    extra_args += ['--splitlog']  # split logs to limit per-file size
    args = rework_args(args=args)
    args = extra_args + args
    cmd_line = string.join(['pybot'] + args)
    print "pybot command line: ", cmd_line
    # On Windows (os.sep == '\\') run through the shell so the pybot
    # launcher script can be resolved.
    retcode = call(['pybot'] + args , shell=(os.sep == '\\'))
    print "pybot command return code was:", retcode
    return retcode
if __name__ == '__main__':
    # Propagate pybot's return code so callers/CI can detect failures.
    sys.exit(run_tests(sys.argv[1:]))
| mit |
ponyorm/pony | pony/thirdparty/compiler/pyassem.py | 2 | 24342 | """A flow graph representation for Python bytecode"""
from __future__ import absolute_import, print_function
from pony.py23compat import imap, items_list
import dis
import types
import sys
from . import misc
from .consts import CO_OPTIMIZED, CO_NEWLOCALS, CO_VARARGS, CO_VARKEYWORDS
class FlowGraph:
    """Control-flow graph of basic blocks being built by the compiler.

    Tracks a distinguished entry block, exit block, and the block
    instructions are currently being emitted into.
    """
    def __init__(self):
        self.current = self.entry = Block()
        self.exit = Block("exit")
        self.blocks = misc.Set()
        self.blocks.add(self.entry)
        self.blocks.add(self.exit)

    def startBlock(self, block):
        # Make `block` the target for subsequent emit() calls.
        if self._debug:
            if self.current:
                print("end", repr(self.current))
                print("    next", self.current.next)
                print("    prev", self.current.prev)
                print("   ", self.current.get_children())
            print(repr(block))
        self.current = block

    def nextBlock(self, block=None):
        # XXX think we need to specify when there is implicit transfer
        # from one block to the next. might be better to represent this
        # with explicit JUMP_ABSOLUTE instructions that are optimized
        # out when they are unnecessary.
        #
        # I think this strategy works: each block has a child
        # designated as "next" which is returned as the last of the
        # children. because the nodes in a graph are emitted in
        # reverse post order, the "next" block will always be emitted
        # immediately after its parent.
        # Worry: maintaining this invariant could be tricky
        if block is None:
            block = self.newBlock()
        # Note: If the current block ends with an unconditional control
        # transfer, then it is techically incorrect to add an implicit
        # transfer to the block graph. Doing so results in code generation
        # for unreachable blocks. That doesn't appear to be very common
        # with Python code and since the built-in compiler doesn't optimize
        # it out we don't either.
        self.current.addNext(block)
        self.startBlock(block)

    def newBlock(self):
        # Allocate a fresh block and register it with the graph.
        b = Block()
        self.blocks.add(b)
        return b

    def startExitBlock(self):
        self.startBlock(self.exit)

    # Debug tracing of block transitions/emits; off by default.
    _debug = 0

    def _enable_debug(self):
        self._debug = 1

    def _disable_debug(self):
        self._debug = 0

    def emit(self, *inst):
        # Append an instruction tuple to the current block; a Block
        # operand also records an out-edge in the graph.
        if self._debug:
            print("\t", inst)
        if len(inst) == 2 and isinstance(inst[1], Block):
            self.current.addOutEdge(inst[1])
        self.current.emit(inst)

    def getBlocksInOrder(self):
        """Return the blocks in reverse postorder

        i.e. each node appears before all of its successors
        """
        order = order_blocks(self.entry, self.exit)
        return order

    def getBlocks(self):
        return self.blocks.elements()

    def getRoot(self):
        """Return nodes appropriate for use with dominator"""
        return self.entry

    def getContainedGraphs(self):
        # Collect nested graphs (e.g. function bodies) from every block.
        l = []
        for b in self.getBlocks():
            l.extend(b.getContainedGraphs())
        return l
def order_blocks(start_block, exit_block):
    """Order blocks so that they are emitted in the right order"""
    # Rules:
    # - when a block has a next block, the next block must be emitted just after
    # - when a block has followers (relative jumps), it must be emitted before
    #   them
    # - all reachable blocks must be emitted
    order = []

    # Find all the blocks to be emitted (reachability walk from the entry).
    remaining = set()
    todo = [start_block]
    while todo:
        b = todo.pop()
        if b in remaining:
            continue
        remaining.add(b)
        for c in b.get_children():
            if c not in remaining:
                todo.append(c)

    # A block is dominated by another block if that block must be emitted
    # before it.
    dominators = {}
    for b in remaining:
        if __debug__ and b.next:
            assert b is b.next[0].prev[0], (b, b.next)
        # Make sure every block appears in dominators, even if no
        # other block must precede it.
        dominators.setdefault(b, set())
        # preceding blocks dominate following blocks
        for c in b.get_followers():
            while 1:
                dominators.setdefault(c, set()).add(b)
                # Any block that has a next pointer leading to c is also
                # dominated because the whole chain will be emitted at once.
                # Walk backwards and add them all.
                if c.prev and c.prev[0] is not b:
                    c = c.prev[0]
                else:
                    break

    def find_next():
        # Find a block that can be emitted next (no remaining dominator).
        for b in remaining:
            for c in dominators[b]:
                if c in remaining:
                    break  # can't emit yet, dominated by a remaining block
            else:
                return b
        assert 0, 'circular dependency, cannot find next block'

    b = start_block
    while 1:
        order.append(b)
        remaining.discard(b)
        if b.next:
            # Follow explicit fall-through chains first.
            b = b.next[0]
            continue
        elif b is not exit_block and not b.has_unconditional_transfer():
            # Implicit fall-through into the exit block.
            order.append(exit_block)
        if not remaining:
            break
        b = find_next()
    return order
class Block:
    """A basic block: a straight-line run of instructions plus edges.

    `next`/`prev` model fall-through chains (at most one each, asserted
    in addNext); `outEdges` records jump targets.
    """
    # Monotonically increasing id shared by all Block instances.
    _count = 0

    def __init__(self, label=''):
        self.insts = []
        self.outEdges = set()
        self.label = label
        self.bid = Block._count
        self.next = []
        self.prev = []
        Block._count = Block._count + 1

    def __repr__(self):
        if self.label:
            return "<block %s id=%d>" % (self.label, self.bid)
        else:
            return "<block id=%d>" % (self.bid)

    def __str__(self):
        insts = imap(str, self.insts)
        return "<block %s %d:\n%s>" % (self.label, self.bid, '\n'.join(insts))

    def emit(self, inst):
        # NOTE(review): `op` is extracted but never used — confirm it
        # can be dropped.
        op = inst[0]
        self.insts.append(inst)

    def getInstructions(self):
        return self.insts

    def addOutEdge(self, block):
        self.outEdges.add(block)

    def addNext(self, block):
        # Link a fall-through successor; each block may have at most one.
        self.next.append(block)
        assert len(self.next) == 1, list(imap(str, self.next))
        block.prev.append(self)
        assert len(block.prev) == 1, list(imap(str, block.prev))

    # Opcodes after which control never falls through to the next block.
    _uncond_transfer = ('RETURN_VALUE', 'RAISE_VARARGS',
                        'JUMP_ABSOLUTE', 'JUMP_FORWARD', 'CONTINUE_LOOP',
                        )

    def has_unconditional_transfer(self):
        """Returns True if there is an unconditional transfer to an other block
        at the end of this block. This means there is no risk for the bytecode
        executer to go past this block's bytecode."""
        try:
            op, arg = self.insts[-1]
        except (IndexError, ValueError):
            return
        return op in self._uncond_transfer

    def get_children(self):
        return list(self.outEdges) + self.next

    def get_followers(self):
        """Get the whole list of followers, including the next block."""
        followers = set(self.next)
        # Blocks that must be emitted *after* this one, because of
        # bytecode offsets (e.g. relative jumps) pointing to them.
        for inst in self.insts:
            if inst[0] in PyFlowGraph.hasjrel:
                followers.add(inst[1])
        return followers

    def getContainedGraphs(self):
        """Return all graphs contained within this block.

        For example, a MAKE_FUNCTION block will contain a reference to
        the graph for the function body.
        """
        contained = []
        for inst in self.insts:
            if len(inst) == 1:
                continue
            op = inst[1]
            if hasattr(op, 'graph'):
                contained.append(op.graph)
        return contained
# flags for code objects

# the FlowGraph is transformed in place; it exists in one of these states
RAW = "RAW"      # blocks/instructions as originally emitted
FLAT = "FLAT"    # graph flattened to a linear instruction list, jumps resolved
CONV = "CONV"    # symbolic instruction arguments converted to indices
DONE = "DONE"    # final bytecode assembled
class PyFlowGraph(FlowGraph):
    """FlowGraph specialised to assemble a Python code object.

    The graph moves in place through the stages RAW -> FLAT -> CONV ->
    DONE (see getCode for the driver).
    """
    super_init = FlowGraph.__init__

    def __init__(self, name, filename, args=(), optimized=0, klass=None):
        self.super_init()
        self.name = name
        self.filename = filename
        self.docstring = None
        self.args = args  # XXX
        self.argcount = getArgCount(args)
        self.klass = klass
        if optimized:
            self.flags = CO_OPTIMIZED | CO_NEWLOCALS
        else:
            self.flags = 0
        self.consts = []
        self.names = []
        # Free variables found by the symbol table scan, including
        # variables used only in nested scopes, are included here.
        self.freevars = []
        self.cellvars = []
        # The closure list is used to track the order of cell
        # variables and free variables in the resulting code object.
        # The offsets used by LOAD_CLOSURE/LOAD_DEREF refer to both
        # kinds of variables.
        self.closure = []
        self.varnames = list(args) or []
        # TupleArg placeholders are replaced by their hidden ".N" names.
        for i in range(len(self.varnames)):
            var = self.varnames[i]
            if isinstance(var, TupleArg):
                self.varnames[i] = var.getName()
        self.stage = RAW

    def setDocstring(self, doc):
        self.docstring = doc

    def setFlag(self, flag):
        self.flags = self.flags | flag
        # *args is not counted in co_argcount.
        if flag == CO_VARARGS:
            self.argcount = self.argcount - 1

    def checkFlag(self, flag):
        # Returns 1 if the flag is set, None otherwise.
        if self.flags & flag:
            return 1

    def setFreeVars(self, names):
        self.freevars = list(names)

    def setCellVars(self, names):
        self.cellvars = names

    def getCode(self):
        """Get a Python code object"""
        assert self.stage == RAW
        self.computeStackDepth()
        self.flattenGraph()
        assert self.stage == FLAT
        self.convertArgs()
        assert self.stage == CONV
        self.makeByteCode()
        assert self.stage == DONE
        return self.newCodeObject()

    def dump(self, io=None):
        # Debug helper: print a disassembly-like listing, optionally to io.
        if io:
            save = sys.stdout
            sys.stdout = io
        pc = 0
        for t in self.insts:
            opname = t[0]
            if opname == "SET_LINENO":
                print()
            if len(t) == 1:
                print("\t", "%3d" % pc, opname)
                pc = pc + 1
            else:
                print("\t", "%3d" % pc, opname, t[1])
                pc = pc + 3
        if io:
            sys.stdout = save

    def computeStackDepth(self):
        """Compute the max stack depth.

        Approach is to compute the stack effect of each basic block.
        Then find the path through the code with the largest total
        effect.
        """
        depth = {}
        exit = None
        for b in self.getBlocks():
            depth[b] = findDepth(b.getInstructions())

        seen = {}

        def max_depth(b, d):
            # Depth-first search for the heaviest path; `seen` prevents
            # revisiting a block on cyclic graphs.
            if b in seen:
                return d
            seen[b] = 1
            d = d + depth[b]
            children = b.get_children()
            if children:
                return max([max_depth(c, d) for c in children])
            else:
                if not b.label == "exit":
                    return max_depth(self.exit, d)
                else:
                    return d

        self.stacksize = max_depth(self.entry, 0)

    def flattenGraph(self):
        """Arrange the blocks in order and resolve jumps"""
        assert self.stage == RAW
        self.insts = insts = []
        pc = 0
        begin = {}
        end = {}
        # First pass: record each block's start/end bytecode offsets.
        for b in self.getBlocksInOrder():
            begin[b] = pc
            for inst in b.getInstructions():
                insts.append(inst)
                if len(inst) == 1:
                    pc = pc + 1
                elif inst[0] != "SET_LINENO":
                    # arg takes 2 bytes
                    pc = pc + 3
            end[b] = pc
        # Second pass: rewrite Block operands of jumps as offsets.
        pc = 0
        for i in range(len(insts)):
            inst = insts[i]
            if len(inst) == 1:
                pc = pc + 1
            elif inst[0] != "SET_LINENO":
                pc = pc + 3
            opname = inst[0]
            if opname in self.hasjrel:
                oparg = inst[1]
                offset = begin[oparg] - pc
                insts[i] = opname, offset
            elif opname in self.hasjabs:
                insts[i] = opname, begin[inst[1]]
        self.stage = FLAT

    # Opcode-name sets for relative/absolute jumps, derived from dis.
    hasjrel = set()
    for i in dis.hasjrel:
        hasjrel.add(dis.opname[i])
    hasjabs = set()
    for i in dis.hasjabs:
        hasjabs.add(dis.opname[i])

    def convertArgs(self):
        """Convert arguments from symbolic to concrete form"""
        assert self.stage == FLAT
        # The docstring occupies const slot 0 by convention.
        self.consts.insert(0, self.docstring)
        self.sort_cellvars()
        for i in range(len(self.insts)):
            t = self.insts[i]
            if len(t) == 2:
                opname, oparg = t
                conv = self._converters.get(opname, None)
                if conv:
                    self.insts[i] = opname, conv(self, oparg)
        self.stage = CONV

    def sort_cellvars(self):
        """Sort cellvars in the order of varnames and prune from freevars.
        """
        cells = {}
        for name in self.cellvars:
            cells[name] = 1
        self.cellvars = [name for name in self.varnames
                         if name in cells]
        for name in self.cellvars:
            del cells[name]
        # NOTE(review): on Python 3, dict.keys() cannot be concatenated
        # to a list — list(cells.keys()) would be required. Confirm the
        # targeted Python version.
        self.cellvars = self.cellvars + cells.keys()
        self.closure = self.cellvars + self.freevars

    def _lookupName(self, name, list):
        """Return index of name in list, appending if necessary

        This routine uses a list instead of a dictionary, because a
        dictionary can't store two different keys if the keys have the
        same value but different types, e.g. 2 and 2L. The compiler
        must treat these two separately, so it does an explicit type
        comparison before comparing the values.
        """
        t = type(name)
        for i in range(len(list)):
            if t == type(list[i]) and list[i] == name:
                return i
        end = len(list)
        list.append(name)
        return end

    # opname -> converter function, filled in by the locals() loop below.
    _converters = {}

    def _convert_LOAD_CONST(self, arg):
        if hasattr(arg, 'getCode'):
            arg = arg.getCode()
        return self._lookupName(arg, self.consts)

    def _convert_LOAD_FAST(self, arg):
        self._lookupName(arg, self.names)
        return self._lookupName(arg, self.varnames)
    _convert_STORE_FAST = _convert_LOAD_FAST
    _convert_DELETE_FAST = _convert_LOAD_FAST

    def _convert_LOAD_NAME(self, arg):
        if self.klass is None:
            self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.names)

    def _convert_NAME(self, arg):
        if self.klass is None:
            self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.names)
    _convert_STORE_NAME = _convert_NAME
    _convert_DELETE_NAME = _convert_NAME
    _convert_IMPORT_NAME = _convert_NAME
    _convert_IMPORT_FROM = _convert_NAME
    _convert_STORE_ATTR = _convert_NAME
    _convert_LOAD_ATTR = _convert_NAME
    _convert_DELETE_ATTR = _convert_NAME
    _convert_LOAD_GLOBAL = _convert_NAME
    _convert_STORE_GLOBAL = _convert_NAME
    _convert_DELETE_GLOBAL = _convert_NAME

    def _convert_DEREF(self, arg):
        self._lookupName(arg, self.names)
        self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.closure)
    _convert_LOAD_DEREF = _convert_DEREF
    _convert_STORE_DEREF = _convert_DEREF

    def _convert_LOAD_CLOSURE(self, arg):
        self._lookupName(arg, self.varnames)
        return self._lookupName(arg, self.closure)

    _cmp = list(dis.cmp_op)
    def _convert_COMPARE_OP(self, arg):
        return self._cmp.index(arg)

    # similarly for other opcodes...
    # Register every _convert_XXX defined above under opcode name XXX.
    for name, obj in items_list(locals()):
        if name[:9] == "_convert_":
            opname = name[9:]
            _converters[opname] = obj
    del name, obj, opname

    def makeByteCode(self):
        assert self.stage == CONV
        self.lnotab = lnotab = LineAddrTable()
        for t in self.insts:
            opname = t[0]
            if len(t) == 1:
                lnotab.addCode(self.opnum[opname])
            else:
                oparg = t[1]
                if opname == "SET_LINENO":
                    lnotab.nextLine(oparg)
                    continue
                hi, lo = twobyte(oparg)
                try:
                    lnotab.addCode(self.opnum[opname], lo, hi)
                except ValueError:
                    print(opname, oparg)
                    print(self.opnum[opname], lo, hi)
                    raise
        self.stage = DONE

    # opcode name -> numeric opcode.
    opnum = {}
    for num in range(len(dis.opname)):
        opnum[dis.opname[num]] = num
    del num

    def newCodeObject(self):
        assert self.stage == DONE
        if (self.flags & CO_NEWLOCALS) == 0:
            nlocals = 0
        else:
            nlocals = len(self.varnames)
        argcount = self.argcount
        # **kwargs is not counted in co_argcount.
        if self.flags & CO_VARKEYWORDS:
            argcount = argcount - 1
        return types.CodeType(argcount, nlocals, self.stacksize, self.flags,
                              self.lnotab.getCode(), self.getConsts(),
                              tuple(self.names), tuple(self.varnames),
                              self.filename, self.name, self.lnotab.firstline,
                              self.lnotab.getTable(), tuple(self.freevars),
                              tuple(self.cellvars))

    def getConsts(self):
        """Return a tuple for the const slot of the code object

        Must convert references to code (MAKE_FUNCTION) to code
        objects recursively.
        """
        l = []
        for elt in self.consts:
            if isinstance(elt, PyFlowGraph):
                elt = elt.getCode()
            l.append(elt)
        return tuple(l)
def isJump(opname):
    """Return 1 when *opname* is a jump instruction, else None."""
    if opname.startswith('JUMP'):
        return 1
class TupleArg:
    """Placeholder for a nested-tuple parameter in a function arglist.

    getName() yields the hidden ".N" name CPython assigns to such a
    parameter slot; `names` keeps the original (possibly nested) names.
    """

    def __init__(self, count, names):
        self.count = count
        self.names = names

    def __repr__(self):
        return "TupleArg({0}, {1})".format(self.count, self.names)

    def getName(self):
        return ".{0:d}".format(self.count)
def getArgCount(args):
    """Number of real arguments, discounting names hidden in TupleArgs.

    Each TupleArg placeholder contributes one slot but would otherwise
    be over-counted by the names nested inside it.
    """
    argcount = len(args)
    for arg in args:
        if isinstance(arg, TupleArg):
            argcount -= len(misc.flatten(arg.names))
    return argcount
def twobyte(val):
    """Split an int opcode argument into its (high, low) byte pair."""
    assert isinstance(val, int)
    return val // 256, val % 256
class LineAddrTable:
    """lnotab

    This class builds the lnotab, which is documented in compile.c.
    Here's a brief recap:

    For each SET_LINENO instruction after the first one, two bytes are
    added to lnotab. (In some cases, multiple two-byte entries are
    added.) The first byte is the distance in bytes between the
    instruction for the last SET_LINENO and the current SET_LINENO.
    The second byte is offset in line numbers. If either offset is
    greater than 255, multiple two-byte entries are added -- see
    compile.c for the delicate details.
    """

    def __init__(self):
        self.code = []          # bytecode bytes, as 1-char strings
        self.codeOffset = 0     # current bytecode offset
        self.firstline = 0      # line number of the first SET_LINENO
        self.lastline = 0       # line of the most recent entry
        self.lastoff = 0        # bytecode offset of the most recent entry
        self.lnotab = []        # flat list of (addr, line) delta bytes

    def addCode(self, *args):
        for arg in args:
            self.code.append(chr(arg))
        self.codeOffset = self.codeOffset + len(args)

    def nextLine(self, lineno):
        # Record a line-number transition at the current bytecode offset.
        if self.firstline == 0:
            self.firstline = lineno
            self.lastline = lineno
        else:
            # compute deltas
            addr = self.codeOffset - self.lastoff
            line = lineno - self.lastline
            # Python assumes that lineno always increases with
            # increasing bytecode address (lnotab is unsigned char).
            # Depending on when SET_LINENO instructions are emitted
            # this is not always true. Consider the code:
            #     a = (1,
            #          b)
            # In the bytecode stream, the assignment to "a" occurs
            # after the loading of "b". This works with the C Python
            # compiler because it only generates a SET_LINENO instruction
            # for the assignment.
            if line >= 0:
                push = self.lnotab.append
                # Deltas > 255 are split across multiple entries.
                while addr > 255:
                    push(255); push(0)
                    addr -= 255
                while line > 255:
                    push(addr); push(255)
                    line -= 255
                    addr = 0
                if addr > 0 or line > 0:
                    push(addr); push(line)
                self.lastline = lineno
                self.lastoff = self.codeOffset

    def getCode(self):
        return ''.join(self.code)

    def getTable(self):
        return ''.join(imap(chr, self.lnotab))
class StackDepthTracker:
    """Estimates the stack effect of an instruction sequence.

    Looks up each opcode in the `effect` table, then prefix `patterns`,
    then falls back to a method named after the opcode for arg-dependent
    effects.
    """
    # XXX 1. need to keep track of stack depth on jumps
    # XXX 2. at least partly as a result, this code is broken

    def findDepth(self, insts, debug=0):
        depth = 0
        maxDepth = 0
        for i in insts:
            opname = i[0]
            if debug:
                print(i, end=' ')
            delta = self.effect.get(opname, None)
            if delta is not None:
                depth = depth + delta
            else:
                # now check patterns
                for pat, pat_delta in self.patterns:
                    if opname[:len(pat)] == pat:
                        delta = pat_delta
                        depth = depth + delta
                        break
                # if we still haven't found a match
                if delta is None:
                    meth = getattr(self, opname, None)
                    if meth is not None:
                        depth = depth + meth(i[1])
            if depth > maxDepth:
                maxDepth = depth
            if debug:
                print(depth, maxDepth)
        return maxDepth

    # Fixed stack effect per opcode.
    effect = {
        'POP_TOP': -1,
        'DUP_TOP': 1,
        'LIST_APPEND': -1,
        'SET_ADD': -1,
        'MAP_ADD': -2,
        'SLICE+1': -1,
        'SLICE+2': -1,
        'SLICE+3': -2,
        'STORE_SLICE+0': -1,
        'STORE_SLICE+1': -2,
        'STORE_SLICE+2': -2,
        'STORE_SLICE+3': -3,
        'DELETE_SLICE+0': -1,
        'DELETE_SLICE+1': -2,
        'DELETE_SLICE+2': -2,
        'DELETE_SLICE+3': -3,
        'STORE_SUBSCR': -3,
        'DELETE_SUBSCR': -2,
        # PRINT_EXPR?
        'PRINT_ITEM': -1,
        'RETURN_VALUE': -1,
        'YIELD_VALUE': -1,
        'EXEC_STMT': -3,
        'BUILD_CLASS': -2,
        'STORE_NAME': -1,
        'STORE_ATTR': -2,
        'DELETE_ATTR': -1,
        'STORE_GLOBAL': -1,
        'BUILD_MAP': 1,
        'COMPARE_OP': -1,
        'STORE_FAST': -1,
        'IMPORT_STAR': -1,
        'IMPORT_NAME': -1,
        'IMPORT_FROM': 1,
        'LOAD_ATTR': 0,  # unlike other loads
        # close enough...
        'SETUP_EXCEPT': 3,
        'SETUP_FINALLY': 3,
        'FOR_ITER': 1,
        'WITH_CLEANUP': -1,
    }
    # use pattern match (opcode-name prefixes)
    patterns = [
        ('BINARY_', -1),
        ('LOAD_', 1),
    ]

    # Arg-dependent effects, one method per opcode name.
    def UNPACK_SEQUENCE(self, count):
        return count - 1

    def BUILD_TUPLE(self, count):
        return -count + 1

    def BUILD_LIST(self, count):
        return -count + 1

    def BUILD_SET(self, count):
        return -count + 1

    def CALL_FUNCTION(self, argc):
        # low byte = positional args, high byte = keyword arg pairs.
        hi, lo = divmod(argc, 256)
        return -(lo + hi * 2)

    def CALL_FUNCTION_VAR(self, argc):
        return self.CALL_FUNCTION(argc) - 1

    def CALL_FUNCTION_KW(self, argc):
        return self.CALL_FUNCTION(argc) - 1

    def CALL_FUNCTION_VAR_KW(self, argc):
        return self.CALL_FUNCTION(argc) - 2

    def MAKE_FUNCTION(self, argc):
        return -argc

    def MAKE_CLOSURE(self, argc):
        # XXX need to account for free variables too!
        return -argc

    def BUILD_SLICE(self, argc):
        if argc == 2:
            return -1
        elif argc == 3:
            return -2

    def DUP_TOPX(self, argc):
        return argc
# Module-level convenience: findDepth bound to a shared tracker instance.
findDepth = StackDepthTracker().findDepth
| apache-2.0 |
dwaynebailey/pootle | pootle/apps/pootle_fs/utils.py | 8 | 3105 | # -*- coding: utf-8 -*-
#
# Copyright (C) Pootle contributors.
#
# This file is a part of the Pootle project. It is distributed under the GPL3
# or later license. See the LICENSE file for a copy of the license and the
# AUTHORS file for copyright and authorship information.
from fnmatch import translate
from django.utils.functional import cached_property
from pootle.core.exceptions import MissingPluginError, NotConfiguredError
from .delegate import fs_plugins
class PathFilter(object):
    """Base helper turning a file glob into a regex for ORM filtering."""

    def path_regex(self, path):
        # fnmatch.translate ends its pattern with "\Z(?ms)" on Python 2;
        # replace it with "$" so the regex suits the DB regex filter.
        # NOTE(review): Python 3.6+ fnmatch.translate emits "(?s:...)\Z"
        # instead, making this replace a no-op — confirm the targeted
        # Python version.
        return translate(path).replace("\Z(?ms)", "$")
class StorePathFilter(PathFilter):
    """Filters Stores (only pootle_path)

    pootle_path should be a file glob;
    the glob is converted to a regex and used to filter a qs
    """

    def __init__(self, pootle_path=None):
        self.pootle_path = pootle_path

    @cached_property
    def pootle_regex(self):
        # Computed once per instance; None/empty glob means "no filter".
        if not self.pootle_path:
            return
        return self.path_regex(self.pootle_path)

    def filtered(self, qs):
        # Without a pattern the queryset is returned untouched.
        if not self.pootle_regex:
            return qs
        return qs.filter(pootle_path__regex=self.pootle_regex)
class StoreFSPathFilter(StorePathFilter):
    """Filters StoreFS

    pootle_path and fs_path should be file globs;
    these are converted to regexes and used to filter a qs
    """

    def __init__(self, pootle_path=None, fs_path=None):
        super(StoreFSPathFilter, self).__init__(pootle_path=pootle_path)
        self.fs_path = fs_path

    @cached_property
    def fs_regex(self):
        # Computed once per instance; None/empty glob means "no filter".
        if not self.fs_path:
            return
        return self.path_regex(self.fs_path)

    def filtered(self, qs):
        # Apply the pootle_path filter first, then the fs path filter.
        qs = super(StoreFSPathFilter, self).filtered(qs)
        if not self.fs_regex:
            return qs
        return qs.filter(path__regex=self.fs_regex)
class FSPlugin(object):
    """Wraps a Project to access the configured FS plugin"""

    def __init__(self, project):
        """
        :raises NotConfiguredError: if fs_type or fs_url is missing from
            the project configuration.
        :raises MissingPluginError: if no plugin is registered for the
            configured fs_type.
        """
        self.project = project
        plugins = fs_plugins.gather(self.project.__class__)
        fs_type = project.config.get("pootle_fs.fs_type")
        fs_url = project.config.get("pootle_fs.fs_url")
        if not fs_type or not fs_url:
            missing_key = "pootle_fs.fs_url" if fs_type else "pootle_fs.fs_type"
            raise NotConfiguredError('Missing "%s" in project configuration.' %
                                     missing_key)
        try:
            self.plugin = plugins[fs_type](self.project)
        except KeyError:
            raise MissingPluginError(
                "No such plugin: %s" % fs_type)

    @property
    def __class__(self):
        # Masquerade as the wrapped plugin's class so isinstance checks
        # against the wrapper behave as if it were the plugin itself.
        return self.plugin.__class__

    def __getattr__(self, k):
        # Delegate all other attribute access to the wrapped plugin.
        return getattr(self.plugin, k)

    def __eq__(self, other):
        return self.plugin.__eq__(other)

    def __str__(self):
        return str(self.plugin)
def parse_fs_url(fs_url):
    """Split an fs url of the form ``fstype+url`` into (fs_type, url).

    The ``fstype`` prefix is only honoured when it names a registered fs
    plugin; otherwise the whole string is treated as a localfs url.

    :param fs_url: url string, optionally prefixed with ``<plugin>+``.
    :return: tuple of (fs_type, fs_url).
    """
    fs_type = 'localfs'
    chunks = fs_url.split('+', 1)
    # `in` on a mapping tests keys directly — no need for .keys().
    if len(chunks) > 1 and chunks[0] in fs_plugins.gather():
        fs_type, fs_url = chunks
    return fs_type, fs_url
| gpl-3.0 |
rgommers/pywt | benchmarks/benchmarks/cwt_benchmarks.py | 3 | 1583 | import numpy as np
import pywt
class CwtTimeSuiteBase(object):
    """
    Set-up for CWT timing.
    """
    # Benchmark grid: asv runs each time_* method over the cross product
    # of signal length, wavelet name, scale count, dtype and method.
    params = ([32, 128, 512, 2048],
              ['cmor', 'cgau4', 'fbsp', 'gaus4', 'mexh', 'morl', 'shan'],
              [16, 64, 256],
              [np.float32, np.float64],
              ['conv', 'fft'],
              )
    param_names = ('n', 'wavelet', 'max_scale', 'dtype', 'method')

    def setup(self, n, wavelet, max_scale, dtype, method):
        # Skip the suite entirely on PyWavelets builds without cwt.
        try:
            from pywt import cwt
        except ImportError:
            raise NotImplementedError("cwt not available")
        self.data = np.ones(n, dtype=dtype)
        self.batch_data = np.ones((5, n), dtype=dtype)
        self.scales = np.arange(1, max_scale + 1)
class CwtTimeSuite(CwtTimeSuiteBase):
    """Timings for 1d and batched (2d) continuous wavelet transforms."""

    def time_cwt(self, n, wavelet, max_scale, dtype, method):
        try:
            pywt.cwt(self.data, self.scales, wavelet, method=method)
        except TypeError:
            # older PyWavelets does not support use of the method argument
            if method == 'fft':
                raise NotImplementedError(
                    "fft-based convolution not available.")
            pywt.cwt(self.data, self.scales, wavelet)

    def time_cwt_batch(self, n, wavelet, max_scale, dtype, method):
        try:
            pywt.cwt(self.batch_data, self.scales, wavelet, method=method,
                     axis=-1)
        except TypeError:
            # older PyWavelets does not support the axis argument
            raise NotImplementedError(
                "axis argument not available.")
| mit |
tedi3231/openerp | openerp/addons/procurement/procurement.py | 3 | 28473 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
from openerp.tools.translate import _
from openerp import netsvc
import time
import openerp.addons.decimal_precision as dp
# Procurement
# ------------------------------------------------------------------
#
# Produce, Buy or Find products and place a move
# then wizard for picking lists & move
#
class mrp_property_group(osv.osv):
    """
    Group of mrp properties.
    """
    _name = 'mrp.property.group'
    _description = 'Property Group'
    _columns = {
        'name': fields.char('Property Group', size=64, required=True),
        'description': fields.text('Description'),
    }
# Instantiate to register the model (old OpenERP osv API idiom).
mrp_property_group()
class mrp_property(osv.osv):
    """
    Properties of mrp.
    """
    _name = 'mrp.property'
    _description = 'Property'
    _columns = {
        'name': fields.char('Name', size=64, required=True),
        'composition': fields.selection([('min','min'),('max','max'),('plus','plus')], 'Properties composition', required=True, help="Not used in computations, for information purpose only."),
        'group_id': fields.many2one('mrp.property.group', 'Property Group', required=True),
        'description': fields.text('Description'),
    }
    _defaults = {
        'composition': lambda *a: 'min',
    }
# Instantiate to register the model (old OpenERP osv API idiom).
mrp_property()
class StockMove(osv.osv):
    """Extends stock.move with a link back to its procurements."""
    _inherit = 'stock.move'
    _columns = {
        'procurements': fields.one2many('procurement.order', 'move_id', 'Procurements'),
    }

    def copy(self, cr, uid, id, default=None, context=None):
        # Never copy the procurement links: a duplicated move must not
        # appear to satisfy the original move's procurements.
        default = default or {}
        default['procurements'] = []
        return super(StockMove, self).copy(cr, uid, id, default, context=context)
# Instantiate to register the model (old OpenERP osv API idiom).
StockMove()
class procurement_order(osv.osv):
"""
Procurement Orders
"""
_name = "procurement.order"
_description = "Procurement"
_order = 'priority desc,date_planned'
_inherit = ['mail.thread']
_log_create = False
_columns = {
'name': fields.text('Description', required=True),
'origin': fields.char('Source Document', size=64,
help="Reference of the document that created this Procurement.\n"
"This is automatically completed by OpenERP."),
'priority': fields.selection([('0','Not urgent'),('1','Normal'),('2','Urgent'),('3','Very Urgent')], 'Priority', required=True, select=True),
'date_planned': fields.datetime('Scheduled date', required=True, select=True),
'date_close': fields.datetime('Date Closed'),
'product_id': fields.many2one('product.product', 'Product', required=True, states={'draft':[('readonly',False)]}, readonly=True),
'product_qty': fields.float('Quantity', digits_compute=dp.get_precision('Product Unit of Measure'), required=True, states={'draft':[('readonly',False)]}, readonly=True),
'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True, states={'draft':[('readonly',False)]}, readonly=True),
'product_uos_qty': fields.float('UoS Quantity', states={'draft':[('readonly',False)]}, readonly=True),
'product_uos': fields.many2one('product.uom', 'Product UoS', states={'draft':[('readonly',False)]}, readonly=True),
'move_id': fields.many2one('stock.move', 'Reservation', ondelete='set null'),
'close_move': fields.boolean('Close Move at end'),
'location_id': fields.many2one('stock.location', 'Location', required=True, states={'draft':[('readonly',False)]}, readonly=True),
'procure_method': fields.selection([('make_to_stock','Make to Stock'),('make_to_order','Make to Order')], 'Procurement Method', states={'draft':[('readonly',False)], 'confirmed':[('readonly',False)]},
readonly=True, required=True, help="If you encode manually a Procurement, you probably want to use" \
" a make to order method."),
'note': fields.text('Note'),
'message': fields.char('Latest error', size=124, help="Exception occurred while computing procurement orders."),
'state': fields.selection([
('draft','Draft'),
('cancel','Cancelled'),
('confirmed','Confirmed'),
('exception','Exception'),
('running','Running'),
('ready','Ready'),
('done','Done'),
('waiting','Waiting')], 'Status', required=True, track_visibility='onchange',
help='When a procurement is created the status is set to \'Draft\'.\n If the procurement is confirmed, the status is set to \'Confirmed\'.\
\nAfter confirming the status is set to \'Running\'.\n If any exception arises in the order then the status is set to \'Exception\'.\n Once the exception is removed the status becomes \'Ready\'.\n It is in \'Waiting\'. status when the procurement is waiting for another one to finish.'),
'note': fields.text('Note'),
'company_id': fields.many2one('res.company','Company',required=True),
}
_defaults = {
'state': 'draft',
'priority': '1',
'date_planned': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'),
'close_move': 0,
'procure_method': 'make_to_order',
'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'procurement.order', context=c)
}
def unlink(self, cr, uid, ids, context=None):
procurements = self.read(cr, uid, ids, ['state'], context=context)
unlink_ids = []
for s in procurements:
if s['state'] in ['draft','cancel']:
unlink_ids.append(s['id'])
else:
raise osv.except_osv(_('Invalid Action!'),
_('Cannot delete Procurement Order(s) which are in %s state.') % \
s['state'])
return osv.osv.unlink(self, cr, uid, unlink_ids, context=context)
def onchange_product_id(self, cr, uid, ids, product_id, context=None):
""" Finds UoM and UoS of changed product.
@param product_id: Changed id of product.
@return: Dictionary of values.
"""
if product_id:
w = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
v = {
'product_uom': w.uom_id.id,
'product_uos': w.uos_id and w.uos_id.id or w.uom_id.id
}
return {'value': v}
return {}
def is_product(self, cr, uid, ids, context=None):
""" Checks product type to decide which transition of the workflow to follow.
@return: True if all product ids received in argument are of type 'product' or 'consummable'. False if any is of type 'service'
"""
return all(proc.product_id.type in ('product', 'consu') for proc in self.browse(cr, uid, ids, context=context))
def check_move_cancel(self, cr, uid, ids, context=None):
""" Checks if move is cancelled or not.
@return: True or False.
"""
return all(procurement.move_id.state == 'cancel' for procurement in self.browse(cr, uid, ids, context=context))
    # This stub exists only so the 'check_move' workflow condition resolves;
    # it avoids the server-side error "name 'check_move' is not defined".
    def check_move(self, cr, uid, ids, context=None):
        """ Workflow hook; intentionally a no-op in the base module. """
        pass
def check_move_done(self, cr, uid, ids, context=None):
""" Checks if move is done or not.
@return: True or False.
"""
return all(proc.product_id.type == 'service' or (proc.move_id and proc.move_id.state == 'done') \
for proc in self.browse(cr, uid, ids, context=context))
#
# This method may be overrided by objects that override procurement.order
# for computing their own purpose
#
def _quantity_compute_get(self, cr, uid, proc, context=None):
""" Finds sold quantity of product.
@param proc: Current procurement.
@return: Quantity or False.
"""
if proc.product_id.type == 'product' and proc.move_id:
if proc.move_id.product_uos:
return proc.move_id.product_uos_qty
return False
def _uom_compute_get(self, cr, uid, proc, context=None):
""" Finds UoS if product is Stockable Product.
@param proc: Current procurement.
@return: UoS or False.
"""
if proc.product_id.type == 'product' and proc.move_id:
if proc.move_id.product_uos:
return proc.move_id.product_uos.id
return False
#
# Return the quantity of product shipped/produced/served, which may be
# different from the planned quantity
#
def quantity_get(self, cr, uid, id, context=None):
""" Finds quantity of product used in procurement.
@return: Quantity of product.
"""
proc = self.browse(cr, uid, id, context=context)
result = self._quantity_compute_get(cr, uid, proc, context=context)
if not result:
result = proc.product_qty
return result
def uom_get(self, cr, uid, id, context=None):
""" Finds UoM of product used in procurement.
@return: UoM of product.
"""
proc = self.browse(cr, uid, id, context=context)
result = self._uom_compute_get(cr, uid, proc, context=context)
if not result:
result = proc.product_uom.id
return result
def check_waiting(self, cr, uid, ids, context=None):
""" Checks state of move.
@return: True or False
"""
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.move_id and procurement.move_id.state == 'auto':
return True
return False
    def check_produce_service(self, cr, uid, procurement, context=None):
        """ Whether the workflow can produce *services* for this procurement.

        The base module cannot, so this always returns False; the
        project_mrp module overrides it.
        @return: False in the base implementation.
        """
        return False
    def check_produce_product(self, cr, uid, procurement, context=None):
        """ Whether the workflow can manufacture goods for this procurement.

        The base module cannot, so this always returns False; the mrp
        module overrides it.
        @return: False in the base implementation.
        """
        return False
def check_make_to_stock(self, cr, uid, ids, context=None):
""" Checks product type.
@return: True or False
"""
ok = True
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.product_id.type == 'service':
ok = ok and self._check_make_to_stock_service(cr, uid, procurement, context)
else:
ok = ok and self._check_make_to_stock_product(cr, uid, procurement, context)
return ok
def check_produce(self, cr, uid, ids, context=None):
""" Checks product type.
@return: True or False
"""
user = self.pool.get('res.users').browse(cr, uid, uid, context=context)
for procurement in self.browse(cr, uid, ids, context=context):
product = procurement.product_id
#TOFIX: if product type is 'service' but supply_method is 'buy'.
if product.supply_method <> 'produce':
return False
if product.type=='service':
res = self.check_produce_service(cr, uid, procurement, context)
else:
res = self.check_produce_product(cr, uid, procurement, context)
if not res:
return False
return True
    def check_buy(self, cr, uid, ids):
        """ Whether the workflow can fulfil procurements by purchasing.

        The base module cannot (supply_method == 'buy' is handled by the
        purchase module, which overrides this).
        @return: False in the base implementation.
        """
        return False
    def check_conditions_confirm2wait(self, cr, uid, ids):
        """ Condition on the transition from the 'confirm' activity to the
        'confirm_wait' activity: allowed only when no related stock move
        has been cancelled.
        @return: True when the transition may take place.
        """
        return not self.test_cancel(cr, uid, ids)
def test_cancel(self, cr, uid, ids):
""" Tests whether state of move is cancelled or not.
@return: True or False
"""
for record in self.browse(cr, uid, ids):
if record.move_id and record.move_id.state == 'cancel':
return True
return False
#Initialize get_phantom_bom_id method as it is raising an error from yml of mrp_jit
#when one install first mrp and after that, mrp_jit. get_phantom_bom_id defined in mrp module
#which is not dependent for mrp_jit.
    def get_phantom_bom_id(self, cr, uid, ids, context=None):
        """ Placeholder so this name always resolves (see comment above).

        The mrp module provides the real implementation; this stub avoids
        an error when mrp_jit is installed without/before mrp.
        @return: False.
        """
        return False
def action_confirm(self, cr, uid, ids, context=None):
""" Confirms procurement and writes exception message if any.
@return: True
"""
move_obj = self.pool.get('stock.move')
for procurement in self.browse(cr, uid, ids, context=context):
if procurement.product_qty <= 0.00:
raise osv.except_osv(_('Data Insufficient !'),
_('Please check the quantity in procurement order(s) for the product "%s", it should not be 0 or less!' % procurement.product_id.name))
if procurement.product_id.type in ('product', 'consu'):
if not procurement.move_id:
source = procurement.location_id.id
if procurement.procure_method == 'make_to_order':
source = procurement.product_id.property_stock_procurement.id
id = move_obj.create(cr, uid, {
'name': procurement.name,
'location_id': source,
'location_dest_id': procurement.location_id.id,
'product_id': procurement.product_id.id,
'product_qty': procurement.product_qty,
'product_uom': procurement.product_uom.id,
'date_expected': procurement.date_planned,
'state': 'draft',
'company_id': procurement.company_id.id,
'auto_validate': True,
})
move_obj.action_confirm(cr, uid, [id], context=context)
self.write(cr, uid, [procurement.id], {'move_id': id, 'close_move': 1})
self.write(cr, uid, ids, {'state': 'confirmed', 'message': ''})
return True
def action_move_assigned(self, cr, uid, ids, context=None):
""" Changes procurement state to Running and writes message.
@return: True
"""
message = _('Products reserved from stock.')
self.write(cr, uid, ids, {'state': 'running',
'message': message}, context=context)
self.message_post(cr, uid, ids, body=message, context=context)
return True
    def _check_make_to_stock_service(self, cr, uid, procurement, context=None):
        """ Make-to-stock check for service products.

        Nothing to verify in the base module, so it always succeeds; may
        be overridden by modules extending procurement.order for their own
        purpose.
        @return: True
        """
        return True
    def _check_make_to_stock_product(self, cr, uid, procurement, context=None):
        """ Try to reserve stock for a make-to-stock procurement.

        Attempts to assign the linked move when it is not already in a
        final state; on failure, an exception message is stored on the
        procurement and posted to its message thread.
        @param procurement: current procurement browse record.
        @return: True when nothing had to be done or the reservation
                 succeeded, otherwise the falsy result of action_assign.
        """
        ok = True
        if procurement.move_id:
            message = False
            id = procurement.move_id.id
            if not (procurement.move_id.state in ('done','assigned','cancel')):
                ok = ok and self.pool.get('stock.move').action_assign(cr, uid, [id])
                # Distinguish "no stock and no reordering rule" from plain
                # "no stock" to give the user an actionable message.
                order_point_id = self.pool.get('stock.warehouse.orderpoint').search(cr, uid, [('product_id', '=', procurement.product_id.id)], context=context)
                if not order_point_id and not ok:
                    message = _("Not enough stock and no minimum orderpoint rule defined.")
                elif not ok:
                    message = _("Not enough stock.")
                if message:
                    message = _("Procurement '%s' is in exception: ") % (procurement.name) + message
                    # Direct SQL write: updates the informational field
                    # without triggering write() side effects.
                    cr.execute('update procurement_order set message=%s where id=%s', (message, procurement.id))
                    self.message_post(cr, uid, [procurement.id], body=message, context=context)
        return ok
def action_produce_assign_service(self, cr, uid, ids, context=None):
""" Changes procurement state to Running.
@return: True
"""
for procurement in self.browse(cr, uid, ids, context=context):
self.write(cr, uid, [procurement.id], {'state': 'running'})
return True
    def action_produce_assign_product(self, cr, uid, ids, context=None):
        """ Workflow action assigning a production order to procurements.

        Stub in the base module; the mrp module provides the real
        implementation.
        @return: 0 in the base implementation.
        """
        return 0
    def action_po_assign(self, cr, uid, ids, context=None):
        """ Workflow action assigning a purchase order to procurements.

        Stub in the base module; the purchase module provides the real
        implementation.
        @return: 0 in the base implementation.
        """
        return 0
# XXX action_cancel() should accept a context argument
def action_cancel(self, cr, uid, ids):
"""Cancel Procurements and either cancel or assign the related Stock Moves, depending on the procurement configuration.
@return: True
"""
to_assign = []
to_cancel = []
move_obj = self.pool.get('stock.move')
for proc in self.browse(cr, uid, ids):
if proc.close_move and proc.move_id:
if proc.move_id.state not in ('done', 'cancel'):
to_cancel.append(proc.move_id.id)
else:
if proc.move_id and proc.move_id.state == 'waiting':
to_assign.append(proc.move_id.id)
if len(to_cancel):
move_obj.action_cancel(cr, uid, to_cancel)
if len(to_assign):
move_obj.write(cr, uid, to_assign, {'state': 'assigned'})
self.write(cr, uid, ids, {'state': 'cancel'})
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_trigger(uid, 'procurement.order', id, cr)
return True
    def action_check_finished(self, cr, uid, ids):
        """ Workflow test: are all procurements finished?

        Delegates to check_move_done().
        @return: True or False.
        """
        return self.check_move_done(cr, uid, ids)
def action_check(self, cr, uid, ids):
""" Checks procurement move state whether assigned or done.
@return: True
"""
ok = False
for procurement in self.browse(cr, uid, ids):
if procurement.move_id and procurement.move_id.state == 'assigned' or procurement.move_id.state == 'done':
self.action_done(cr, uid, [procurement.id])
ok = True
return ok
def action_ready(self, cr, uid, ids):
""" Changes procurement state to Ready.
@return: True
"""
res = self.write(cr, uid, ids, {'state': 'ready'})
return res
def action_done(self, cr, uid, ids):
""" Changes procurement state to Done and writes Closed date.
@return: True
"""
move_obj = self.pool.get('stock.move')
for procurement in self.browse(cr, uid, ids):
if procurement.move_id:
if procurement.close_move and (procurement.move_id.state <> 'done'):
move_obj.action_done(cr, uid, [procurement.move_id.id])
res = self.write(cr, uid, ids, {'state': 'done', 'date_close': time.strftime('%Y-%m-%d')})
wf_service = netsvc.LocalService("workflow")
for id in ids:
wf_service.trg_trigger(uid, 'procurement.order', id, cr)
return res
class StockPicking(osv.osv):
    _inherit = 'stock.picking'

    def test_finished(self, cursor, user, ids):
        """ Extend picking completion test: for every done move that has
        procurements attached, push those procurements forward through
        the 'button_check' workflow signal.

        @return: result of the inherited test_finished().
        """
        wf_service = netsvc.LocalService("workflow")
        res = super(StockPicking, self).test_finished(cursor, user, ids)
        for picking in self.browse(cursor, user, ids):
            done_moves = [move for move in picking.move_lines
                          if move.state == 'done' and move.procurements]
            for move in done_moves:
                for procurement in move.procurements:
                    wf_service.trg_validate(user, 'procurement.order',
                            procurement.id, 'button_check', cursor)
        return res
class stock_warehouse_orderpoint(osv.osv):
    """
    Defines Minimum stock rules (reordering rules / orderpoints): when the
    forecasted stock of a product at a location goes below the minimum
    quantity, a procurement is generated to bring it back to the maximum.
    """
    _name = "stock.warehouse.orderpoint"
    _description = "Minimum Inventory Rule"
    def _get_draft_procurements(self, cr, uid, ids, field_name, arg, context=None):
        """ Function field: draft procurements matching this orderpoint's
        product and location, keyed by orderpoint id. """
        if context is None:
            context = {}
        result = {}
        procurement_obj = self.pool.get('procurement.order')
        for orderpoint in self.browse(cr, uid, ids, context=context):
            procurement_ids = procurement_obj.search(cr, uid , [('state', '=', 'draft'), ('product_id', '=', orderpoint.product_id.id), ('location_id', '=', orderpoint.location_id.id)])
            result[orderpoint.id] = procurement_ids
        return result
    def _check_product_uom(self, cr, uid, ids, context=None):
        '''
        Check if the UoM has the same category as the product standard UoM
        (constraint: quantities are only convertible inside one category).
        '''
        if not context:
            context = {}
        for rule in self.browse(cr, uid, ids, context=context):
            if rule.product_id.uom_id.category_id.id != rule.product_uom.category_id.id:
                return False
        return True
    _columns = {
        'name': fields.char('Name', size=32, required=True),
        'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the orderpoint without removing it."),
        'logic': fields.selection([('max','Order to Max'),('price','Best price (not yet active!)')], 'Reordering Mode', required=True),
        'warehouse_id': fields.many2one('stock.warehouse', 'Warehouse', required=True, ondelete="cascade"),
        'location_id': fields.many2one('stock.location', 'Location', required=True, ondelete="cascade"),
        'product_id': fields.many2one('product.product', 'Product', required=True, ondelete='cascade', domain=[('type','!=','service')]),
        'product_uom': fields.many2one('product.uom', 'Product Unit of Measure', required=True),
        'product_min_qty': fields.float('Minimum Quantity', required=True,
            help="When the virtual stock goes below the Min Quantity specified for this field, OpenERP generates "\
            "a procurement to bring the forecasted quantity to the Max Quantity."),
        'product_max_qty': fields.float('Maximum Quantity', required=True,
            help="When the virtual stock goes below the Min Quantity, OpenERP generates "\
            "a procurement to bring the forecasted quantity to the Quantity specified as Max Quantity."),
        'qty_multiple': fields.integer('Qty Multiple', required=True,
            help="The procurement quantity will be rounded up to this multiple."),
        'procurement_id': fields.many2one('procurement.order', 'Latest procurement', ondelete="set null"),
        'company_id': fields.many2one('res.company','Company',required=True),
        'procurement_draft_ids': fields.function(_get_draft_procurements, type='many2many', relation="procurement.order", \
                                string="Related Procurement Orders",help="Draft procurement of the product and location of that orderpoint"),
    }
    _defaults = {
        'active': lambda *a: 1,
        'logic': lambda *a: 'max',
        'qty_multiple': lambda *a: 1,
        # Name comes from the dedicated ir.sequence.
        'name': lambda x,y,z,c: x.pool.get('ir.sequence').get(y,z,'stock.orderpoint') or '',
        'product_uom': lambda sel, cr, uid, context: context.get('product_uom', False),
        'company_id': lambda self, cr, uid, c: self.pool.get('res.company')._company_default_get(cr, uid, 'stock.warehouse.orderpoint', context=c)
    }
    _sql_constraints = [
        ('qty_multiple_check', 'CHECK( qty_multiple > 0 )', 'Qty Multiple must be greater than zero.'),
    ]
    _constraints = [
        (_check_product_uom, 'You have to select a product unit of measure in the same category than the default unit of measure of the product', ['product_id', 'product_uom']),
    ]
    def default_get(self, cr, uid, fields, context=None):
        """ Pre-fill warehouse and location with the main warehouse and its
        stock location when the caller provided neither. """
        res = super(stock_warehouse_orderpoint, self).default_get(cr, uid, fields, context)
        # default 'warehouse_id' and 'location_id'
        if 'warehouse_id' not in res:
            warehouse = self.pool.get('ir.model.data').get_object(cr, uid, 'stock', 'warehouse0', context)
            res['warehouse_id'] = warehouse.id
        if 'location_id' not in res:
            warehouse = self.pool.get('stock.warehouse').browse(cr, uid, res['warehouse_id'], context)
            res['location_id'] = warehouse.lot_stock_id.id
        return res
    def onchange_warehouse_id(self, cr, uid, ids, warehouse_id, context=None):
        """ Finds location id for changed warehouse.
        @param warehouse_id: Changed id of warehouse.
        @return: Dictionary of values.
        """
        if warehouse_id:
            w = self.pool.get('stock.warehouse').browse(cr, uid, warehouse_id, context=context)
            v = {'location_id': w.lot_stock_id.id}
            return {'value': v}
        return {}
    def onchange_product_id(self, cr, uid, ids, product_id, context=None):
        """ Finds UoM for changed product.
        @param product_id: Changed id of product.
        @return: Dictionary of values (also restricts the UoM domain to the
        product's UoM category).
        """
        if product_id:
            prod = self.pool.get('product.product').browse(cr, uid, product_id, context=context)
            d = {'product_uom': [('category_id', '=', prod.uom_id.category_id.id)]}
            v = {'product_uom': prod.uom_id.id}
            return {'value': v, 'domain': d}
        return {'domain': {'product_uom': []}}
    def copy(self, cr, uid, id, default=None, context=None):
        # Give the duplicate its own sequence-generated name instead of
        # copying the original's.
        if not default:
            default = {}
        default.update({
            'name': self.pool.get('ir.sequence').get(cr, uid, 'stock.orderpoint') or '',
        })
        return super(stock_warehouse_orderpoint, self).copy(cr, uid, id, default, context=context)
class product_template(osv.osv):
    _inherit="product.template"
    # Add procurement-related configuration to product templates.
    _columns = {
        'type': fields.selection([('product','Stockable Product'),('consu', 'Consumable'),('service','Service')], 'Product Type', required=True, help="Consumable: Will not imply stock management for this product. \nStockable product: Will imply stock management for this product."),
        'procure_method': fields.selection([('make_to_stock','Make to Stock'),('make_to_order','Make to Order')], 'Procurement Method', required=True, help="Make to Stock: When needed, the product is taken from the stock or we wait for replenishment. \nMake to Order: When needed, the product is purchased or produced."),
        'supply_method': fields.selection([('produce','Manufacture'),('buy','Buy')], 'Supply Method', required=True, help="Manufacture: When procuring the product, a manufacturing order or a task will be generated, depending on the product type. \nBuy: When procuring the product, a purchase order will be generated."),
    }
    _defaults = {
        'procure_method': 'make_to_stock',
        'supply_method': 'buy',
    }
class product_product(osv.osv):
    _inherit="product.product"
    # Expose the reordering (minimum stock) rules linked to each product.
    _columns = {
        'orderpoint_ids': fields.one2many('stock.warehouse.orderpoint', 'product_id', 'Minimum Stock Rules'),
    }
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
pratikmallya/heat | heat/engine/resources/openstack/monasca/alarm_definition.py | 2 | 7151 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from heat.common.i18n import _
from heat.engine import clients
from heat.engine import constraints
from heat.engine import properties
from heat.engine import resource
from heat.engine import support
class MonascaAlarmDefinition(resource.Resource):
    """Heat Template Resource for Monasca Alarm definition.

    This plug-in requires python-monascaclient>=1.0.22. So to enable this
    plug-in, install this client library and restart the heat-engine.
    """

    support_status = support.SupportStatus(
        version='5.0.0',
        status=support.UNSUPPORTED)

    default_client_name = 'monasca'

    entity = 'alarm_definitions'

    SEVERITY_LEVELS = (
        LOW, MEDIUM, HIGH, CRITICAL
    ) = (
        'low', 'medium', 'high', 'critical'
    )

    PROPERTIES = (
        NAME, DESCRIPTION, EXPRESSION, MATCH_BY, SEVERITY,
        OK_ACTIONS, ALARM_ACTIONS, UNDETERMINED_ACTIONS,
        ACTIONS_ENABLED
    ) = (
        'name', 'description', 'expression', 'match_by', 'severity',
        'ok_actions', 'alarm_actions', 'undetermined_actions',
        'actions_enabled'
    )

    properties_schema = {
        NAME: properties.Schema(
            properties.Schema.STRING,
            _('Name of the alarm. By default, physical resource name is '
              'used.'),
            update_allowed=True
        ),
        DESCRIPTION: properties.Schema(
            properties.Schema.STRING,
            _('Description of the alarm.'),
            update_allowed=True
        ),
        EXPRESSION: properties.Schema(
            properties.Schema.STRING,
            _('Expression of the alarm to evaluate.'),
            update_allowed=False,
            required=True
        ),
        MATCH_BY: properties.Schema(
            properties.Schema.LIST,
            _('The metric dimensions to match to the alarm dimensions. '
              'One or more dimension key names separated by a comma.')
        ),
        SEVERITY: properties.Schema(
            properties.Schema.STRING,
            _('Severity of the alarm.'),
            update_allowed=True,
            constraints=[constraints.AllowedValues(
                SEVERITY_LEVELS
            )],
            default=LOW
        ),
        OK_ACTIONS: properties.Schema(
            properties.Schema.LIST,
            _('The notification methods to use when an alarm state is OK.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                _('Monasca notification'),
                constraints=[constraints.CustomConstraint(
                    'monasca.notification')
                ]
            )
        ),
        ALARM_ACTIONS: properties.Schema(
            properties.Schema.LIST,
            _('The notification methods to use when an alarm state is ALARM.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                _('Monasca notification'),
                constraints=[constraints.CustomConstraint(
                    'monasca.notification')
                ]
            )
        ),
        UNDETERMINED_ACTIONS: properties.Schema(
            properties.Schema.LIST,
            _('The notification methods to use when an alarm state is '
              'UNDETERMINED.'),
            update_allowed=True,
            schema=properties.Schema(
                properties.Schema.STRING,
                _('Monasca notification'),
                constraints=[constraints.CustomConstraint(
                    'monasca.notification')
                ]
            )
        ),
        ACTIONS_ENABLED: properties.Schema(
            properties.Schema.BOOLEAN,
            _('Whether to enable the actions or not.'),
            update_allowed=True,
            default=True,
        ),
    }

    def handle_create(self):
        """Create the alarm definition, then disable actions if requested.

        The create API has no actions_enabled argument and Monasca enables
        actions by default, so a follow-up patch call is issued when the
        property is False.
        """
        args = dict(
            name=(self.properties[self.NAME] or
                  self.physical_resource_name()),
            description=self.properties[self.DESCRIPTION],
            expression=self.properties[self.EXPRESSION],
            match_by=self.properties[self.MATCH_BY],
            severity=self.properties[self.SEVERITY],
            ok_actions=self.properties[self.OK_ACTIONS],
            alarm_actions=self.properties[self.ALARM_ACTIONS],
            undetermined_actions=self.properties[
                self.UNDETERMINED_ACTIONS]
        )

        alarm = self.client().alarm_definitions.create(**args)
        self.resource_id_set(alarm['id'])

        # Monasca enables action by default
        actions_enabled = self.properties[self.ACTIONS_ENABLED]
        if not actions_enabled:
            self.client().alarm_definitions.patch(
                alarm_id=self.resource_id,
                actions_enabled=actions_enabled
            )

    def handle_update(self, prop_diff, json_snippet=None, tmpl_diff=None):
        """Patch the alarm definition with the changed properties."""
        args = dict(alarm_id=self.resource_id)

        if prop_diff.get(self.NAME):
            args['name'] = prop_diff.get(self.NAME)

        if prop_diff.get(self.DESCRIPTION):
            args['description'] = prop_diff.get(self.DESCRIPTION)

        if prop_diff.get(self.SEVERITY):
            args['severity'] = prop_diff.get(self.SEVERITY)

        if prop_diff.get(self.OK_ACTIONS):
            args['ok_actions'] = prop_diff.get(self.OK_ACTIONS)

        if prop_diff.get(self.ALARM_ACTIONS):
            args['alarm_actions'] = prop_diff.get(self.ALARM_ACTIONS)

        if prop_diff.get(self.UNDETERMINED_ACTIONS):
            args['undetermined_actions'] = prop_diff.get(
                self.UNDETERMINED_ACTIONS
            )

        # Bug fix: test membership, not truthiness. With the former
        # `if prop_diff.get(self.ACTIONS_ENABLED):`, updating
        # actions_enabled to False (a falsy value) was silently ignored.
        if self.ACTIONS_ENABLED in prop_diff:
            args['actions_enabled'] = prop_diff.get(self.ACTIONS_ENABLED)

        self.client().alarm_definitions.patch(**args)

    def handle_delete(self):
        """Delete the alarm definition, ignoring 'not found' errors."""
        if self.resource_id is not None:
            try:
                self.client().alarm_definitions.delete(
                    alarm_id=self.resource_id)
            except Exception as ex:
                self.client_plugin().ignore_not_found(ex)

    # FIXME(kanagaraj-manickam) Remove this method once monasca defect 1484900
    # is fixed.
    def _show_resource(self):
        return self.client().alarm_definitions.get(self.resource_id)
def resource_mapping():
    """Map Heat resource type names to their implementing classes."""
    return {
        'OS::Monasca::AlarmDefinition': MonascaAlarmDefinition
    }
def available_resource_mapping():
    """Expose the mapping only when the monasca client is installed."""
    if clients.has_client(MonascaAlarmDefinition.default_client_name):
        return resource_mapping()
    return {}
| apache-2.0 |
Venturi/cms | env/lib/python2.7/site-packages/phonenumbers/data/region_KY.py | 9 | 1921 | """Auto-generated file, do not edit by hand. KY metadata"""
from ..phonemetadata import NumberFormat, PhoneNumberDesc, PhoneMetadata
PHONE_METADATA_KY = PhoneMetadata(id='KY', country_code=1, international_prefix='011',
general_desc=PhoneNumberDesc(national_number_pattern='[3589]\\d{9}', possible_number_pattern='\\d{7}(?:\\d{3})?'),
fixed_line=PhoneNumberDesc(national_number_pattern='345(?:2(?:22|44)|444|6(?:23|38|40)|7(?:4[35-79]|6[6-9]|77)|8(?:00|1[45]|25|[48]8)|9(?:14|4[035-9]))\\d{4}', possible_number_pattern='\\d{7}(?:\\d{3})?', example_number='3452221234'),
mobile=PhoneNumberDesc(national_number_pattern='345(?:32[1-9]|5(?:1[67]|2[5-7]|4[6-8]|76)|9(?:1[67]|2[2-9]|3[689]))\\d{4}', possible_number_pattern='\\d{10}', example_number='3453231234'),
toll_free=PhoneNumberDesc(national_number_pattern='8(?:00|44|55|66|77|88)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='8002345678'),
premium_rate=PhoneNumberDesc(national_number_pattern='900[2-9]\\d{6}|345976\\d{4}', possible_number_pattern='\\d{10}', example_number='9002345678'),
shared_cost=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
personal_number=PhoneNumberDesc(national_number_pattern='5(?:00|33|44|66|77)[2-9]\\d{6}', possible_number_pattern='\\d{10}', example_number='5002345678'),
voip=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
pager=PhoneNumberDesc(national_number_pattern='345849\\d{4}', possible_number_pattern='\\d{10}', example_number='3458491234'),
uan=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
voicemail=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
no_international_dialling=PhoneNumberDesc(national_number_pattern='NA', possible_number_pattern='NA'),
national_prefix='1',
national_prefix_for_parsing='1',
leading_digits='345')
| gpl-2.0 |
anarchivist/pyflag | src/plugins/Tests/dftt.py | 7 | 9380 | """ The Digital Forensics Tool Testing Images are a standard set of forensic tool testing images.
This file contains unit tests against these. In order to run these tests you need to download all the images from:
http://dftt.sourceforge.net/
and unzip them in the upload directory.
"""
import unittest
import pyflag.pyflagsh as pyflagsh
import pyflag.tests
import pyflag.DB as DB
import pyflag.FileSystem as FileSystem
import pyflag.conf
config=pyflag.conf.ConfObject()
class KeyWordSearchTest(pyflag.tests.ScannerTest):
    """ Test DFTT image 2: FAT Keyword Test """
    test_case = 'dftt'
    test_file = "2-kwsrch-fat/fat-img-kw.dd"
    subsystem = 'Standard'
    ## Copied from DFTT page - id,string, sector, offset, file, note
    case_sensitive_keywords = [
        [1,'first', 271, 167, '/file1.dat','in file'],
        [2,'SECOND', 272, 288, '/file2.dat','in file'],
        [2,'SECOND', 239, 480, None, 'in dentry - file name'],
        [3,'1cross1', 271, 508, '/file1.dat','crosses two allocated files'],
        [4,'2cross2', 273, 508, '/file3.dat','crosses consecutive sectors in a file'],
        [5,'3cross3', 282, 1020, '/_unallocated_/o00000001', 'crosses in unalloc'],
        [6,'1slack1', 272, 396, '/file2.dat','crosses a file into slack'],
        ## This was changed to measure the offset from the start of the file:
        [7,'2slack2', 273, 1020, '/file3.dat','crosses slack into a file'],
        [8,'3slack3', 276, 897, '/file4.dat','in slack'],
        [9,'1fragment1', 275, 507, '/file4.dat','crosses fragmented sectors'],
        [10,'2fragment sentence2', 278, 502, '/file6.dat', 'crosses fragmented sectors on ' ''],
        ## We seem to find this twice:
        [11,'deleted', 276, 230, '/_unallocated_/o00000001', 'deleted file'],
        [11,'deleted', 276, 230, '/_ILE5.DAT', 'deleted file'],
        [12,'a?b\c*d$e#f[g^',279, 160, '/file7.dat', 'regexp values'],
        ]
    case_insesitive_keywords = []
    # id, pattern, list of strings the pattern is expected to hit.
    regex_keywords = [
        [14,r'f[\w]rst', ['first']],
        [15,r'f[a-z]r[0-9]?s[\s]*t', ["first"]],
        [16,r'd[a-z]l.?t.?d', ["deleted"]],
        [17,r'[0-9][r-t][\s]?[j-m][\s]?[a-c]{2,2}[\s]?[j-m][0-9]',
         ['1slack1', '2slack2', '3slack']],
        [18,r'[1572943][\s]?fr.{2,3}ent[\s]?',
         ['1fragment', '2fragment ']],
        [19,r'a\??[a-c]\\*[a-c]\**', ['a?b\c*']],
        [20,r'\s\??x?y?Q?[a-c]\\*u*[a-c]\**d\$[0-9]*e#',
         ['a?b\c*d$e#']],
        ]
    def find_expected_output(self, word, id, filename, offset, array, data):
        # Tick a hit off against the expected tables above; matched entries
        # are removed so whatever is left after test02 marks the misses.
        for i in range(len(self.case_sensitive_keywords)):
            row = self.case_sensitive_keywords[i]
            if id==row[0] and filename==row[4] and offset==row[3]:
                self.case_sensitive_keywords.pop(i)
                return
            if data==row[1]:
                self.case_sensitive_keywords.pop(i)
                break
        for i in range(len(self.regex_keywords)):
            if id==self.regex_keywords[i][0]:
                array = self.regex_keywords[i][2]
                for j in range(len(array)):
                    if array[j]==data:
                        array.pop(j)
                        return
        #self.fail("Unable to find a match for %s" % word)
        print "Unable to find a match for %s" % word
    def test01RunScanner(self):
        """ Running scanners """
        ## Populate the key words into the dictionary:
        dbh = DB.DBO()
        for row in self.case_sensitive_keywords:
            id = row[0]
            w = row[1]
            dbh.delete('dictionary','id=%r' % (id+1000), _fast=True)
            dbh.insert('dictionary', _fast=True,
                       **{'id':id+1000, 'class':"DFTT",
                          'type': 'literal', 'word':w})
        for row in self.regex_keywords:
            id = row[0]
            w = row[1]
            dbh.delete('dictionary','id=%r' % (id+1000), _fast=True)
            dbh.insert('dictionary', _fast = True,
                       **{'id':id+1000, 'class':"DFTT",
                          'type': 'regex', 'word':w})
        env = pyflagsh.environment(case=self.test_case)
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'IndexScan'])
    def test02TestOutput(self):
        """ Testing output """
        dbh = DB.DBO(self.test_case)
        fsfd = FileSystem.DBFS(self.test_case)
        # Pull every hit recorded for our dictionary ids (1001..1019) and
        # verify each against the expected-output tables.
        dbh.execute("select inode_id, word_id, word,offset,length from LogicalIndexOffsets join %s.dictionary on LogicalIndexOffsets.word_id=%s.dictionary.id where id>1000 and id<1020", (config.FLAGDB,config.FLAGDB))
        for row in dbh:
            patg, inode, inode_id = fsfd.lookup(inode_id = row['inode_id'])
            fd = fsfd.open(inode=inode)
            fd.overread = True
            fd.slack = True
            fd.seek(row['offset'])
            data = fd.read(row['length'])
            filename, inode,inode_id = fsfd.lookup(inode = inode)
            print "Looking for %s: Found in %s (%s) at offset %s length %s %r" % (
                row['word'], filename, inode, row['offset'], row['length'],data)
            #self.assertEqual(data.lower(), row['word'].lower())
            self.find_expected_output(row['word'], row['word_id']-1000, filename,
                                      row['offset'], self.case_sensitive_keywords, data)
        print "Left over %s" % self.case_sensitive_keywords
class JpegSearchTest(pyflag.tests.ScannerTest):
    """ Test DFTT image 8: Jpeg image search #1 """
    test_case = "dftt"
    test_file = "8-jpeg-search/8-jpeg-search.dd"
    subsystem = 'Standard'
    def check_for_file(self, sql='1'):
        # Return the first JPEG-typed file row matching the extra SQL
        # condition, after verifying the file is actually readable;
        # None when no such row exists.
        dbh=DB.DBO(self.test_case)
        dbh.execute("select inode.inode as inode,type.type,path,name from file,type,inode where file.inode_id=type.inode_id and inode.inode_id=type.inode_id and type.type like '%%JPEG%%' and %s limit 1", sql)
        row = dbh.fetch()
        if not row: return None
        ## Check that its a real file:
        fsfd = FileSystem.DBFS(self.test_case)
        fd = fsfd.open(inode = row['inode'])
        data = fd.read()
        if len(data) == 0:
            raise IOError("Can not read file %s%s (%s) %r" % (row['path'],row['name'], row['inode'], data))
        return row
    def test01RunScanner(self):
        """ Running scanners """
        env = pyflagsh.environment(case=self.test_case)
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'ZipScan', 'TarScan', 'GZScan'])
        pyflagsh.shell_execv(env=env, command="scan",
                             argv=["*",'JPEGCarver', 'ZipScan', 'TarScan', 'GZScan', 'TypeScan', 'IndexScan'])
    ## The following are just tests against the results:
    def test02(self):
        """ Did the search results include the alloc\\file1.jpg picture? """
        self.assert_(self.check_for_file('path="/alloc/" and name="file1.jpg"'))
    def test03(self):
        """ Did the search results include the alloc\\file2.dat picture? """
        self.assert_(self.check_for_file('path="/alloc/" and name="file2.dat"'))
    ## We Dont recognise this an a jpeg
    def test04(self):
        """ Did the search results include the invalid\\file3.jpg file? """
        self.assertEqual(None, self.check_for_file('path="/invalid/" and name="file3.dat"'))
    ## This is recognised as a jpeg even though its not, because of its magic header.
    def test05(self):
        """ Did the search results include the invalid\\file4.jpg file? """
        self.assert_(self.check_for_file('path="/invalid/" and name="file4.jpg"'))
    ## We Dont recognise this an a jpeg
    def test06(self):
        """ Did the search results include the invalid\\file5.rtf file? """
        self.assertEqual(None, self.check_for_file('path="/invalid/" and name="file5.rtf"'))
    def test07(self):
        """ Did the search results include the deleted picture in MFT entry #32 (del1/file6.jpg)? """
        self.assert_(self.check_for_file('inode.inode like "%K32-128-3%"'))
    def test08(self):
        """ Did the search results include the deleted picture in MFT entry #31 (del2/file7.jpg)? """
        self.assert_(self.check_for_file('inode.inode like "%K31-128-3%"'))
    def test09(self):
        """ Did the search results include the picture inside of archive\\file8.zip? """
        self.assert_(self.check_for_file('path="/archive/file8.zip/"'))
    def test10(self):
        """ Did the search results include the picture inside of archive\\file9.boo? """
        self.assert_(self.check_for_file('path="/archive/file9.boo/"'))
    def test11(self):
        """ Did the search results include the picture inside of archive\\file10.tar.gz? """
        self.assert_(self.check_for_file('path="/archive/file10.tar.gz/file10.tar/"'))
    def test12(self):
        """ Did the search results include the misc\\file11.dat file? """
        self.assert_(self.check_for_file('path="/misc/file11.dat/"'))
    def test13(self):
        """ Did the search results include the misc\\file12.doc file? """
        self.assert_(self.check_for_file('path="/misc/file12.doc/"'))
    def test14(self):
        """ Did the search results include the misc\\file13.dll:here picture? """
        self.assert_(self.check_for_file('path="/misc/" and name="file13.dll:here"'))
figarocms/thumbor | thumbor/context.py | 4 | 8337 | #!/usr/bin/python
# -*- coding: utf-8 -*-
# thumbor imaging service
# https://github.com/thumbor/thumbor/wiki
# Licensed under the MIT license:
# http://www.opensource.org/licenses/mit-license
# Copyright (c) 2011 globo.com timehome@corp.globo.com
from os.path import abspath, exists
import tornado
from concurrent.futures import ThreadPoolExecutor, Future
import functools
from thumbor.filters import FiltersFactory
from thumbor.metrics.logger_metrics import Metrics
from thumbor.url import Url
class Context:
    '''
    Per-request container tying together:
    * Server Configuration Parameters (port, ip, key, etc);
    * Configurations read from config file (or defaults);
    * Importer with imported modules (engine, filters, detectors, etc);
    * Request Parameters (width, height, smart, meta, etc).
    Each instance of this class MUST be unique per request. This class should not be cached in the server.
    '''

    def __init__(self, server=None, config=None, importer=None, request_handler=None):
        self.server = server
        self.config = config
        if importer:
            self.modules = ContextImporter(self, importer)
            # Prefer the metrics backend picked by the importer, falling
            # back to the default logger-based implementation.
            self.metrics = importer.metrics(config) if importer.metrics else Metrics(config)
        else:
            self.modules = None
            self.metrics = Metrics(config)
        self.filters_factory = FiltersFactory(self.modules.filters if self.modules else [])
        self.request_handler = request_handler
        self.statsd_client = self.metrics  # TODO statsd_client is deprecated, remove me on next minor version bump
        # Thread pools are cached per size since a fresh Context is built
        # for every request.
        self.thread_pool = ThreadPool.instance(getattr(config, 'ENGINE_THREADPOOL_SIZE', 0))
        self.headers = {}
class ServerParameters(object):
    """Process-level options for a running thumbor server."""

    def __init__(self, port, ip, config_path, keyfile, log_level, app_class, fd=None, gifsicle_path=None):
        self.port = port
        self.ip = ip
        self.config_path = config_path
        self.keyfile = keyfile
        self.log_level = log_level
        self.app_class = app_class
        self.fd = fd
        self._security_key = None
        # Populate the key from disk right away when a keyfile was given.
        self.load_security_key()
        self.gifsicle_path = gifsicle_path

    @property
    def security_key(self):
        """The shared secret used to sign URLs."""
        return self._security_key

    @security_key.setter
    def security_key(self, key):
        # Normalize unicode keys to UTF-8 bytes (Python 2 semantics).
        self._security_key = key.encode('utf-8') if isinstance(key, unicode) else key

    def load_security_key(self):
        """Read the security key from ``self.keyfile``, if one is set."""
        if not self.keyfile:
            return
        path = abspath(self.keyfile)
        if not exists(path):
            raise ValueError('Could not find security key file at %s. Please verify the keypath argument.' % path)
        with open(path, 'r') as f:
            self.security_key = f.read().strip()
class RequestParameters:
    # Per-request imaging options, normally parsed from the thumbor URL.
    def __init__(self,
                 debug=False,
                 meta=False,
                 trim=None,
                 crop_left=None,
                 crop_top=None,
                 crop_right=None,
                 crop_bottom=None,
                 crop=None,
                 adaptive=False,
                 full=False,
                 fit_in=False,
                 width=0,
                 height=0,
                 horizontal_flip=False,
                 vertical_flip=False,
                 halign='center',
                 valign='middle',
                 filters=None,
                 smart=False,
                 quality=80,
                 image=None,
                 url=None,
                 extension=None,
                 buffer=None,
                 focal_points=None,
                 unsafe=False,
                 hash=None,
                 accepts_webp=False,
                 request=None,
                 max_age=None):
        # NOTE(review): the *extension* argument is never stored -- confirm
        # whether that is intentional.
        self.debug = bool(debug)
        self.meta = bool(meta)
        # trim spec looks like "trim[:position[:tolerance]]"; position
        # defaults to "top-left" and tolerance to 0.
        self.trim = trim
        if trim is not None:
            trim_parts = trim.split(':')
            self.trim_pos = trim_parts[1] if len(trim_parts) > 1 else "top-left"
            self.trim_tolerance = int(trim_parts[2]) if len(trim_parts) > 2 else 0

        # An explicit crop dict wins over the four individual coordinates.
        if crop is not None:
            self.crop = crop
        else:
            self.crop = {
                'left': self.int_or_0(crop_left),
                'right': self.int_or_0(crop_right),
                'top': self.int_or_0(crop_top),
                'bottom': self.int_or_0(crop_bottom)
            }

        # Any positive coordinate means a crop was actually requested.
        self.should_crop = \
            self.crop['left'] > 0 or \
            self.crop['top'] > 0 or \
            self.crop['right'] > 0 or \
            self.crop['bottom'] > 0

        self.adaptive = bool(adaptive)
        self.full = bool(full)
        self.fit_in = bool(fit_in)

        # "orig" keeps the source dimension; anything else is coerced to int
        # (old-style `and/or` conditional, kept as-is).
        self.width = width == "orig" and "orig" or self.int_or_0(width)
        self.height = height == "orig" and "orig" or self.int_or_0(height)
        self.horizontal_flip = bool(horizontal_flip)
        self.vertical_flip = bool(vertical_flip)
        self.halign = halign or 'center'
        self.valign = valign or 'middle'
        self.smart = bool(smart)

        if filters is None:
            filters = []

        self.filters = filters
        self.image_url = image
        self.url = url
        self.detection_error = None
        self.quality = quality
        # NOTE(review): the *buffer* argument is ignored and the attribute
        # always starts as None -- confirm this is intended.
        self.buffer = None

        if focal_points is None:
            focal_points = []

        self.focal_points = focal_points
        self.hash = hash
        self.prevent_result_storage = False
        # Accept either the literal 'unsafe' URL token or a boolean True.
        self.unsafe = unsafe == 'unsafe' or unsafe is True
        self.format = None
        self.accepts_webp = accepts_webp
        self.max_bytes = None
        self.max_age = max_age

        # A live tornado request overrides url/webp negotiation and forces
        # the image URL to be re-encoded from UTF-8.
        if request:
            self.url = request.path
            self.accepts_webp = 'image/webp' in request.headers.get('Accept', '')
            self.image_url = Url.encode_url(self.image_url.encode('utf-8'))

    def int_or_0(self, value):
        # Coerce None to 0, everything else through int().
        return 0 if value is None else int(value)
class ContextImporter:
    """Instantiates the pluggable components declared by *importer*.

    Factory-style entries (engine, storages, ...) are called with the
    context; plain entries (loader, detectors, ...) are copied as-is.
    """

    def __init__(self, context, importer):
        self.context = context
        self.importer = importer
        # Each of these importer entries is a factory called with the
        # context when present, otherwise the attribute stays None.
        for name in ('engine', 'gif_engine', 'storage',
                     'result_storage', 'upload_photo_storage'):
            factory = getattr(importer, name)
            setattr(self, name, factory(context) if factory else None)
        # Plain pass-through entries.
        self.loader = importer.loader
        self.detectors = importer.detectors
        self.filters = importer.filters
        self.optimizers = importer.optimizers
        self.url_signer = importer.url_signer
class ThreadPool(object):
    # Optional worker pool for engine operations; size 0 disables it and
    # runs everything synchronously on the calling thread.

    @classmethod
    def instance(cls, size):
        """
        Cache threadpool since context is
        recreated for each request
        """
        # One shared pool per requested size, kept for the process lifetime.
        if not getattr(cls, "_instance", None):
            cls._instance = {}
        if size not in cls._instance:
            cls._instance[size] = ThreadPool(size)
        return cls._instance[size]

    def __init__(self, thread_pool_size):
        # A falsy size means "no pool": queue() falls back to foreground.
        if thread_pool_size:
            self.pool = ThreadPoolExecutor(thread_pool_size)
        else:
            self.pool = None

    def _execute_in_foreground(self, operation, callback):
        # Synchronous fallback: run the operation now and hand the callback
        # an already-resolved Future so callers see a uniform interface.
        result = Future()
        result.set_result(operation())
        callback(result)

    def _execute_in_pool(self, operation, callback):
        task = self.pool.submit(operation)
        # Bounce the callback through the IOLoop so it runs on the main
        # thread rather than on the worker thread that finished the future.
        task.add_done_callback(
            lambda future: tornado.ioloop.IOLoop.instance().add_callback(
                functools.partial(callback, future)
            )
        )

    def queue(self, operation, callback):
        # Run *operation* (a no-arg callable) and pass the resulting Future
        # to *callback*, using the pool when one is configured.
        if not self.pool:
            self._execute_in_foreground(operation, callback)
        else:
            self._execute_in_pool(operation, callback)

    def cleanup(self):
        # Blocks until all queued work has drained.
        if self.pool:
            print "Joining threads...."
            self.pool.shutdown()
| mit |
brajput24/fabric-bolt | docs/conf.py | 12 | 8470 | # -*- coding: utf-8 -*-
#
# Fabric Bolt documentation build configuration file, created by
# sphinx-quickstart on Thu Nov 14 16:43:47 2013.
#
# This file is execfile()d with the current directory set to its
# containing dir.
#
# Note that not all possible configuration values are present in this
# autogenerated file.
#
# All configuration values have a default; values that are commented out
# serve to show the default.
import sys
import os

# Sphinx autodoc imports the Django-backed fabric_bolt packages, so a
# settings module must be configured before any Django import happens.
if 'DJANGO_SETTINGS_MODULE' not in os.environ:
    os.environ['DJANGO_SETTINGS_MODULE'] = 'fabric_bolt.core.settings.local'
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#sys.path.insert(0, os.path.abspath('.'))
# -- General configuration ------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.rst'
# The encoding of source files.
#source_encoding = 'utf-8-sig'
# The master toctree document.
master_doc = 'index'
# General information about the project.
project = u'Fabric Bolt'
copyright = u'2013, Dan Dietz, Jared Proffitt, Nathaniel Pardington'
# The version info for the project you're documenting, acts as replacement for
# |version| and |release|, also used in various other places throughout the
# built documents.
#
# The short X.Y version.
version = '0.1b1'
# The full version, including alpha/beta/rc tags.
release = '0.1b1'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#language = None
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used:
#today = ''
# Else, today_fmt is used as the format for a strftime call.
#today_fmt = '%B %d, %Y'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['_build']
# The reST default role (used for this markup: `text`) to use for all
# documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
#add_module_names = True
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'sphinx'
# A list of ignored prefixes for module index sorting.
#modindex_common_prefix = []
# If true, keep warnings as "system message" paragraphs in the built documents.
#keep_warnings = False
# -- Options for HTML output ----------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
html_theme = 'default'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#html_theme_options = {}
# Add any paths that contain custom themes here, relative to this directory.
#html_theme_path = []
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Add any extra paths that contain custom files (such as robots.txt or
# .htaccess) here, relative to this directory. These files are copied
# directly to the root of the documentation.
#html_extra_path = []
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
#html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
#html_domain_indices = True
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, links to the reST sources are added to the pages.
#html_show_sourcelink = True
# If true, "Created using Sphinx" is shown in the HTML footer. Default is True.
#html_show_sphinx = True
# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True.
#html_show_copyright = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# This is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = None
# Output file base name for HTML help builder.
htmlhelp_basename = 'FabricBoltdoc'
# -- Options for LaTeX output ---------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#'preamble': '',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
('index', 'FabricBolt.tex', u'Fabric Bolt Documentation',
u'Dan Dietz, Jared Proffitt, Nathaniel Pardington', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# If true, show page references after internal links.
#latex_show_pagerefs = False
# If true, show URL addresses after external links.
#latex_show_urls = False
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
#latex_domain_indices = True
# -- Options for manual page output ---------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
('index', 'fabricbolt', u'Fabric Bolt Documentation',
[u'Dan Dietz, Jared Proffitt, Nathaniel Pardington'], 1)
]
# If true, show URL addresses after external links.
#man_show_urls = False
# -- Options for Texinfo output -------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
('index', 'FabricBolt', u'Fabric Bolt Documentation',
u'Dan Dietz, Jared Proffitt, Nathaniel Pardington', 'FabricBolt', 'One line description of project.',
'Miscellaneous'),
]
# Documents to append as an appendix to all manuals.
#texinfo_appendices = []
# If false, no module index is generated.
#texinfo_domain_indices = True
# How to display URL addresses: 'footnote', 'no', or 'inline'.
#texinfo_show_urls = 'footnote'
# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False
| mit |
cyberphox/MissionPlanner | Lib/encodings/mac_centeuro.py | 93 | 14665 | """ Python Character Mapping Codec mac_centeuro generated from 'MAPPINGS/VENDORS/APPLE/CENTEURO.TXT' with gencodec.py.
"""#"
import codecs
### Codec APIs
class Codec(codecs.Codec):
    # Stateless one-shot codec backed by the generated charmap tables.

    def encode(self,input,errors='strict'):
        return codecs.charmap_encode(input,errors,encoding_table)

    def decode(self,input,errors='strict'):
        return codecs.charmap_decode(input,errors,decoding_table)
class IncrementalEncoder(codecs.IncrementalEncoder):
    # Chunk-by-chunk encoder; charmap encoding carries no state between
    # chunks, so each call is independent.
    def encode(self, input, final=False):
        return codecs.charmap_encode(input,self.errors,encoding_table)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
    # Chunk-by-chunk decoder; single-byte charset, so no partial sequences.
    def decode(self, input, final=False):
        return codecs.charmap_decode(input,self.errors,decoding_table)[0]
class StreamWriter(Codec,codecs.StreamWriter):
    # File-like writer; inherits encode() from Codec.
    pass
class StreamReader(Codec,codecs.StreamReader):
    # File-like reader; inherits decode() from Codec.
    pass
### encodings module API
def getregentry():
    """Return the CodecInfo used to register this codec with `codecs`."""
    return codecs.CodecInfo(
        name='mac-centeuro',
        encode=Codec().encode,
        decode=Codec().decode,
        incrementalencoder=IncrementalEncoder,
        incrementaldecoder=IncrementalDecoder,
        streamreader=StreamReader,
        streamwriter=StreamWriter,
    )
### Decoding Table
decoding_table = (
u'\x00' # 0x00 -> CONTROL CHARACTER
u'\x01' # 0x01 -> CONTROL CHARACTER
u'\x02' # 0x02 -> CONTROL CHARACTER
u'\x03' # 0x03 -> CONTROL CHARACTER
u'\x04' # 0x04 -> CONTROL CHARACTER
u'\x05' # 0x05 -> CONTROL CHARACTER
u'\x06' # 0x06 -> CONTROL CHARACTER
u'\x07' # 0x07 -> CONTROL CHARACTER
u'\x08' # 0x08 -> CONTROL CHARACTER
u'\t' # 0x09 -> CONTROL CHARACTER
u'\n' # 0x0A -> CONTROL CHARACTER
u'\x0b' # 0x0B -> CONTROL CHARACTER
u'\x0c' # 0x0C -> CONTROL CHARACTER
u'\r' # 0x0D -> CONTROL CHARACTER
u'\x0e' # 0x0E -> CONTROL CHARACTER
u'\x0f' # 0x0F -> CONTROL CHARACTER
u'\x10' # 0x10 -> CONTROL CHARACTER
u'\x11' # 0x11 -> CONTROL CHARACTER
u'\x12' # 0x12 -> CONTROL CHARACTER
u'\x13' # 0x13 -> CONTROL CHARACTER
u'\x14' # 0x14 -> CONTROL CHARACTER
u'\x15' # 0x15 -> CONTROL CHARACTER
u'\x16' # 0x16 -> CONTROL CHARACTER
u'\x17' # 0x17 -> CONTROL CHARACTER
u'\x18' # 0x18 -> CONTROL CHARACTER
u'\x19' # 0x19 -> CONTROL CHARACTER
u'\x1a' # 0x1A -> CONTROL CHARACTER
u'\x1b' # 0x1B -> CONTROL CHARACTER
u'\x1c' # 0x1C -> CONTROL CHARACTER
u'\x1d' # 0x1D -> CONTROL CHARACTER
u'\x1e' # 0x1E -> CONTROL CHARACTER
u'\x1f' # 0x1F -> CONTROL CHARACTER
u' ' # 0x20 -> SPACE
u'!' # 0x21 -> EXCLAMATION MARK
u'"' # 0x22 -> QUOTATION MARK
u'#' # 0x23 -> NUMBER SIGN
u'$' # 0x24 -> DOLLAR SIGN
u'%' # 0x25 -> PERCENT SIGN
u'&' # 0x26 -> AMPERSAND
u"'" # 0x27 -> APOSTROPHE
u'(' # 0x28 -> LEFT PARENTHESIS
u')' # 0x29 -> RIGHT PARENTHESIS
u'*' # 0x2A -> ASTERISK
u'+' # 0x2B -> PLUS SIGN
u',' # 0x2C -> COMMA
u'-' # 0x2D -> HYPHEN-MINUS
u'.' # 0x2E -> FULL STOP
u'/' # 0x2F -> SOLIDUS
u'0' # 0x30 -> DIGIT ZERO
u'1' # 0x31 -> DIGIT ONE
u'2' # 0x32 -> DIGIT TWO
u'3' # 0x33 -> DIGIT THREE
u'4' # 0x34 -> DIGIT FOUR
u'5' # 0x35 -> DIGIT FIVE
u'6' # 0x36 -> DIGIT SIX
u'7' # 0x37 -> DIGIT SEVEN
u'8' # 0x38 -> DIGIT EIGHT
u'9' # 0x39 -> DIGIT NINE
u':' # 0x3A -> COLON
u';' # 0x3B -> SEMICOLON
u'<' # 0x3C -> LESS-THAN SIGN
u'=' # 0x3D -> EQUALS SIGN
u'>' # 0x3E -> GREATER-THAN SIGN
u'?' # 0x3F -> QUESTION MARK
u'@' # 0x40 -> COMMERCIAL AT
u'A' # 0x41 -> LATIN CAPITAL LETTER A
u'B' # 0x42 -> LATIN CAPITAL LETTER B
u'C' # 0x43 -> LATIN CAPITAL LETTER C
u'D' # 0x44 -> LATIN CAPITAL LETTER D
u'E' # 0x45 -> LATIN CAPITAL LETTER E
u'F' # 0x46 -> LATIN CAPITAL LETTER F
u'G' # 0x47 -> LATIN CAPITAL LETTER G
u'H' # 0x48 -> LATIN CAPITAL LETTER H
u'I' # 0x49 -> LATIN CAPITAL LETTER I
u'J' # 0x4A -> LATIN CAPITAL LETTER J
u'K' # 0x4B -> LATIN CAPITAL LETTER K
u'L' # 0x4C -> LATIN CAPITAL LETTER L
u'M' # 0x4D -> LATIN CAPITAL LETTER M
u'N' # 0x4E -> LATIN CAPITAL LETTER N
u'O' # 0x4F -> LATIN CAPITAL LETTER O
u'P' # 0x50 -> LATIN CAPITAL LETTER P
u'Q' # 0x51 -> LATIN CAPITAL LETTER Q
u'R' # 0x52 -> LATIN CAPITAL LETTER R
u'S' # 0x53 -> LATIN CAPITAL LETTER S
u'T' # 0x54 -> LATIN CAPITAL LETTER T
u'U' # 0x55 -> LATIN CAPITAL LETTER U
u'V' # 0x56 -> LATIN CAPITAL LETTER V
u'W' # 0x57 -> LATIN CAPITAL LETTER W
u'X' # 0x58 -> LATIN CAPITAL LETTER X
u'Y' # 0x59 -> LATIN CAPITAL LETTER Y
u'Z' # 0x5A -> LATIN CAPITAL LETTER Z
u'[' # 0x5B -> LEFT SQUARE BRACKET
u'\\' # 0x5C -> REVERSE SOLIDUS
u']' # 0x5D -> RIGHT SQUARE BRACKET
u'^' # 0x5E -> CIRCUMFLEX ACCENT
u'_' # 0x5F -> LOW LINE
u'`' # 0x60 -> GRAVE ACCENT
u'a' # 0x61 -> LATIN SMALL LETTER A
u'b' # 0x62 -> LATIN SMALL LETTER B
u'c' # 0x63 -> LATIN SMALL LETTER C
u'd' # 0x64 -> LATIN SMALL LETTER D
u'e' # 0x65 -> LATIN SMALL LETTER E
u'f' # 0x66 -> LATIN SMALL LETTER F
u'g' # 0x67 -> LATIN SMALL LETTER G
u'h' # 0x68 -> LATIN SMALL LETTER H
u'i' # 0x69 -> LATIN SMALL LETTER I
u'j' # 0x6A -> LATIN SMALL LETTER J
u'k' # 0x6B -> LATIN SMALL LETTER K
u'l' # 0x6C -> LATIN SMALL LETTER L
u'm' # 0x6D -> LATIN SMALL LETTER M
u'n' # 0x6E -> LATIN SMALL LETTER N
u'o' # 0x6F -> LATIN SMALL LETTER O
u'p' # 0x70 -> LATIN SMALL LETTER P
u'q' # 0x71 -> LATIN SMALL LETTER Q
u'r' # 0x72 -> LATIN SMALL LETTER R
u's' # 0x73 -> LATIN SMALL LETTER S
u't' # 0x74 -> LATIN SMALL LETTER T
u'u' # 0x75 -> LATIN SMALL LETTER U
u'v' # 0x76 -> LATIN SMALL LETTER V
u'w' # 0x77 -> LATIN SMALL LETTER W
u'x' # 0x78 -> LATIN SMALL LETTER X
u'y' # 0x79 -> LATIN SMALL LETTER Y
u'z' # 0x7A -> LATIN SMALL LETTER Z
u'{' # 0x7B -> LEFT CURLY BRACKET
u'|' # 0x7C -> VERTICAL LINE
u'}' # 0x7D -> RIGHT CURLY BRACKET
u'~' # 0x7E -> TILDE
u'\x7f' # 0x7F -> CONTROL CHARACTER
u'\xc4' # 0x80 -> LATIN CAPITAL LETTER A WITH DIAERESIS
u'\u0100' # 0x81 -> LATIN CAPITAL LETTER A WITH MACRON
u'\u0101' # 0x82 -> LATIN SMALL LETTER A WITH MACRON
u'\xc9' # 0x83 -> LATIN CAPITAL LETTER E WITH ACUTE
u'\u0104' # 0x84 -> LATIN CAPITAL LETTER A WITH OGONEK
u'\xd6' # 0x85 -> LATIN CAPITAL LETTER O WITH DIAERESIS
u'\xdc' # 0x86 -> LATIN CAPITAL LETTER U WITH DIAERESIS
u'\xe1' # 0x87 -> LATIN SMALL LETTER A WITH ACUTE
u'\u0105' # 0x88 -> LATIN SMALL LETTER A WITH OGONEK
u'\u010c' # 0x89 -> LATIN CAPITAL LETTER C WITH CARON
u'\xe4' # 0x8A -> LATIN SMALL LETTER A WITH DIAERESIS
u'\u010d' # 0x8B -> LATIN SMALL LETTER C WITH CARON
u'\u0106' # 0x8C -> LATIN CAPITAL LETTER C WITH ACUTE
u'\u0107' # 0x8D -> LATIN SMALL LETTER C WITH ACUTE
u'\xe9' # 0x8E -> LATIN SMALL LETTER E WITH ACUTE
u'\u0179' # 0x8F -> LATIN CAPITAL LETTER Z WITH ACUTE
u'\u017a' # 0x90 -> LATIN SMALL LETTER Z WITH ACUTE
u'\u010e' # 0x91 -> LATIN CAPITAL LETTER D WITH CARON
u'\xed' # 0x92 -> LATIN SMALL LETTER I WITH ACUTE
u'\u010f' # 0x93 -> LATIN SMALL LETTER D WITH CARON
u'\u0112' # 0x94 -> LATIN CAPITAL LETTER E WITH MACRON
u'\u0113' # 0x95 -> LATIN SMALL LETTER E WITH MACRON
u'\u0116' # 0x96 -> LATIN CAPITAL LETTER E WITH DOT ABOVE
u'\xf3' # 0x97 -> LATIN SMALL LETTER O WITH ACUTE
u'\u0117' # 0x98 -> LATIN SMALL LETTER E WITH DOT ABOVE
u'\xf4' # 0x99 -> LATIN SMALL LETTER O WITH CIRCUMFLEX
u'\xf6' # 0x9A -> LATIN SMALL LETTER O WITH DIAERESIS
u'\xf5' # 0x9B -> LATIN SMALL LETTER O WITH TILDE
u'\xfa' # 0x9C -> LATIN SMALL LETTER U WITH ACUTE
u'\u011a' # 0x9D -> LATIN CAPITAL LETTER E WITH CARON
u'\u011b' # 0x9E -> LATIN SMALL LETTER E WITH CARON
u'\xfc' # 0x9F -> LATIN SMALL LETTER U WITH DIAERESIS
u'\u2020' # 0xA0 -> DAGGER
u'\xb0' # 0xA1 -> DEGREE SIGN
u'\u0118' # 0xA2 -> LATIN CAPITAL LETTER E WITH OGONEK
u'\xa3' # 0xA3 -> POUND SIGN
u'\xa7' # 0xA4 -> SECTION SIGN
u'\u2022' # 0xA5 -> BULLET
u'\xb6' # 0xA6 -> PILCROW SIGN
u'\xdf' # 0xA7 -> LATIN SMALL LETTER SHARP S
u'\xae' # 0xA8 -> REGISTERED SIGN
u'\xa9' # 0xA9 -> COPYRIGHT SIGN
u'\u2122' # 0xAA -> TRADE MARK SIGN
u'\u0119' # 0xAB -> LATIN SMALL LETTER E WITH OGONEK
u'\xa8' # 0xAC -> DIAERESIS
u'\u2260' # 0xAD -> NOT EQUAL TO
u'\u0123' # 0xAE -> LATIN SMALL LETTER G WITH CEDILLA
u'\u012e' # 0xAF -> LATIN CAPITAL LETTER I WITH OGONEK
u'\u012f' # 0xB0 -> LATIN SMALL LETTER I WITH OGONEK
u'\u012a' # 0xB1 -> LATIN CAPITAL LETTER I WITH MACRON
u'\u2264' # 0xB2 -> LESS-THAN OR EQUAL TO
u'\u2265' # 0xB3 -> GREATER-THAN OR EQUAL TO
u'\u012b' # 0xB4 -> LATIN SMALL LETTER I WITH MACRON
u'\u0136' # 0xB5 -> LATIN CAPITAL LETTER K WITH CEDILLA
u'\u2202' # 0xB6 -> PARTIAL DIFFERENTIAL
u'\u2211' # 0xB7 -> N-ARY SUMMATION
u'\u0142' # 0xB8 -> LATIN SMALL LETTER L WITH STROKE
u'\u013b' # 0xB9 -> LATIN CAPITAL LETTER L WITH CEDILLA
u'\u013c' # 0xBA -> LATIN SMALL LETTER L WITH CEDILLA
u'\u013d' # 0xBB -> LATIN CAPITAL LETTER L WITH CARON
u'\u013e' # 0xBC -> LATIN SMALL LETTER L WITH CARON
u'\u0139' # 0xBD -> LATIN CAPITAL LETTER L WITH ACUTE
u'\u013a' # 0xBE -> LATIN SMALL LETTER L WITH ACUTE
u'\u0145' # 0xBF -> LATIN CAPITAL LETTER N WITH CEDILLA
u'\u0146' # 0xC0 -> LATIN SMALL LETTER N WITH CEDILLA
u'\u0143' # 0xC1 -> LATIN CAPITAL LETTER N WITH ACUTE
u'\xac' # 0xC2 -> NOT SIGN
u'\u221a' # 0xC3 -> SQUARE ROOT
u'\u0144' # 0xC4 -> LATIN SMALL LETTER N WITH ACUTE
u'\u0147' # 0xC5 -> LATIN CAPITAL LETTER N WITH CARON
u'\u2206' # 0xC6 -> INCREMENT
u'\xab' # 0xC7 -> LEFT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\xbb' # 0xC8 -> RIGHT-POINTING DOUBLE ANGLE QUOTATION MARK
u'\u2026' # 0xC9 -> HORIZONTAL ELLIPSIS
u'\xa0' # 0xCA -> NO-BREAK SPACE
u'\u0148' # 0xCB -> LATIN SMALL LETTER N WITH CARON
u'\u0150' # 0xCC -> LATIN CAPITAL LETTER O WITH DOUBLE ACUTE
u'\xd5' # 0xCD -> LATIN CAPITAL LETTER O WITH TILDE
u'\u0151' # 0xCE -> LATIN SMALL LETTER O WITH DOUBLE ACUTE
u'\u014c' # 0xCF -> LATIN CAPITAL LETTER O WITH MACRON
u'\u2013' # 0xD0 -> EN DASH
u'\u2014' # 0xD1 -> EM DASH
u'\u201c' # 0xD2 -> LEFT DOUBLE QUOTATION MARK
u'\u201d' # 0xD3 -> RIGHT DOUBLE QUOTATION MARK
u'\u2018' # 0xD4 -> LEFT SINGLE QUOTATION MARK
u'\u2019' # 0xD5 -> RIGHT SINGLE QUOTATION MARK
u'\xf7' # 0xD6 -> DIVISION SIGN
u'\u25ca' # 0xD7 -> LOZENGE
u'\u014d' # 0xD8 -> LATIN SMALL LETTER O WITH MACRON
u'\u0154' # 0xD9 -> LATIN CAPITAL LETTER R WITH ACUTE
u'\u0155' # 0xDA -> LATIN SMALL LETTER R WITH ACUTE
u'\u0158' # 0xDB -> LATIN CAPITAL LETTER R WITH CARON
u'\u2039' # 0xDC -> SINGLE LEFT-POINTING ANGLE QUOTATION MARK
u'\u203a' # 0xDD -> SINGLE RIGHT-POINTING ANGLE QUOTATION MARK
u'\u0159' # 0xDE -> LATIN SMALL LETTER R WITH CARON
u'\u0156' # 0xDF -> LATIN CAPITAL LETTER R WITH CEDILLA
u'\u0157' # 0xE0 -> LATIN SMALL LETTER R WITH CEDILLA
u'\u0160' # 0xE1 -> LATIN CAPITAL LETTER S WITH CARON
u'\u201a' # 0xE2 -> SINGLE LOW-9 QUOTATION MARK
u'\u201e' # 0xE3 -> DOUBLE LOW-9 QUOTATION MARK
u'\u0161' # 0xE4 -> LATIN SMALL LETTER S WITH CARON
u'\u015a' # 0xE5 -> LATIN CAPITAL LETTER S WITH ACUTE
u'\u015b' # 0xE6 -> LATIN SMALL LETTER S WITH ACUTE
u'\xc1' # 0xE7 -> LATIN CAPITAL LETTER A WITH ACUTE
u'\u0164' # 0xE8 -> LATIN CAPITAL LETTER T WITH CARON
u'\u0165' # 0xE9 -> LATIN SMALL LETTER T WITH CARON
u'\xcd' # 0xEA -> LATIN CAPITAL LETTER I WITH ACUTE
u'\u017d' # 0xEB -> LATIN CAPITAL LETTER Z WITH CARON
u'\u017e' # 0xEC -> LATIN SMALL LETTER Z WITH CARON
u'\u016a' # 0xED -> LATIN CAPITAL LETTER U WITH MACRON
u'\xd3' # 0xEE -> LATIN CAPITAL LETTER O WITH ACUTE
u'\xd4' # 0xEF -> LATIN CAPITAL LETTER O WITH CIRCUMFLEX
u'\u016b' # 0xF0 -> LATIN SMALL LETTER U WITH MACRON
u'\u016e' # 0xF1 -> LATIN CAPITAL LETTER U WITH RING ABOVE
u'\xda' # 0xF2 -> LATIN CAPITAL LETTER U WITH ACUTE
u'\u016f' # 0xF3 -> LATIN SMALL LETTER U WITH RING ABOVE
u'\u0170' # 0xF4 -> LATIN CAPITAL LETTER U WITH DOUBLE ACUTE
u'\u0171' # 0xF5 -> LATIN SMALL LETTER U WITH DOUBLE ACUTE
u'\u0172' # 0xF6 -> LATIN CAPITAL LETTER U WITH OGONEK
u'\u0173' # 0xF7 -> LATIN SMALL LETTER U WITH OGONEK
u'\xdd' # 0xF8 -> LATIN CAPITAL LETTER Y WITH ACUTE
u'\xfd' # 0xF9 -> LATIN SMALL LETTER Y WITH ACUTE
u'\u0137' # 0xFA -> LATIN SMALL LETTER K WITH CEDILLA
u'\u017b' # 0xFB -> LATIN CAPITAL LETTER Z WITH DOT ABOVE
u'\u0141' # 0xFC -> LATIN CAPITAL LETTER L WITH STROKE
u'\u017c' # 0xFD -> LATIN SMALL LETTER Z WITH DOT ABOVE
u'\u0122' # 0xFE -> LATIN CAPITAL LETTER G WITH CEDILLA
u'\u02c7' # 0xFF -> CARON
)
### Encoding table
encoding_table=codecs.charmap_build(decoding_table)
| gpl-3.0 |
nikesh-mahalka/cinder | cinder/volume/drivers/san/hp/hp_lefthand_cliq_proxy.py | 12 | 19099 | # (c) Copyright 2014 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""
HP LeftHand SAN ISCSI Driver.
The driver communicates to the backend aka Cliq via SSH to perform all the
operations on the SAN.
"""
from lxml import etree
from oslo_concurrency import processutils
from oslo_log import log as logging
from oslo_utils import units
from cinder import exception
from cinder.i18n import _, _LE, _LW
from cinder.volume.drivers.san import san
LOG = logging.getLogger(__name__)
class HPLeftHandCLIQProxy(san.SanISCSIDriver):
"""Executes commands relating to HP/LeftHand SAN ISCSI volumes.
We use the CLIQ interface, over SSH.
Rough overview of CLIQ commands used:
:createVolume: (creates the volume)
:deleteVolume: (deletes the volume)
:modifyVolume: (extends the volume)
:createSnapshot: (creates the snapshot)
:deleteSnapshot: (deletes the snapshot)
:cloneSnapshot: (creates the volume from a snapshot)
:getVolumeInfo: (to discover the IQN etc)
:getSnapshotInfo: (to discover the IQN etc)
:getClusterInfo: (to discover the iSCSI target IP address)
The 'trick' here is that the HP SAN enforces security by default, so
normally a volume mount would need both to configure the SAN in the volume
layer and do the mount on the compute layer. Multi-layer operations are
not catered for at the moment in the cinder architecture, so instead we
share the volume using CHAP at volume creation time. Then the mount need
only use those CHAP credentials, so can take place exclusively in the
compute layer.
Version history:
1.0.0 - Initial driver
1.1.0 - Added create/delete snapshot, extend volume, create volume
from snapshot support.
1.2.0 - Ported into the new HP LeftHand driver.
1.2.1 - Fixed bug #1279897, HP LeftHand CLIQ proxy may return incorrect
capacity values.
1.2.2 - Fixed driver with Paramiko 1.13.0, bug #1298608.
"""
VERSION = "1.2.2"
device_stats = {}
    def __init__(self, *args, **kwargs):
        super(HPLeftHandCLIQProxy, self).__init__(*args, **kwargs)
        # iSCSI virtual IP of the backend cluster; discovered on demand.
        self.cluster_vip = None
        # Warn on every instantiation: this driver is scheduled for removal.
        LOG.warning(_LW('The HPLeftHandISCSIDriver CLIQ driver has been '
                        'DEPRECATED as of the 2015.2 release. This driver '
                        'will be removed in the 2016.1 release. Please use '
                        'the HPLeftHandISCSIDriver REST based driver '
                        'instead.'))
    def do_setup(self, context):
        # No backend initialization is required for the CLIQ/SSH proxy.
        pass
    def check_for_setup_error(self):
        # Nothing to validate: configuration errors surface on first use.
        pass
    def get_version_string(self):
        # Human-readable proxy version, e.g. "CLIQ 1.2.2".
        return (_('CLIQ %(proxy_ver)s') % {'proxy_ver': self.VERSION})
def _cliq_run(self, verb, cliq_args, check_exit_code=True):
"""Runs a CLIQ command over SSH, without doing any result parsing."""
cmd_list = [verb]
for k, v in cliq_args.items():
cmd_list.append("%s=%s" % (k, v))
return self._run_ssh(cmd_list, check_exit_code)
    def _cliq_run_xml(self, verb, cliq_args, check_cliq_result=True):
        """Runs a CLIQ command over SSH, parsing and checking the output."""
        # Force machine-readable output; note this mutates the caller's dict.
        cliq_args['output'] = 'XML'
        # NOTE(review): check_cliq_result is also reused as the SSH
        # exit-code check flag -- confirm that coupling is intended.
        (out, _err) = self._cliq_run(verb, cliq_args, check_cliq_result)
        LOG.debug("CLIQ command returned %s", out)
        result_xml = etree.fromstring(out.encode('utf8'))
        if check_cliq_result:
            # CLIQ wraps its payload in a <response> node whose "result"
            # attribute is "0" on success.
            response_node = result_xml.find("response")
            if response_node is None:
                msg = (_("Malformed response to CLIQ command "
                         "%(verb)s %(cliq_args)s. Result=%(out)s") %
                       {'verb': verb, 'cliq_args': cliq_args, 'out': out})
                raise exception.VolumeBackendAPIException(data=msg)
            result_code = response_node.attrib.get("result")
            if result_code != "0":
                msg = (_("Error running CLIQ command %(verb)s %(cliq_args)s. "
                         " Result=%(out)s") %
                       {'verb': verb, 'cliq_args': cliq_args, 'out': out})
                raise exception.VolumeBackendAPIException(data=msg)
        return result_xml
def _cliq_get_cluster_info(self, cluster_name):
    """Query the backend for cluster details (including its VIPs).

    :param cluster_name: name of the LeftHand cluster to look up.
    :return: parsed XML tree of the getClusterInfo response.
    """
    return self._cliq_run_xml("getClusterInfo", {
        'clusterName': cluster_name,
        'searchDepth': '1',
        'verbose': '0',
    })
def _cliq_get_cluster_vip(self, cluster_name):
"""Gets the IP on which a cluster shares iSCSI volumes."""
cluster_xml = self._cliq_get_cluster_info(cluster_name)
vips = []
for vip in cluster_xml.findall("response/cluster/vip"):
vips.append(vip.attrib.get('ipAddress'))
# Exactly one virtual IP is expected; zero or several is an error.
if len(vips) == 1:
return vips[0]
_xml = etree.tostring(cluster_xml)
msg = (_("Unexpected number of virtual ips for cluster "
" %(cluster_name)s. Result=%(_xml)s") %
{'cluster_name': cluster_name, '_xml': _xml})
raise exception.VolumeBackendAPIException(data=msg)
def _cliq_get_volume_info(self, volume_name):
    """Gets the volume info, including IQN.

    Flattens the XML reply into a flat dict whose keys are prefixed
    with "volume.", "status." or "permission." to avoid attribute-name
    collisions between the nodes.

    :param volume_name: backend name of the volume to look up.
    :return: dict of flattened volume/status/permission attributes.
    """
    result_xml = self._cliq_run_xml("getVolumeInfo",
                                    {'volumeName': volume_name})
    # Result looks like (abridged):
    # <gauche version="1.0">
    #   <response description="Operation succeeded." result="0">
    #     <volume availability="online" clusterName="Cluster01"
    #             iscsiIqn="iqn.2003-10...:vol-b" size="1073741824" ...>
    #       <status description="OK" value="2"/>
    #       <permission access="rw" chapName="chapusername"
    #                   chapRequired="true" targetSecret="..."/>
    #     </volume>
    #   </response>
    # </gauche>
    volume_attributes = {}
    volume_node = result_xml.find("response/volume")
    for k, v in volume_node.attrib.items():
        volume_attributes["volume." + k] = v
    status_node = volume_node.find("status")
    if status_node is not None:
        for k, v in status_node.attrib.items():
            volume_attributes["status." + k] = v
    # We only consider the first permission node.
    permission_node = volume_node.find("permission")
    if permission_node is not None:
        # BUG FIX: this previously iterated status_node.attrib, so the
        # "permission." keys silently duplicated the status attributes
        # (and raised AttributeError when <status> was absent).
        for k, v in permission_node.attrib.items():
            volume_attributes["permission." + k] = v
    LOG.debug("Volume info: %(volume_name)s => %(volume_attributes)s",
              {'volume_name': volume_name,
               'volume_attributes': volume_attributes})
    return volume_attributes
def _cliq_get_snapshot_info(self, snapshot_name):
    """Gets the snapshot info, including IQN.

    Flattens the XML reply into a flat dict whose keys are prefixed
    with "snapshot.", "status." or "permission." to avoid collisions.

    :param snapshot_name: backend name of the snapshot to look up.
    :return: dict of flattened snapshot/status/permission attributes.
    """
    result_xml = self._cliq_run_xml("getSnapshotInfo",
                                    {'snapshotName': snapshot_name})
    # Result looks like (abridged):
    # <gauche version="1.0">
    #   <response description="Operation succeeded." result="0">
    #     <snapshot clusterName="CloudCluster1"
    #               iscsiIqn="iqn.2003-10...:73" size="2147483648" ...>
    #       <status description="OK" value="2"/>
    #       <permission access="rw" chapName="chapusername"
    #                   chapRequired="true" targetSecret="..."/>
    #     </snapshot>
    #   </response>
    # </gauche>
    snapshot_attributes = {}
    snapshot_node = result_xml.find("response/snapshot")
    for k, v in snapshot_node.attrib.items():
        snapshot_attributes["snapshot." + k] = v
    status_node = snapshot_node.find("status")
    if status_node is not None:
        for k, v in status_node.attrib.items():
            snapshot_attributes["status." + k] = v
    # We only consider the first permission node.
    permission_node = snapshot_node.find("permission")
    if permission_node is not None:
        # BUG FIX: this previously iterated status_node.attrib, so the
        # "permission." keys duplicated the status attributes (and
        # raised AttributeError when <status> was absent).
        for k, v in permission_node.attrib.items():
            snapshot_attributes["permission." + k] = v
    LOG.debug("Snapshot info: %(name)s => %(attributes)s",
              {'name': snapshot_name, 'attributes': snapshot_attributes})
    return snapshot_attributes
def create_volume(self, volume):
    """Create a new volume on the configured cluster.

    :param volume: volume dict with 'name' and 'size' (in GB).
    :return: model update dict (provider_location) for the new volume.
    """
    thin = self.configuration.san_thin_provision
    cliq_args = {
        'clusterName': self.configuration.san_clustername,
        # CLIQ expects the strings '1'/'0' rather than a boolean.
        'thinProvision': '1' if thin else '0',
        'volumeName': volume['name'],
        'size': '%sGB' % volume['size'],
    }
    self._cliq_run_xml("createVolume", cliq_args)
    return self._get_model_update(volume['name'])
def extend_volume(self, volume, new_size):
    """Extend the size of an existing volume.

    :param volume: volume dict with 'name'.
    :param new_size: new size in GB.
    """
    self._cliq_run_xml("modifyVolume", {
        'volumeName': volume['name'],
        'size': '%sGB' % new_size,
    })
def create_volume_from_snapshot(self, volume, snapshot):
    """Clone a snapshot into a brand new volume.

    :return: model update dict (provider_location) for the new volume.
    """
    self._cliq_run_xml("cloneSnapshot", {
        'snapshotName': snapshot['name'],
        'volumeName': volume['name'],
    })
    return self._get_model_update(volume['name'])
def create_snapshot(self, snapshot):
    """Snapshot a volume.

    :param snapshot: snapshot dict with 'name' and 'volume_name'.
    """
    self._cliq_run_xml("createSnapshot", {
        'snapshotName': snapshot['name'],
        'volumeName': snapshot['volume_name'],
        # inheritAccess=1: the snapshot inherits the source volume's
        # access configuration.
        'inheritAccess': 1,
    })
def delete_volume(self, volume):
"""Deletes a volume."""
cliq_args = {}
cliq_args['volumeName'] = volume['name']
cliq_args['prompt'] = 'false' # Don't confirm
# Existence probe: _cliq_get_volume_info raises ProcessExecutionError
# for unknown volumes, in which case deletion is a silent no-op.
try:
self._cliq_get_volume_info(volume['name'])
except processutils.ProcessExecutionError:
LOG.error(_LE("Volume did not exist. It will not be deleted"))
return
self._cliq_run_xml("deleteVolume", cliq_args)
def delete_snapshot(self, snapshot):
    """Delete a snapshot; a missing snapshot is a silent no-op.

    :raises exception.SnapshotIsBusy: if the snapshot is a clone point.
    :raises exception.VolumeBackendAPIException: for any other failure.
    """
    cliq_args = {}
    cliq_args['snapshotName'] = snapshot['name']
    cliq_args['prompt'] = 'false'  # Don't confirm
    # Existence probe: _cliq_get_snapshot_info raises
    # ProcessExecutionError for unknown snapshots.
    try:
        self._cliq_get_snapshot_info(snapshot['name'])
    except processutils.ProcessExecutionError:
        LOG.error(_LE("Snapshot did not exist. It will not be deleted"))
        return
    try:
        self._cliq_run_xml("deleteSnapshot", cliq_args)
    except Exception as ex:
        in_use_msg = 'cannot be deleted because it is a clone point'
        # BUG FIX: was "in ex.message" -- .message is not defined for
        # every exception type (and is gone in Python 3), which turned
        # the busy-snapshot check into an AttributeError.
        if in_use_msg in str(ex):
            raise exception.SnapshotIsBusy(ex)
        raise exception.VolumeBackendAPIException(ex)
def local_path(self, volume):
# Volumes live on the SAN; there is no local device path to expose.
msg = _("local_path not supported")
raise exception.VolumeBackendAPIException(data=msg)
def initialize_connection(self, volume, connector):
"""Assigns the volume to a server.
Assign any created volume to a compute node/host so that it can be
used from that host. HP VSA requires a volume to be assigned
to a server.
This driver returns a driver_volume_type of 'iscsi'.
The format of the driver data is defined in _get_iscsi_properties.
Example return value:
{
'driver_volume_type': 'iscsi'
'data': {
'target_discovered': True,
'target_iqn': 'iqn.2010-10.org.openstack:volume-00000001',
'target_portal': '127.0.0.1:3260',
'volume_id': 1,
}
}
"""
# Make sure a server entry exists for the connecting host, then
# grant it access to the volume before handing back iSCSI details.
self._create_server(connector)
cliq_args = {}
cliq_args['volumeName'] = volume['name']
cliq_args['serverName'] = connector['host']
self._cliq_run_xml("assignVolumeToServer", cliq_args)
iscsi_data = self._get_iscsi_properties(volume)
return {
'driver_volume_type': 'iscsi',
'data': iscsi_data
}
def _create_server(self, connector):
    """Ensure a server entry exists on the backend for this host.

    Looks the host up first and only issues createServer when the
    getServerInfo result code is non-zero (host unknown).
    """
    lookup_args = {'serverName': connector['host']}
    # check_cliq_result=False: a non-zero result here just means the
    # server does not exist yet.
    out = self._cliq_run_xml("getServerInfo", lookup_args, False)
    result = out.find("response").attrib.get("result")
    if result != '0':
        self._cliq_run_xml("createServer", {
            'serverName': connector['host'],
            'initiator': connector['initiator'],
        })
def _get_model_update(self, volume_name):
    """Build the provider_location model update for a volume.

    Format: "<vip>:3260,<interface> <iqn> <lun>"; the LUN is always 0.
    """
    volume_info = self._cliq_get_volume_info(volume_name)
    cluster_name = volume_info['volume.clusterName']
    iscsi_iqn = volume_info['volume.iscsiIqn']
    # TODO(justinsb): Is this always 1? Does it matter?
    cluster_interface = '1'
    # Resolve (and cache) the cluster virtual IP on first use.
    if not self.cluster_vip:
        self.cluster_vip = self._cliq_get_cluster_vip(cluster_name)
    iscsi_portal = "%s:3260,%s" % (self.cluster_vip, cluster_interface)
    # NOTE(jdg): LH volumes always at lun 0 ?
    return {
        'provider_location': "%s %s %s" % (iscsi_portal, iscsi_iqn, 0),
    }
def terminate_connection(self, volume, connector, **kwargs):
    """Unassign the volume from the host."""
    self._cliq_run_xml("unassignVolumeToServer", {
        'volumeName': volume['name'],
        'serverName': connector['host'],
    })
def get_volume_stats(self, refresh=False):
    """Return cached backend stats, refreshing them first if asked."""
    if refresh:
        self._update_backend_status()
    return self.device_stats
def _update_backend_status(self):
# Refresh self.device_stats with scheduler-facing capacity data
# pulled from a getClusterInfo query.
data = {}
backend_name = self.configuration.safe_get('volume_backend_name')
data['volume_backend_name'] = backend_name or self.__class__.__name__
data['reserved_percentage'] = 0
data['storage_protocol'] = 'iSCSI'
data['vendor_name'] = 'Hewlett-Packard'
result_xml = self._cliq_run_xml(
"getClusterInfo", {
'searchDepth': 1,
'clusterName': self.configuration.san_clustername})
cluster_node = result_xml.find("response/cluster")
total_capacity = cluster_node.attrib.get("spaceTotal")
free_capacity = cluster_node.attrib.get("unprovisionedSpace")
GB = units.Gi
# NOTE(review): capacities come back in bytes and are converted to
# whole GiB; '/' truncates on Python 2 ints.
data['total_capacity_gb'] = int(total_capacity) / GB
data['free_capacity_gb'] = int(free_capacity) / GB
self.device_stats = data
def create_cloned_volume(self, volume, src_vref):
# Volume-to-volume cloning is not supported by the CLIQ proxy.
raise NotImplementedError()
def create_export(self, context, volume, connector):
# Exports are created on demand in initialize_connection.
pass
def ensure_export(self, context, volume):
# Nothing to re-establish after a restart; assignments live on the SAN.
pass
def remove_export(self, context, volume):
# Unassignment is handled by terminate_connection.
pass
def retype(self, context, volume, new_type, diff, host):
    """Convert the volume to be of the new type.

    Returns a boolean indicating whether the retype occurred.

    :param ctxt: Context
    :param volume: A dictionary describing the volume to migrate
    :param new_type: A dictionary describing the volume type to convert to
    :param diff: A dictionary with the difference between the two types
    """
    # Not supported by the CLIQ proxy; report that no retype took place.
    return False
def migrate_volume(self, ctxt, volume, host):
    """Migrate the volume to the specified host.

    Returns a boolean indicating whether the migration occurred, as well
    as model_update.

    :param ctxt: Context
    :param volume: A dictionary describing the volume to migrate
    :param host: A dictionary describing the host to migrate to, where
                 host['host'] is its name, and host['capabilities'] is a
                 dictionary of its reported capabilities.
    """
    # Not supported by the CLIQ proxy; no migration, no model update.
    return (False, None)
| apache-2.0 |
tusharmakkar08/Diamond | src/collectors/ntp/test/testntp.py | 15 | 3021 | #!/usr/bin/python
# coding=utf-8
##########################################################################
from test import CollectorTestCase
from test import get_collector_config
from test import unittest
from mock import Mock
from mock import patch
from diamond.collector import Collector
from ntp import NtpCollector
##########################################################################
class TestNtpCollector(CollectorTestCase):
# Tests for the Diamond NtpCollector: run_command is patched with a
# canned 'ntpdate' fixture so no real ntpdate binary is needed.
# NOTE(review): "wtih" in two test names is a typo for "with"; left
# unrenamed because test discovery keys on the method names.
def setUp(self):
config = get_collector_config('NtpCollector', {})
self.collector = NtpCollector(config, None)
def test_import(self):
self.assertTrue(NtpCollector)
@patch.object(Collector, 'publish')
def test_should_work_wtih_real_data(self, publish_mock):
# Feed captured ntpdate output through the mocked run_command.
ntpdate_data = Mock(
return_value=(self.getFixture('ntpdate').getvalue(), None))
collector_mock = patch.object(NtpCollector,
'run_command',
ntpdate_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'server.count': 4,
'offset.milliseconds': 0
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_work_wtih_real_data_and_custom_config(self, publish_mock):
# Same fixture, but offsets reported in seconds with 3-digit precision.
config = get_collector_config('NtpCollector', {
'time_scale': 'seconds',
'precision': 3,
})
self.collector = NtpCollector(config, None)
ntpdate_data = Mock(
return_value=(self.getFixture('ntpdate').getvalue(), None))
collector_mock = patch.object(NtpCollector,
'run_command',
ntpdate_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
metrics = {
'server.count': 4,
'offset.seconds': -0.000128
}
self.setDocExample(collector=self.collector.__class__.__name__,
metrics=metrics,
defaultpath=self.collector.config['path'])
self.assertPublishedMany(publish_mock, metrics)
@patch.object(Collector, 'publish')
def test_should_fail_gracefully(self, publish_mock):
# Empty command output must publish nothing rather than raise.
ntpdate_data = Mock(return_value=('', None))
collector_mock = patch.object(NtpCollector,
'run_command',
ntpdate_data)
collector_mock.start()
self.collector.collect()
collector_mock.stop()
self.assertPublishedMany(publish_mock, {})
##########################################################################
if __name__ == "__main__":
unittest.main()
| mit |
gbiggs/pykg-config | pykg_config/options.py | 1 | 3811 | # Copyright (c) 2009-2012, Geoffrey Biggs
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Geoffrey Biggs nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
# File: options.py
# Author: Geoffrey Biggs
# Part of pykg-config.
"""Singleton containing option values."""
__version__ = "$Revision: $"
# $Source$
import sys
from pykg_config.exceptions import PykgConfigError
##############################################################################
# Exceptions
class NoSuchOptionError(PykgConfigError):
    """Raised when looking up an option that has never been set.

    Attributes:
        option -- The name of the missing option.
    """
    def __init__(self, option):
        self.option = option

    def __str__(self):
        return self.option
##############################################################################
# Options singleton class
class Options(object):
    """Process-wide singleton holding pykg-config option values."""

    def __new__(cls, *p, **k):
        # Classic singleton: the only instance is stashed on the class.
        if '_the_instance' not in cls.__dict__:
            cls._the_instance = object.__new__(cls)
        return cls._the_instance

    def init_options(self):
        """(Re)set every option to its default value."""
        self.options = {
            'use_msvc_syntax': True,
            'dont_define_prefix': False,
            'prefix_variable': 'prefix',
            'verbose': False,
            'pc_path': '',
            'uninstalled_only': False,
            'prefer_uninstalled': True,
            'pc_sysrootdir': '/',
            'pc_topbuilddir': '',
            'print_errors': True,
            'short_errors': False,
            'error_dest': sys.stderr,
            'debug': False,
            'search_string': '',
            'private_libs': False,
            'forbidden_libdirs': [],
            'forbidden_cflags': [],
            'is_64bit': False,
            'full_compatibility': False,
            'normalise_paths': True,
        }

    def set_option(self, option, value):
        """Set *option* to *value*, initialising defaults on first use."""
        if not hasattr(self, 'options'):
            self.init_options()
        self.options[option] = value

    def get_option(self, option):
        """Return the value of *option*.

        Raises NoSuchOptionError if the option has never been set.
        """
        if not hasattr(self, 'options'):
            self.init_options()
        if option not in self.options:
            raise NoSuchOptionError(option)
        return self.options[option]
# vim: tw=79
| bsd-3-clause |
tunneln/CarnotKE | jyhton/lib-python/2.7/distutils/tests/test_file_util.py | 102 | 2483 | """Tests for distutils.file_util."""
import unittest
import os
import shutil
from distutils.file_util import move_file, write_file, copy_file
from distutils import log
from distutils.tests import support
from test.test_support import run_unittest
class FileUtilTestCase(support.TempdirManager, unittest.TestCase):
# Tests for distutils.file_util; distutils' log.info is replaced with
# a recorder so that the verbose=1 log lines can be asserted.
def _log(self, msg, *args):
# Recorder substituted for log.info in setUp.
if len(args) > 0:
self._logs.append(msg % args)
else:
self._logs.append(msg)
def setUp(self):
super(FileUtilTestCase, self).setUp()
self._logs = []
self.old_log = log.info
log.info = self._log
tmp_dir = self.mkdtemp()
self.source = os.path.join(tmp_dir, 'f1')
self.target = os.path.join(tmp_dir, 'f2')
self.target_dir = os.path.join(tmp_dir, 'd1')
def tearDown(self):
# Restore the real logger before the tempdir cleanup.
log.info = self.old_log
super(FileUtilTestCase, self).tearDown()
def test_move_file_verbosity(self):
f = open(self.source, 'w')
try:
f.write('some content')
finally:
f.close()
# verbose=0 must not log anything.
move_file(self.source, self.target, verbose=0)
wanted = []
self.assertEqual(self._logs, wanted)
# back to original state
move_file(self.target, self.source, verbose=0)
# verbose=1 logs one "moving src -> dst" line.
move_file(self.source, self.target, verbose=1)
wanted = ['moving %s -> %s' % (self.source, self.target)]
self.assertEqual(self._logs, wanted)
# back to original state
move_file(self.target, self.source, verbose=0)
self._logs = []
# now the target is a dir
os.mkdir(self.target_dir)
move_file(self.source, self.target_dir, verbose=1)
wanted = ['moving %s -> %s' % (self.source, self.target_dir)]
self.assertEqual(self._logs, wanted)
def test_write_file(self):
lines = ['a', 'b', 'c']
dir = self.mkdtemp()
foo = os.path.join(dir, 'foo')
write_file(foo, lines)
content = [line.strip() for line in open(foo).readlines()]
self.assertEqual(content, lines)
def test_copy_file(self):
src_dir = self.mkdtemp()
foo = os.path.join(src_dir, 'foo')
write_file(foo, 'content')
dst_dir = self.mkdtemp()
# Copying onto a directory keeps the source's basename.
copy_file(foo, dst_dir)
self.assertTrue(os.path.exists(os.path.join(dst_dir, 'foo')))
def test_suite():
# Collect every test method of FileUtilTestCase into one suite.
return unittest.makeSuite(FileUtilTestCase)
if __name__ == "__main__":
run_unittest(test_suite())
| apache-2.0 |
ptisserand/portage | pym/portage/env/loaders.py | 13 | 8420 | # config.py -- Portage Config
# Copyright 2007-2013 Gentoo Foundation
# Distributed under the terms of the GNU General Public License v2
import errno
import io
import stat
import portage
portage.proxy.lazyimport.lazyimport(globals(),
'portage.util:writemsg',
)
from portage import os
from portage import _encodings
from portage import _unicode_decode
from portage import _unicode_encode
from portage.localization import _
class LoaderError(Exception):
    """Raised when a resource (file/sql/etc) fails to load."""

    def __init__(self, resource, error_msg):
        """
        @param resource: Resource that failed to load (file/sql/etc)
        @type resource: String
        @param error_msg: Error from underlying Loader system
        @type error_msg: String
        """
        self.resource = resource
        self.error_msg = error_msg

    def __str__(self):
        return ("Failed while loading resource: %s, error was: %s"
                % (self.resource, self.error_msg))
def RecursiveFileLoader(filename):
"""
If filename is of type file, return a generator that yields filename,
else if filename is of type directory, return a generator that yields
files in that directory.
Ignore files beginning with . or ending in ~.
Prune CVS directories.
@param filename: name of a file/directory to traverse
@rtype: generator
@return: yields each file to process
"""
# A path that cannot be stat'd yields nothing at all.
try:
st = os.stat(filename)
except OSError:
return
if stat.S_ISDIR(st.st_mode):
for root, dirs, files in os.walk(filename):
# Iterate over a copy so hidden/CVS dirs can be pruned in place.
for d in list(dirs):
if d[:1] == '.' or d == 'CVS':
dirs.remove(d)
for f in files:
# Skip names that are not valid in the filesystem encoding.
try:
f = _unicode_decode(f,
encoding=_encodings['fs'], errors='strict')
except UnicodeDecodeError:
continue
if f[:1] == '.' or f[-1:] == '~':
continue
yield os.path.join(root, f)
else:
yield filename
class DataLoader(object):
    """Base class for loaders; wires up an optional key validator."""

    def __init__(self, validator):
        # Fall back to an accept-everything validator when none is given.
        if validator is None:
            def _accept_all(key):
                return True
            validator = _accept_all
        self._validate = validator

    def load(self):
        """
        Function to do the actual work of a Loader
        """
        raise NotImplementedError("Please override in a subclass")
class EnvLoader(DataLoader):
""" Class to access data in the environment """
def __init__(self, validator):
DataLoader.__init__(self, validator)
def load(self):
# Returns the live os.environ mapping (not a copy).
return os.environ
class TestTextLoader(DataLoader):
    """In-memory loader for tests: you hand it data, no filesystem access."""

    def __init__(self, validator):
        DataLoader.__init__(self, validator)
        self.data = {}
        self.errors = {}

    def setData(self, text):
        """Explicitly set the data field

        Args:
            text - a dict of data typical of Loaders
        Returns:
            None
        """
        # Guard clause instead of if/else: reject non-dict input first.
        if not isinstance(text, dict):
            raise ValueError("setData requires a dict argument")
        self.data = text

    def setErrors(self, errors):
        # Canned parse errors to be returned by load().
        self.errors = errors

    def load(self):
        """Return the canned (data, errors) tuple."""
        return (self.data, self.errors)
class FileLoader(DataLoader):
""" Class to access data in files """
def __init__(self, filename, validator):
"""
Args:
filename : Name of file or directory to open
validator : class with validate() method to validate data.
"""
DataLoader.__init__(self, validator)
self.fname = filename
def load(self):
"""
Return the {source: {key: value}} pairs from a file
Return the {source: [list of errors] from a load
@param recursive: If set and self.fname is a directory;
load all files in self.fname
@type: Boolean
@rtype: tuple
@return:
Returns (data,errors), both may be empty dicts or populated.
"""
data = {}
errors = {}
# I tried to save a nasty lookup on lineparser by doing the lookup
# once, which may be expensive due to digging in child classes.
func = self.lineParser
for fn in RecursiveFileLoader(self.fname):
try:
with io.open(_unicode_encode(fn,
encoding=_encodings['fs'], errors='strict'), mode='r',
encoding=_encodings['content'], errors='replace') as f:
lines = f.readlines()
except EnvironmentError as e:
# Permission problems are reported; missing or stale (NFS)
# files are silently skipped; anything else propagates.
if e.errno == errno.EACCES:
writemsg(_("Permission denied: '%s'\n") % fn, noiselevel=-1)
del e
elif e.errno in (errno.ENOENT, errno.ESTALE):
del e
else:
raise
else:
for line_num, line in enumerate(lines):
func(line, line_num, data, errors)
return (data, errors)
def lineParser(self, line, line_num, data, errors):
""" This function parses 1 line at a time
Args:
line: a string representing 1 line of a file
line_num: an integer representing what line we are processing
data: a dict that contains the data we have extracted from the file
already
errors: a dict representing parse errors.
Returns:
Nothing (None). Writes to data and errors
"""
raise NotImplementedError("Please over-ride this in a child class")
class ItemFileLoader(FileLoader):
"""
Class to load data from a file full of items one per line
>>> item1
>>> item2
>>> item3
>>> item1
becomes { 'item1':None, 'item2':None, 'item3':None }
Note that due to the data store being a dict, duplicates
are removed.
"""
def __init__(self, filename, validator):
FileLoader.__init__(self, filename, validator)
def lineParser(self, line, line_num, data, errors):
line = line.strip()
if line.startswith('#'): # Skip commented lines
return
if not len(line): # skip empty lines
return
split = line.split()
# Defensive: a non-empty stripped line always yields at least one
# token, so this branch is unreachable in practice.
if not len(split):
errors.setdefault(self.fname, []).append(
_("Malformed data at line: %s, data: %s")
% (line_num + 1, line))
return
key = split[0]
if not self._validate(key):
errors.setdefault(self.fname, []).append(
_("Validation failed at line: %s, data %s")
% (line_num + 1, key))
return
# Only the first token is stored; any further tokens are ignored.
data[key] = None
class KeyListFileLoader(FileLoader):
    """
    Class to load data from a file full of key [list] tuples

    >>>>key foo1 foo2 foo3
    becomes
    {'key':['foo1','foo2','foo3']}

    Repeated keys accumulate: their values are appended to the same
    flat list.
    """

    def __init__(self, filename, validator=None, valuevalidator=None):
        FileLoader.__init__(self, filename, validator)
        # Accept-everything fallback when no value validator is given.
        f = valuevalidator
        if f is None:
            def validate(key):
                return True
            f = validate
        self._valueValidate = f

    def lineParser(self, line, line_num, data, errors):
        """Parse one "key value1 value2 ..." line into data/errors."""
        line = line.strip()
        if line.startswith('#'):  # Skip commented lines
            return
        if not len(line):  # skip empty lines
            return
        split = line.split()
        if len(split) < 1:
            errors.setdefault(self.fname, []).append(
                _("Malformed data at line: %s, data: %s")
                % (line_num + 1, line))
            return
        key = split[0]
        value = split[1:]
        if not self._validate(key):
            errors.setdefault(self.fname, []).append(
                _("Key validation failed at line: %s, data %s")
                % (line_num + 1, key))
            return
        if not self._valueValidate(value):
            errors.setdefault(self.fname, []).append(
                _("Value validation failed at line: %s, data %s")
                % (line_num + 1, value))
            return
        if key in data:
            # BUG FIX: was data[key].append(value), which nested the new
            # values as a sub-list instead of the flat list promised by
            # the class docstring.
            data[key].extend(value)
        else:
            data[key] = value
class KeyValuePairFileLoader(FileLoader):
"""
Class to load data from a file full of key=value pairs
>>>>key=value
>>>>foo=bar
becomes:
{'key':'value',
'foo':'bar'}
Duplicate keys are allowed; the last occurrence wins.
"""
def __init__(self, filename, validator, valuevalidator=None):
FileLoader.__init__(self, filename, validator)
# Accept-everything fallback when no value validator is supplied.
f = valuevalidator
if f is None:
# if they pass in no validator, just make a fake one
# that always returns true
def validate(key):
return True
f = validate
self._valueValidate = f
def lineParser(self, line, line_num, data, errors):
line = line.strip()
if line.startswith('#'): # skip commented lines
return
if not len(line): # skip empty lines
return
# Split on the first '=' only, so values may contain '='.
split = line.split('=', 1)
if len(split) < 2:
errors.setdefault(self.fname, []).append(
_("Malformed data at line: %s, data %s")
% (line_num + 1, line))
return
key = split[0].strip()
value = split[1].strip()
# An empty key is malformed; an empty value is allowed.
if not key:
errors.setdefault(self.fname, []).append(
_("Malformed key at line: %s, key %s")
% (line_num + 1, key))
return
if not self._validate(key):
errors.setdefault(self.fname, []).append(
_("Key validation failed at line: %s, data %s")
% (line_num + 1, key))
return
if not self._valueValidate(value):
errors.setdefault(self.fname, []).append(
_("Value validation failed at line: %s, data %s")
% (line_num + 1, value))
return
data[key] = value
| gpl-2.0 |
MagicSolutions/django-cms | cms/tests/menu_page_viewperm.py | 14 | 23365 | # -*- coding: utf-8 -*-
from __future__ import with_statement
from django.contrib.sites.models import Site
from django.contrib.auth.models import AnonymousUser, Group
from cms.api import create_page
from cms.menu import get_visible_pages
from cms.models import Page
from cms.models import ACCESS_DESCENDANTS, ACCESS_CHILDREN, ACCESS_PAGE
from cms.models import ACCESS_PAGE_AND_CHILDREN, ACCESS_PAGE_AND_DESCENDANTS
from cms.models.permissionmodels import GlobalPagePermission, PagePermission
from cms.test_utils.testcases import SettingsOverrideTestCase
from cms.utils.compat.dj import get_user_model, user_related_name
from menus.menu_pool import menu_pool
__all__ = [
'ViewPermissionTreeBugTests',
'ViewPermissionComplexMenuAllNodesTests'
]
class ViewPermissionTests(SettingsOverrideTestCase):
"""
Test various combinations of view permissions pages and menus
Focus on the different grant types and inheritance options of grant on
Given the tree:
|- Page_a
|- Page_b
| |- Page_b_a
| |- Page_b_b
| | |- Page_b_b_a
| | | |- Page_b_b_a_a
| | |- Page_b_b_b
| | |- Page_b_b_c
| |- Page_b_c
| |- Page_b_d
| | |- Page_b_d_a
| | |- Page_b_d_b
| | |- Page_b_d_c
|- Page_c
| |- Page_c_a
| |- Page_c_b
|- Page_d
| |- Page_d_a
| |- Page_d_b
| |- Page_d_c
"""
GROUPNAME_1 = 'group_b_ACCESS_PAGE_AND_CHILDREN'
GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
GROUPNAME_3 = 'group_b_ACCESS_PAGE_AND_DESCENDANTS'
GROUPNAME_4 = 'group_b_b_ACCESS_DESCENDANTS'
GROUPNAME_5 = 'group_d_ACCESS_PAGE'
def setUp(self):
self.site = Site()
self.site.pk = 1
super(ViewPermissionTests, self).setUp()
def tearDown(self):
super(ViewPermissionTests, self).tearDown()
def _setup_tree_pages(self):
stdkwargs = {
'template': 'nav_playground.html',
'language': 'en',
'published': True,
'in_navigation': True,
}
page_a = create_page("page_a", **stdkwargs) # first page slug is /
page_b = create_page("page_b", **stdkwargs)
page_c = create_page("page_c", **stdkwargs)
page_d = create_page("page_d", **stdkwargs)
page_b_a = create_page("page_b_a", parent=page_b, **stdkwargs)
page_b_b = create_page("page_b_b", parent=page_b, **stdkwargs)
page_b_b_a = create_page("page_b_b_a", parent=page_b_b, **stdkwargs)
page_b_b_b = create_page("page_b_b_b", parent=page_b_b, **stdkwargs)
page_b_b_c = create_page("page_b_b_c", parent=page_b_b, **stdkwargs)
page_b_b_a_a = create_page("page_b_b_a_a", parent=page_b_b_a, **stdkwargs)
page_b_c = create_page("page_b_c", parent=page_b, **stdkwargs)
page_b_d = create_page("page_b_d", parent=page_b, **stdkwargs)
page_b_d_a = create_page("page_b_d_a", parent=page_b_d, **stdkwargs)
page_b_d_b = create_page("page_b_d_b", parent=page_b_d, **stdkwargs)
page_b_d_c = create_page("page_b_d_c", parent=page_b_d, **stdkwargs)
page_c_a = create_page("page_c_a", parent=page_c, **stdkwargs)
page_c_b = create_page("page_c_b", parent=page_c, **stdkwargs)
page_d_a = create_page("page_d_a", parent=page_d, **stdkwargs)
page_d_b = create_page("page_d_b", parent=page_d, **stdkwargs)
page_d_c = create_page("page_d_c", parent=page_d, **stdkwargs)
page_d_d = create_page("page_d_d", parent=page_d, **stdkwargs)
pages = [
page_a,
page_b,
page_b_a,
page_b_b,
page_b_b_a,
page_b_b_a_a,
page_b_b_b,
page_b_b_c,
page_b_c,
page_b_d,
page_b_d_a,
page_b_d_b,
page_b_d_c,
page_c,
page_c_a,
page_c_b,
page_d,
page_d_a,
page_d_b,
page_d_c,
page_d_d,
]
new_pages = []
for page in pages:
new_pages.append(page.reload())
return new_pages
def _setup_user_groups(self):
"""
Setup a group for every grant on ACCESS TYPE
"""
userdata = [
('user_1', True, self.GROUPNAME_1),
('user_1_nostaff', False, self.GROUPNAME_1),
('user_2', True, self.GROUPNAME_2),
('user_2_nostaff', False, self.GROUPNAME_2),
('user_3', True, self.GROUPNAME_3),
('user_3_nostaff', False, self.GROUPNAME_3),
('user_4', True, self.GROUPNAME_4),
('user_4_nostaff', False, self.GROUPNAME_4),
('user_5', True, self.GROUPNAME_5),
('user_5_nostaff', False, self.GROUPNAME_5),
('user_staff', True, None),
]
default_users_count = get_user_model().objects.all().count()
for username, is_staff, groupname in userdata:
user = self._create_user(username, is_staff)
if groupname:
group, _ = Group.objects.get_or_create(name=groupname)
user_set = getattr(group, user_related_name)
user_set.add(user)
group.save()
self.assertEqual(11, get_user_model().objects.all().count()-default_users_count)
def _setup_view_restrictions(self):
"""
Setup a view restriction with every type of the grant_on ACCESS_*
"""
data = [("page_b", self.GROUPNAME_1, ACCESS_PAGE_AND_CHILDREN),
("page_b_b", self.GROUPNAME_2, ACCESS_CHILDREN),
("page_b", self.GROUPNAME_3, ACCESS_PAGE_AND_DESCENDANTS),
("page_b_b", self.GROUPNAME_4, ACCESS_DESCENDANTS),
("page_d", self.GROUPNAME_5, ACCESS_PAGE),
]
for title, groupname, inherit in data:
page = Page.objects.drafts().get(title_set__title=title)
group = Group.objects.get(name__iexact=groupname)
PagePermission.objects.create(can_view=True, group=group, page=page, grant_on=inherit)
self.assertEqual(5, PagePermission.objects.all().count())
self.assertEqual(0, GlobalPagePermission.objects.all().count())
def assertPageFound(self, url, client=None):
if not client:
client = self.client
response = client.get(url)
self.assertEqual(response.status_code, 200)
def assertPageNotFound(self, url, client=None):
if not client:
client = self.client
response = client.get(url)
self.assertEqual(response.status_code, 404)
def assertViewAllowed(self, page, user):
request = self.get_request(user, page)
self.assertTrue(page.has_view_permission(request))
def assertViewNotAllowed(self, page, user):
request = self.get_request(user, page)
self.assertFalse(page.has_view_permission(request))
def assertInMenu(self, page, user):
request = self.get_request(user, page)
nodes = menu_pool.get_nodes(request)
target_url = page.get_absolute_url()
found_in_menu = False
for node in nodes:
if node.get_absolute_url() == target_url:
found_in_menu = True
break
self.assertTrue(found_in_menu)
def assertNotInMenu(self, page, user):
request = self.get_request(user, page)
nodes = menu_pool.get_nodes(request)
target_url = page.get_absolute_url()
found_in_menu = False
for node in nodes:
if node.get_absolute_url() == target_url:
found_in_menu = True
break
self.assertFalse(found_in_menu)
def assertNodeMemberships(self, visible_page_ids, restricted_pages, public_page_ids):
"""
test all visible page ids are either in_public and not in_restricted
or not in_public and in_restricted
"""
for page_id in visible_page_ids:
in_restricted = False
in_public = False
if page_id in restricted_pages:
in_restricted = True
if page_id in public_page_ids:
in_public = True
self.assertTrue((in_public and not in_restricted) or
(not in_public and in_restricted),
msg="page_id %s in_public: %s, in_restricted: %s" % (page_id, in_public, in_restricted))
def assertGrantedVisibility(self, all_pages, expected_granted_pages, username=None):
"""
helper function to check the expected_granted_pages are
not in the restricted_pages list and
all visible pages are in the expected_granted_pages
"""
# log the user in if present
user = None
if username is not None:
if get_user_model().USERNAME_FIELD == 'email':
username = username + '@django-cms.org'
query = dict()
query[get_user_model().USERNAME_FIELD+'__iexact'] = username
user = get_user_model().objects.get(**query)
request = self.get_request(user)
visible_page_ids = get_visible_pages(request, all_pages, self.site)
public_page_ids = Page.objects.drafts().filter(title_set__title__in=expected_granted_pages).values_list('id',
flat=True)
self.assertEqual(len(visible_page_ids), len(expected_granted_pages))
restricted_pages = Page.objects.public().exclude(title_set__title__in=expected_granted_pages).values_list('id',
flat=True)
self.assertNodeMemberships(visible_page_ids, restricted_pages, public_page_ids)
def get_request(self, user=None, page=None):
# see tests/menu.py line 753
path = "/"
if page:
path = page.get_absolute_url()
attrs = {
'user': user or AnonymousUser(),
'REQUEST': {},
'GET': {},
'path': path,
'session': {},
}
return type('Request', (object,), attrs)
def get_url_dict(self, pages, language='en'):
return dict((page.get_absolute_url(language=language), page) for page in pages)
class ViewPermissionComplexMenuAllNodesTests(ViewPermissionTests):
    """
    Test CMS_PUBLIC_FOR=all group access and menu nodes rendering.

    Each test builds the fixture tree, the users/groups and the five
    ACCESS_* restrictions, then checks which pages a given user may view
    and which appear in the rendered menu.
    """
    # pages are public unless explicitly restricted (CMS_PUBLIC_FOR=all)
    settings_overrides = {
        'CMS_PERMISSION': True,
        'CMS_PUBLIC_FOR': 'all',
    }
    def test_public_pages_anonymous_norestrictions(self):
        """
        All pages are visible to an anonymous user
        """
        all_pages = self._setup_tree_pages()
        request = self.get_request()
        visible_page_ids = get_visible_pages(request, all_pages, self.site)
        self.assertEqual(len(all_pages), len(visible_page_ids))
        nodes = menu_pool.get_nodes(request)
        self.assertEqual(len(nodes), len(all_pages))
    def test_public_menu_anonymous_user(self):
        """
        Anonymous user should only see the pages in the rendered menu
        that have no permissions assigned,directly or indirectly
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        # everything under page_b and page_d itself carries a restriction
        granted = ['page_a',
                   'page_c',
                   'page_c_a',
                   'page_c_b',
                   'page_d_a',
                   'page_d_b',
                   'page_d_c',
                   'page_d_d'
        ]
        self.assertGrantedVisibility(all_pages, granted)
        urls = self.get_url_dict(all_pages)
        user = AnonymousUser()
        request = self.get_request(user, urls['/en/'])
        nodes = menu_pool.get_nodes(request)
        # top-level menu: home, page_c and its two children
        self.assertEqual(len(nodes), 4)
        self.assertInMenu(urls["/en/"], user)
        self.assertInMenu(urls["/en/page_c/"], user)
        self.assertInMenu(urls["/en/page_c/page_c_a/"], user)
        self.assertInMenu(urls["/en/page_c/page_c_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertNotInMenu(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertNotInMenu(urls["/en/page_d/"], user)
    def test_menu_access_page_and_children_group_1(self):
        """
        simulate behaviour of group b member
        group_b_ACCESS_PAGE_AND_CHILDREN to page_b
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        granted = ['page_a',
                   'page_c',
                   'page_c_a',
                   'page_c_b',
                   #group_1
                   'page_b', #page_id b has page_id and children restricted - group 1
                   'page_b_a',
                   'page_b_b', #page_id b_b children restricted - group 2
                   'page_b_c',
                   'page_b_d',
                   # not restricted
                   'page_d_a',
                   'page_d_b',
                   'page_d_c',
                   'page_d_d'
        ]
        urls = self.get_url_dict(all_pages)
        if get_user_model().USERNAME_FIELD == 'email':
            user = get_user_model().objects.get(email='user_1@django-cms.org')
        else:
            user = get_user_model().objects.get(username='user_1')
        self.assertGrantedVisibility(all_pages, granted, username='user_1')
        self.assertViewAllowed(urls["/en/page_b/"], user)
        self.assertInMenu(urls["/en/page_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertInMenu(urls["/en/page_b/page_b_b/"], user)
        # descendant
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertNotInMenu(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        # group 5
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertNotInMenu(urls["/en/page_d/"], user)
        # should be public as only page_d is restricted
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
        self.assertNotInMenu(urls["/en/page_d/page_d_a/"], user)
    def test_menu_access_children_group_2(self):
        """
        simulate behaviour of group 2 member
        GROUPNAME_2 = 'group_b_b_ACCESS_CHILDREN'
        to page_b_b
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        # ACCESS_CHILDREN grants b_b's children but not page_b_b itself
        granted = [
            'page_a',
            'page_c',
            'page_c_a',
            'page_c_b',
            'page_b_b_a',
            'page_b_b_b',
            'page_b_b_c',
            # not restricted
            'page_d_a',
            'page_d_b',
            'page_d_c',
            'page_d_d',
        ]
        self.assertGrantedVisibility(all_pages, granted, username='user_2')
        urls = self.get_url_dict(all_pages)
        if get_user_model().USERNAME_FIELD == 'email':
            user = get_user_model().objects.get(email='user_2@django-cms.org')
        else:
            user = get_user_model().objects.get(username='user_2')
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/page_b_b_a_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_menu_access_page_and_descendants_group_3(self):
        """
        simulate behaviour of group 3 member
        group_b_ACCESS_PAGE_AND_DESCENDANTS to page_b
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        # page_b and its entire subtree are granted to group 3
        granted = ['page_a',
                   'page_b',
                   'page_b_a',
                   'page_b_b',
                   'page_b_b_a',
                   'page_b_b_a_a',
                   'page_b_b_b',
                   'page_b_b_c',
                   'page_b_c',
                   'page_b_d',
                   'page_b_d_a',
                   'page_b_d_b',
                   'page_b_d_c',
                   'page_c',
                   'page_c_a',
                   'page_c_b',
                   'page_d_a',
                   'page_d_b',
                   'page_d_c',
                   'page_d_d',
        ]
        self.assertGrantedVisibility(all_pages, granted, username='user_3')
        urls = self.get_url_dict(all_pages)
        if get_user_model().USERNAME_FIELD == 'email':
            user = get_user_model().objects.get(email='user_3@django-cms.org')
        else:
            user = get_user_model().objects.get(username='user_3')
        self.assertViewAllowed(urls["/en/page_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_d/page_b_d_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_menu_access_descendants_group_4(self):
        """
        simulate behaviour of group 4 member
        group_b_b_ACCESS_DESCENDANTS to page_b_b
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        # descendants of page_b_b are granted, page_b_b itself is not
        granted = ['page_a',
                   'page_b_b_a',
                   'page_b_b_a_a',
                   'page_b_b_b',
                   'page_b_b_c',
                   'page_c',
                   'page_c_a',
                   'page_c_b',
                   'page_d_a',
                   'page_d_b',
                   'page_d_c',
                   'page_d_d',
        ]
        self.assertGrantedVisibility(all_pages, granted, username='user_4')
        urls = self.get_url_dict(all_pages)
        if get_user_model().USERNAME_FIELD == 'email':
            user = get_user_model().objects.get(email='user_4@django-cms.org')
        else:
            user = get_user_model().objects.get(username='user_4')
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewNotAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
    def test_menu_access_page_group_5(self):
        """
        simulate behaviour of group b member
        group_d_ACCESS_PAGE to page_d
        """
        self._setup_user_groups()
        all_pages = self._setup_tree_pages()
        self._setup_view_restrictions()
        # only page_d itself is granted; its children were never restricted
        granted = ['page_a',
                   'page_c',
                   'page_c_a',
                   'page_c_b',
                   'page_d',
                   'page_d_a',
                   'page_d_b',
                   'page_d_c',
                   'page_d_d',
        ]
        self.assertGrantedVisibility(all_pages, granted, username='user_5')
        urls = self.get_url_dict(all_pages)
        if get_user_model().USERNAME_FIELD == 'email':
            user = get_user_model().objects.get(email='user_5@django-cms.org')
        else:
            user = get_user_model().objects.get(username='user_5')
        # call /
        self.assertViewNotAllowed(urls["/en/page_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/"], user)
        self.assertViewNotAllowed(urls["/en/page_b/page_b_b/page_b_b_a/"], user)
        self.assertViewAllowed(urls["/en/page_d/"], user)
        self.assertViewAllowed(urls["/en/page_d/page_d_a/"], user)
class ViewPermissionTreeBugTests(ViewPermissionTests):
    """Test issue 1113
    https://github.com/divio/django-cms/issues/1113
    Wrong view permission calculation in PagePermission.objects.for_page
    grant_on=ACCESS_PAGE_AND_CHILDREN or ACCESS_PAGE_AND_DESCENDANTS to page 6
    Test if this affects the menu entries and page visibility
    """
    settings_overrides = {
        'CMS_PERMISSION': True,
        'CMS_PUBLIC_FOR': 'all',
    }
    GROUPNAME_6 = 'group_6_ACCESS_PAGE'
    def _setup_pages(self):
        """
        Tree Structure

            |- Page_1
            |  |- Page_2
            |     |- Page_3
            |        |- Page_4 (false positive)
            |  |- Page_5
            |  |  |- Page_6 (group 6 page access)
        """
        stdkwargs = {
            'template': 'nav_playground.html',
            'language': 'en',
            'published': True,
            'in_navigation': True,
        }
        page_1 = create_page("page_1", **stdkwargs) # first page slug is /
        page_2 = create_page("page_2", parent=page_1, **stdkwargs)
        page_3 = create_page("page_3", parent=page_2, **stdkwargs)
        page_4 = create_page("page_4", parent=page_3, **stdkwargs)
        page_5 = create_page("page_5", parent=page_1, **stdkwargs)
        page_6 = create_page("page_6", parent=page_5, **stdkwargs)
        return [page_1,
                page_2,
                page_3,
                page_4,
                page_5,
                page_6,
        ]
    def _setup_user(self):
        # one staff user that belongs to the page-6 access group
        user = self._create_user('user_6', True)
        group = Group.objects.create(name=self.GROUPNAME_6)
        user_set = getattr(group, user_related_name)
        user_set.add(user)
        group.save()
    def _setup_permviewbug(self):
        """
        Setup group_6_ACCESS_PAGE view restriction
        """
        page = Page.objects.drafts().get(title_set__title="page_6")
        group = Group.objects.get(name__iexact=self.GROUPNAME_6)
        # two restrictions on the same page, reproducing the issue-1113 setup
        PagePermission.objects.create(can_view=True, group=group, page=page, grant_on=ACCESS_PAGE_AND_CHILDREN)
        PagePermission.objects.create(can_view=True, group=group, page=page, grant_on=ACCESS_PAGE_AND_DESCENDANTS)
    def test_pageforbug(self):
        all_pages = self._setup_pages()
        self._setup_user()
        self._setup_permviewbug()
        # for_page must not report permissions for any page but page_6
        for page in all_pages:
            perm = PagePermission.objects.for_page(page=page)
            # only page_6 has a permission assigned
            if page.get_title() == 'page_6':
                self.assertEqual(len(perm), 2)
            else:
                msg = "Permission wrong at page %s" % (page.get_title())
                self.assertEqual(len(perm), 0, msg)
        granted = ['page_1',
                   'page_2',
                   'page_3',
                   'page_4',
                   'page_5',
        ]
        urls = self.get_url_dict(all_pages)
        user = AnonymousUser()
        # anonymous doesn't see page_6
        self.assertGrantedVisibility(all_pages, granted)
        self.assertViewAllowed(urls["/en/page_2/page_3/page_4/"], user)
        self.assertViewAllowed(urls["/en/page_5/"], user)
        self.assertViewNotAllowed(urls["/en/page_5/page_6/"], user)
        # group member
        granted = ['page_1',
                   'page_2',
                   'page_3',
                   'page_4',
                   'page_5',
                   'page_6',
        ]
        self.assertGrantedVisibility(all_pages, granted, username='user_6')
        if get_user_model().USERNAME_FIELD == 'email':
            user = get_user_model().objects.get(email='user_6@django-cms.org')
        else:
            user = get_user_model().objects.get(username='user_6')
        url = "/en/page_2/page_3/page_4/"
        self.assertViewAllowed(urls[url], user)
        url = "/en/page_5/page_6/"
        self.assertViewAllowed(urls[url], user)
| bsd-3-clause |
GageGaskins/osf.io | tests/test_utils.py | 4 | 17448 | # -*- coding: utf-8 -*-
import os
import mock
import blinker
import unittest
from flask import Flask
from nose.tools import * # noqa (PEP8 asserts)
import datetime
from tests.base import OsfTestCase
from tests.factories import RegistrationFactory
from framework.routing import Rule, json_renderer
from framework.utils import secure_filename
from website.routes import process_rules, OsfWebRenderer
from website import settings
from website import util
from website.util import paths
from website.util.mimetype import get_mimetype
from website.util import web_url_for, api_url_for, is_json_request, waterbutler_url_for, conjunct, api_v2_url
from website.project import utils as project_utils
# Optional dependency probe: python-magic enables content-based MIME
# detection; several tests below are skipped depending on its availability.
try:
    import magic # noqa
    LIBMAGIC_AVAILABLE = True
except ImportError:
    LIBMAGIC_AVAILABLE = False
# Absolute path of the directory containing this test module.
HERE = os.path.dirname(os.path.abspath(__file__))
class TestUrlForHelpers(unittest.TestCase):
    """Tests for URL reversal helpers: api_url_for, web_url_for, api_v2_url,
    is_json_request and waterbutler_url_for, against a throwaway Flask app."""
    def setUp(self):
        # dummy view callables so the routing rules have something to point at
        def dummy_view():
            return {}
        def dummy_guid_project_view():
            return {}
        def dummy_guid_profile_view():
            return {}
        self.app = Flask(__name__)
        api_rule = Rule([
            '/api/v1/<pid>/',
            '/api/v1/<pid>/component/<nid>/'
        ], 'get', dummy_view, json_renderer)
        web_rule = Rule([
            '/<pid>/',
            '/<pid>/component/<nid>/'
        ], 'get', dummy_view, OsfWebRenderer)
        web_guid_project_rule = Rule([
            '/project/<pid>/',
            '/project/<pid>/node/<nid>/',
        ], 'get', dummy_guid_project_view, OsfWebRenderer)
        web_guid_profile_rule = Rule([
            '/profile/<pid>/',
        ], 'get', dummy_guid_profile_view, OsfWebRenderer)
        process_rules(self.app, [api_rule, web_rule, web_guid_project_rule, web_guid_profile_rule])
    def test_api_url_for(self):
        with self.app.test_request_context():
            assert api_url_for('dummy_view', pid='123') == '/api/v1/123/'
    def test_api_v2_url_with_port(self):
        full_url = api_v2_url('/nodes/abcd3/contributors/',
                              base_route='http://localhost:8000/',
                              base_prefix='v2/')
        assert_equal(full_url, "http://localhost:8000/v2/nodes/abcd3/contributors/")
        # Handles URL the same way whether or not user enters a leading slash
        full_url = api_v2_url('nodes/abcd3/contributors/',
                              base_route='http://localhost:8000/',
                              base_prefix='v2/')
        assert_equal(full_url, "http://localhost:8000/v2/nodes/abcd3/contributors/")
        # User is still responsible for the trailing slash. If they omit it, it doesn't appear at end of URL
        full_url = api_v2_url('/nodes/abcd3/contributors',
                              base_route='http://localhost:8000/',
                              base_prefix='v2/')
        assert_not_equal(full_url, "http://localhost:8000/v2/nodes/abcd3/contributors/")
    def test_api_v2_url_with_params(self):
        """Handles- and encodes- URLs with parameters (dict and kwarg) correctly"""
        full_url = api_v2_url('/nodes/abcd3/contributors/',
                              params={'filter[fullname]': 'bob'},
                              base_route='https://api.osf.io/',
                              base_prefix='v2/',
                              page_size=10)
        assert_equal(full_url, "https://api.osf.io/v2/nodes/abcd3/contributors/?filter%5Bfullname%5D=bob&page_size=10")
    def test_api_v2_url_base_path(self):
        """Given a blank string, should return the base path (domain + port + prefix) with no extra cruft at end"""
        full_url = api_v2_url('',
                              base_route='http://localhost:8000/',
                              base_prefix='v2/')
        assert_equal(full_url, "http://localhost:8000/v2/")
    def test_web_url_for(self):
        with self.app.test_request_context():
            assert web_url_for('dummy_view', pid='123') == '/123/'
    def test_web_url_for_guid(self):
        # _guid=True collapses /project/<pid>/... routes to the bare guid URL
        with self.app.test_request_context():
            # check /project/<pid>
            assert_equal('/pid123/', web_url_for('dummy_guid_project_view', pid='pid123', _guid=True))
            assert_equal('/project/pid123/', web_url_for('dummy_guid_project_view', pid='pid123', _guid=False))
            assert_equal('/project/pid123/', web_url_for('dummy_guid_project_view', pid='pid123'))
            # check /project/<pid>/node/<nid>
            assert_equal('/nid321/', web_url_for('dummy_guid_project_view', pid='pid123', nid='nid321', _guid=True))
            assert_equal(
                '/project/pid123/node/nid321/',
                web_url_for('dummy_guid_project_view', pid='pid123', nid='nid321', _guid=False))
            assert_equal(
                '/project/pid123/node/nid321/',
                web_url_for('dummy_guid_project_view', pid='pid123', nid='nid321'))
            # check /profile/<pid>
            assert_equal('/pro123/', web_url_for('dummy_guid_profile_view', pid='pro123', _guid=True))
            assert_equal('/profile/pro123/', web_url_for('dummy_guid_profile_view', pid='pro123', _guid=False))
            assert_equal('/profile/pro123/', web_url_for('dummy_guid_profile_view', pid='pro123'))
    def test_web_url_for_guid_regex_conditions(self):
        with self.app.test_request_context():
            # regex matches limit keys to a minimum of 5 alphanumeric characters.
            # check /project/<pid>
            assert_not_equal('/123/', web_url_for('dummy_guid_project_view', pid='123', _guid=True))
            assert_equal('/123456/', web_url_for('dummy_guid_project_view', pid='123456', _guid=True))
            # check /project/<pid>/node/<nid>
            assert_not_equal('/321/', web_url_for('dummy_guid_project_view', pid='123', nid='321', _guid=True))
            assert_equal('/654321/', web_url_for('dummy_guid_project_view', pid='123456', nid='654321', _guid=True))
            # check /profile/<pid>
            assert_not_equal('/123/', web_url_for('dummy_guid_profile_view', pid='123', _guid=True))
            assert_equal('/123456/', web_url_for('dummy_guid_profile_view', pid='123456', _guid=True))
    def test_web_url_for_guid_case_sensitive(self):
        with self.app.test_request_context():
            # check /project/<pid>
            assert_equal('/ABCdef/', web_url_for('dummy_guid_project_view', pid='ABCdef', _guid=True))
            # check /project/<pid>/node/<nid>
            assert_equal('/GHIjkl/', web_url_for('dummy_guid_project_view', pid='ABCdef', nid='GHIjkl', _guid=True))
            # check /profile/<pid>
            assert_equal('/MNOpqr/', web_url_for('dummy_guid_profile_view', pid='MNOpqr', _guid=True))
    def test_web_url_for_guid_invalid_unicode(self):
        with self.app.test_request_context():
            # unicode id's are not supported when encoding guid url's.
            # check /project/<pid>
            assert_not_equal('/ø∆≤µ©/', web_url_for('dummy_guid_project_view', pid='ø∆≤µ©', _guid=True))
            assert_equal(
                '/project/%C3%B8%CB%86%E2%88%86%E2%89%A4%C2%B5%CB%86/',
                web_url_for('dummy_guid_project_view', pid='øˆ∆≤µˆ', _guid=True))
            # check /project/<pid>/node/<nid>
            assert_not_equal(
                '/ø∆≤µ©/',
                web_url_for('dummy_guid_project_view', pid='ø∆≤µ©', nid='©µ≤∆ø', _guid=True))
            assert_equal(
                '/project/%C3%B8%CB%86%E2%88%86%E2%89%A4%C2%B5%CB%86/node/%C2%A9%C2%B5%E2%89%A4%E2%88%86%C3%B8/',
                web_url_for('dummy_guid_project_view', pid='øˆ∆≤µˆ', nid='©µ≤∆ø', _guid=True))
            # check /profile/<pid>
            assert_not_equal('/ø∆≤µ©/', web_url_for('dummy_guid_profile_view', pid='ø∆≤µ©', _guid=True))
            assert_equal(
                '/profile/%C3%B8%CB%86%E2%88%86%E2%89%A4%C2%B5%CB%86/',
                web_url_for('dummy_guid_profile_view', pid='øˆ∆≤µˆ', _guid=True))
    def test_api_url_for_with_multiple_urls(self):
        with self.app.test_request_context():
            url = api_url_for('dummy_view', pid='123', nid='abc')
            assert url == '/api/v1/123/component/abc/'
    def test_web_url_for_with_multiple_urls(self):
        with self.app.test_request_context():
            url = web_url_for('dummy_view', pid='123', nid='abc')
            assert url == '/123/component/abc/'
    def test_is_json_request(self):
        # content type decides; charset suffix must not break detection
        with self.app.test_request_context(content_type='application/json'):
            assert_true(is_json_request())
        with self.app.test_request_context(content_type=None):
            assert_false(is_json_request())
        with self.app.test_request_context(content_type='application/json;charset=UTF-8'):
            assert_true(is_json_request())
    def test_waterbutler_url_for(self):
        with self.app.test_request_context():
            url = waterbutler_url_for('upload', 'provider', 'path', mock.Mock(_id='_id'))
            assert_in('nid=_id', url)
            assert_in('/file?', url)
            assert_in('path=path', url)
            assert_in('provider=provider', url)
    def test_waterbutler_url_for_implicit_cookie(self):
        # the auth cookie is picked up from the request when not passed in
        with self.app.test_request_context() as context:
            context.request.cookies = {settings.COOKIE_NAME: 'cookie'}
            url = waterbutler_url_for('upload', 'provider', 'path', mock.Mock(_id='_id'))
            assert_in('nid=_id', url)
            assert_in('/file?', url)
            assert_in('path=path', url)
            assert_in('cookie=cookie', url)
            assert_in('provider=provider', url)
    def test_waterbutler_url_for_cookie_not_required(self):
        with self.app.test_request_context():
            url = waterbutler_url_for('upload', 'provider', 'path', mock.Mock(_id='_id'))
            assert_not_in('cookie', url)
            assert_in('nid=_id', url)
            assert_in('/file?', url)
            assert_in('path=path', url)
            assert_in('provider=provider', url)
class TestGetMimeTypes(unittest.TestCase):
    """Tests for get_mimetype: extension-based lookup, with libmagic-based
    content sniffing as a fallback when python-magic is installed."""
    def test_get_markdown_mimetype_from_filename(self):
        name = 'test.md'
        mimetype = get_mimetype(name)
        assert_equal('text/x-markdown', mimetype)
    @unittest.skipIf(not LIBMAGIC_AVAILABLE, 'Must have python-magic and libmagic installed')
    def test_unknown_extension_with_no_contents_not_real_file_results_in_exception(self):
        # libmagic tries to open the (nonexistent) file and raises IOError
        name = 'test.thisisnotarealextensionidonotcarwhatyousay'
        with assert_raises(IOError):
            get_mimetype(name)
    @unittest.skipIf(LIBMAGIC_AVAILABLE, 'This test only runs if python-magic and libmagic are not installed')
    def test_unknown_extension_with_no_contents_not_real_file_results_in_exception2(self):
        # without libmagic the lookup quietly returns None instead of raising
        name = 'test.thisisnotarealextensionidonotcarwhatyousay'
        mime_type = get_mimetype(name)
        assert_equal(None, mime_type)
    @unittest.skipIf(not LIBMAGIC_AVAILABLE, 'Must have python-magic and libmagic installed')
    def test_unknown_extension_with_real_file_results_in_python_mimetype(self):
        name = 'test_views.notarealfileextension'
        maybe_python_file = os.path.join(HERE, 'test_files', name)
        mimetype = get_mimetype(maybe_python_file)
        assert_equal('text/x-python', mimetype)
    @unittest.skipIf(not LIBMAGIC_AVAILABLE, 'Must have python-magic and libmagic installed')
    def test_unknown_extension_with_python_contents_results_in_python_mimetype(self):
        # content passed explicitly wins over the unknown extension
        name = 'test.thisisnotarealextensionidonotcarwhatyousay'
        python_file = os.path.join(HERE, 'test_utils.py')
        with open(python_file, 'r') as the_file:
            content = the_file.read()
        mimetype = get_mimetype(name, content)
        assert_equal('text/x-python', mimetype)
class TestFrameworkUtils(unittest.TestCase):
    """Tests for the project's secure_filename wrapper around Werkzeug's."""
    def test_leading_underscores(self):
        # unlike stock Werkzeug, leading underscores must be preserved
        assert_equal(
            '__init__.py',
            secure_filename('__init__.py')
        )
    def test_werkzeug_cases(self):
        """Test that Werkzeug's tests still pass for our wrapped version"""
        # Copied from Werkzeug
        # BSD licensed - original at github.com/mitsuhiko/werkzeug,
        # /tests/test_utils.py, line 282, commit 811b438
        assert_equal(
            'My_cool_movie.mov',
            secure_filename('My cool movie.mov')
        )
        assert_equal(
            'etc_passwd',
            secure_filename('../../../etc/passwd')
        )
        assert_equal(
            'i_contain_cool_umlauts.txt',
            secure_filename(u'i contain cool \xfcml\xe4uts.txt')
        )
class TestWebpackFilter(unittest.TestCase):
    """Tests for paths.webpack_asset, which maps logical asset names to
    their hashed build filenames."""
    def setUp(self):
        # logical name -> content-hashed filename, as produced by webpack
        self.asset_paths = {'assets': 'assets.07123e.js'}
    def test_resolve_asset(self):
        asset = paths.webpack_asset('assets.js', self.asset_paths, debug=False)
        assert_equal(asset, '/static/public/js/assets.07123e.js')
    def test_resolve_asset_not_found_and_not_in_debug_mode(self):
        # outside debug mode an unknown asset name must raise, not guess
        with assert_raises(KeyError):
            paths.webpack_asset('bundle.js', self.asset_paths, debug=False)
class TestWebsiteUtils(unittest.TestCase):
    """Tests for website.util helpers: conjunct (English list joining) and
    rapply (recursive function application over nested containers).

    NOTE: Python 2 era code — relies on ``basestring`` and on ``range``
    returning an indexable list.
    """
    def test_conjunct(self):
        words = []
        assert_equal(conjunct(words), '')
        words = ['a']
        assert_equal(conjunct(words), 'a')
        words = ['a', 'b']
        assert_equal(conjunct(words), 'a and b')
        words = ['a', 'b', 'c']
        assert_equal(conjunct(words), 'a, b, and c')
        assert_equal(conjunct(words, conj='or'), 'a, b, or c')
    def test_rapply(self):
        # nested dict/list structure: rapply must reach every leaf string
        inputs = {
            'foo': 'bar',
            'baz': {
                'boom': ['kapow'],
                'bang': 'bam'
            },
            'bat': ['man']
        }
        outputs = util.rapply(inputs, str.upper)
        assert_equal(outputs['foo'], 'bar'.upper())
        assert_equal(outputs['baz']['boom'], ['kapow'.upper()])
        assert_equal(outputs['baz']['bang'], 'bam'.upper())
        assert_equal(outputs['bat'], ['man'.upper()])
        r_assert = lambda s: assert_equal(s.upper(), s)
        util.rapply(outputs, r_assert)
    def test_rapply_on_list(self):
        inputs = range(5)
        add_one = lambda n: n + 1
        outputs = util.rapply(inputs, add_one)
        for i in inputs:
            assert_equal(outputs[i], i + 1)
    def test_rapply_on_tuple(self):
        # container type must be preserved, not flattened to list
        inputs = tuple(i for i in range(5))
        add_one = lambda n: n + 1
        outputs = util.rapply(inputs, add_one)
        for i in inputs:
            assert_equal(outputs[i], i + 1)
        assert_equal(type(outputs), tuple)
    def test_rapply_on_set(self):
        inputs = set(i for i in range(5))
        add_one = lambda n: n + 1
        outputs = util.rapply(inputs, add_one)
        for i in inputs:
            assert_in(i + 1, outputs)
        assert_true(isinstance(outputs, set))
    def test_rapply_on_str(self):
        # a plain string is a leaf, not a container to recurse into
        input = "bob"
        convert = lambda s: s.upper()
        outputs = util.rapply(input, convert)
        assert_equal("BOB", outputs)
        assert_true(isinstance(outputs, basestring))
    def test_rapply_preserves_args_and_kwargs(self):
        def zero_if_not_check(item, check, checkFn=lambda n: n):
            if check and checkFn(item):
                return item
            return 0
        inputs = range(5)
        outputs = util.rapply(inputs, zero_if_not_check, True, checkFn=lambda n: n % 2)
        assert_equal(outputs, [0, 1, 0, 3, 0])
        outputs = util.rapply(inputs, zero_if_not_check, False, checkFn=lambda n: n % 2)
        assert_equal(outputs, [0, 0, 0, 0, 0])
class TestProjectUtils(OsfTestCase):
    """Tests for project_utils.recent_public_registrations ordering/limits."""
    def set_registered_date(self, reg, date):
        # write the field descriptor directly (safe=True) to backdate a
        # registration, since registered_date is normally set automatically
        reg._fields['registered_date'].__set__(
            reg,
            date,
            safe=True
        )
        reg.save()
    def test_get_recent_public_registrations(self):
        count = 0
        # five public registrations, one per day going back in time
        for i in range(5):
            reg = RegistrationFactory()
            reg.is_public = True
            count = count + 1
            tdiff = datetime.datetime.now() - datetime.timedelta(days=count)
            self.set_registered_date(reg, tdiff)
        regs = [r for r in project_utils.recent_public_registrations()]
        assert_equal(len(regs), 5)
        # results must come back newest-first
        for i in range(4):
            assert_true(regs[i].registered_date > regs[i + 1].registered_date)
        # five more; an explicit limit of 7 caps the result set
        for i in range(5):
            reg = RegistrationFactory()
            reg.is_public = True
            count = count + 1
            tdiff = datetime.datetime.now() - datetime.timedelta(days=count)
            self.set_registered_date(reg, tdiff)
        regs = [r for r in project_utils.recent_public_registrations(7)]
        assert_equal(len(regs), 7)
class TestSignalUtils(unittest.TestCase):
    """Tests for util.disconnected_from, a context manager that temporarily
    detaches a listener from a blinker signal."""
    def setUp(self):
        self.signals = blinker.Namespace()
        self.signal_ = self.signals.signal('signal-')
        # mock records whether the listener was actually invoked
        self.mock_listener = mock.MagicMock()
    def listener(self, signal):
        self.mock_listener()
    def test_signal(self):
        # sanity check: a connected listener fires on send()
        self.signal_.connect(self.listener)
        self.signal_.send()
        self.mock_listener.assert_called()
    def test_temporary_disconnect(self):
        # inside the context manager the listener must NOT fire
        self.signal_.connect(self.listener)
        with util.disconnected_from(self.signal_, self.listener):
            self.signal_.send()
        self.mock_listener.assert_not_called()
| apache-2.0 |
bwrsandman/OpenUpgrade | addons/subscription/__openerp__.py | 261 | 1885 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
# OpenERP/Odoo addon manifest for the "Recurring Documents" (subscription)
# module: metadata, dependencies and data files loaded at install time.
{
    'name': 'Recurring Documents',
    'version': '1.0',
    'category': 'Tools',
    'description': """
Create recurring documents.
===========================
This module allows to create new documents and add subscriptions on that document.
e.g. To have an invoice generated automatically periodically:
-------------------------------------------------------------
* Define a document type based on Invoice object
* Define a subscription whose source document is the document defined as
above. Specify the interval information and partner to be invoice.
""",
    'author': 'OpenERP SA',
    'depends': ['base'],
    # security rules first, then the views
    'data': ['security/subcription_security.xml', 'security/ir.model.access.csv', 'subscription_view.xml'],
    'demo': ['subscription_demo.xml',],
    'installable': True,
    'auto_install': False,
}
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
redbear/micropython | esp8266/scripts/inisetup.py | 10 | 1267 | import uos
import network
from flashbdev import bdev
def wifi():
    """Configure the board's SoftAP with a MAC-derived ESSID and the
    default WPA2 password."""
    import ubinascii
    access_point = network.WLAN(network.AP_IF)
    # last three MAC bytes make the ESSID unique per board
    mac_suffix = ubinascii.hexlify(access_point.config("mac")[-3:])
    access_point.config(essid=b"MicroPython-%s" % mac_suffix,
                        authmode=network.AUTH_WPA_WPA2_PSK,
                        password=b"micropythoN")
def check_bootsec():
    """Check the first flash sector of the block device.

    Returns True when every byte of sector 0 reads back as 0xFF (the value
    this code treats as "empty"); otherwise calls fs_corrupted(), which
    prints a recovery message forever and never returns.
    """
    buf = bytearray(bdev.SEC_SIZE)
    bdev.readblocks(0, buf)
    # all() replaces the manual empty-flag loop and short-circuits on the
    # first non-0xFF byte, exactly like the original break
    if all(b == 0xff for b in buf):
        return True
    fs_corrupted()
def fs_corrupted():
    """Report a corrupted filesystem on the console forever (never returns)."""
    import time
    # loop endlessly so the message stays visible over the serial console
    while 1:
        print("""\
FAT filesystem appears to be corrupted. If you had important data there, you
may want to make a flash snapshot to try to recover it. Otherwise, perform
factory reprogramming of MicroPython firmware (completely erase flash, followed
by firmware programming).
""")
        time.sleep(3)
def setup():
    """First-boot initialization: verify the boot sector, bring up the
    SoftAP, format the flash block device as FAT, write a default /boot.py
    and return the mounted VfsFat instance."""
    check_bootsec()
    print("Performing initial setup")
    wifi()
    uos.VfsFat.mkfs(bdev)
    vfs = uos.VfsFat(bdev, "")
    # the written content below is executed on every subsequent boot
    with open("/boot.py", "w") as f:
        f.write("""\
# This file is executed on every boot (including wake-boot from deepsleep)
#import esp
#esp.osdebug(None)
import gc
#import webrepl
#webrepl.start()
gc.collect()
""")
    return vfs
| mit |
tadhg-ohiggins/regulations-site | regulations/tests/views_preamble_tests.py | 1 | 11069 | # -*- coding: utf-8 -*-
from mock import patch
from unittest import TestCase
from datetime import date, timedelta
from nose.tools import assert_equal
from django.http import Http404
from django.test import RequestFactory, override_settings
from fr_notices.navigation import make_preamble_nav
from regulations.generator.layers import diff_applier, layers_applier
from regulations.views import preamble
from regulations.views.preamble import CommentState
class PreambleViewTests(TestCase):
    # Minimal preamble tree fixture shared by the tests below: root node "1"
    # with child "1-c" (containing "1-c-i" and "1-c-x") and sibling "1-1".
    _mock_preamble = dict(text='1', label=['1'], node_type='', children=[
        dict(text='2', label=['1', 'c'], node_type='', children=[
            dict(text='3', label=['1', 'c', 'i'], node_type='', children=[]),
            dict(text='4', label=['1', 'c', 'x'], node_type='', children=[])
        ]),
        dict(text='5', label=['1', '1'], node_type='', children=[])
    ])
def test_find_subtree(self):
"""When a node is present in a tree, we should be able to find it.
When it is not, we should get None"""
root = self._mock_preamble
fn = preamble.find_subtree
self.assertEqual(fn(root, ['1'])['text'], '1')
self.assertEqual(fn(root, ['1', 'c'])['text'], '2')
self.assertEqual(fn(root, ['1', 'c', 'i'])['text'], '3')
self.assertEqual(fn(root, ['1', 'c', 'x'])['text'], '4')
self.assertEqual(fn(root, ['1', '1'])['text'], '5')
self.assertIsNone(fn(root, ['2']))
self.assertIsNone(fn(root, ['1', '2']))
self.assertIsNone(fn(root, ['1', 'c', 'r']))
self.assertIsNone(fn(root, ['1', 'c', 'i', 'r']))
    @patch('fr_notices.navigation.CFRChangeBuilder')
    @patch('regulations.generator.generator.api_reader')
    @patch('regulations.views.preamble.ApiReader')
    def test_get_integration(self, ApiReader, api_reader, CFRChangeBuilder):
        """Verify that the contexts are built correctly before being sent to
        the template. AJAX/partial=true requests should only get the inner
        context (i.e. no UI-related context)"""
        # serve the fixture tree and a layer entry for node 1-c-x
        ApiReader.return_value.preamble.return_value = self._mock_preamble
        api_reader.ApiReader.return_value.layer.return_value = {
            '1-c-x': ['something']
        }
        view = preamble.PreambleView.as_view()
        path = '/preamble/1/c/x?layers=meta'
        # full page load: both sub_context and UI context are present
        response = view(RequestFactory().get(path), paragraphs='1/c/x')
        self.assertEqual(
            response.context_data['sub_context']['node']['text'], '4')
        self.assertEqual(
            response.context_data['sub_context']['node']['children'], [])
        # layer data is present
        self.assertEqual(
            response.context_data['sub_context']['node']['meta'], 'something')
        self.assertEqual(
            response.context_data['preamble_toc'],
            make_preamble_nav(self._mock_preamble['children']),
        )
        self.assertNotIn('node', response.context_data)
        # partial=true query parameter: only the inner context is built
        response = view(RequestFactory().get(path + '&partial=true'),
                        paragraphs='1/c/x')
        self.assertIn('sub_context', response.context_data)
        self.assertEqual(
            response.context_data['sub_context']['node']['text'],
            '4',
        )
        # AJAX request (X-Requested-With header) behaves like partial=true
        request = RequestFactory().get(
            path, HTTP_X_REQUESTED_WITH='XMLHttpRequest')
        response = view(request, paragraphs='1/c/x')
        self.assertIn('sub_context', response.context_data)
        self.assertEqual(
            response.context_data['sub_context']['node']['text'],
            '4',
        )
@override_settings(
PREAMBLE_INTRO={'1': {'meta': {
'publication_date': '2001-01-01',
'comments_close': (date.today() + timedelta(days=1)).isoformat()
}}})
@patch('regulations.views.preamble.ApiReader')
def test_comments_open_from_settings(self, ApiReader):
"""
Mock the PREAMBLE_INTRO data from settings for this test of the
comments being open.
"""
_, meta, _ = preamble.notice_data('1')
assert_equal(meta['comment_state'], CommentState.OPEN)
def _setup_mock_response(self, ApiReader, **kwargs):
"""Mock the ApiReader response, replacing meta data fields with
kwargs"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
notice = {
"action": "Proposed rule",
"agencies": ["Environmental Protection Agency"],
"cfr_title": 40,
"cfr_parts": ["300"],
"comments_close": "2011-09-09",
"dockets": ["EPA-HQ-SFUND-2010-1086",
"FRL-9925-69-OLEM"],
"primary_agency": "Environmental Protection Agency",
"title": ("Addition of a Subsurface Intrusion Component to the "
"Hazard Ranking System"),
"publication_date": "2011-02-02",
"regulatory_id_numbers": ["2050-AG67"],
}
notice.update(kwargs)
ApiReader.return_value.notice.return_value = notice
@patch('regulations.views.preamble.ApiReader')
def test_comments_open(self, ApiReader):
future = date.today() + timedelta(days=10)
self._setup_mock_response(ApiReader, comments_close=future.isoformat())
_, meta, _ = preamble.notice_data('1')
assert_equal(meta['comment_state'], CommentState.OPEN)
@patch('regulations.views.preamble.ApiReader')
def test_comments_prepub(self, ApiReader):
future = date.today() + timedelta(days=10)
self._setup_mock_response(ApiReader,
publication_date=future.isoformat())
_, meta, _ = preamble.notice_data('1')
assert_equal(meta['comment_state'], CommentState.PREPUB)
@patch('regulations.views.preamble.ApiReader')
def test_comments_closed(self, ApiReader):
self._setup_mock_response(ApiReader)
_, meta, _ = preamble.notice_data('1')
assert_equal(meta['comment_state'], CommentState.CLOSED)
@patch('fr_notices.navigation.CFRChangeBuilder')
@patch('regulations.generator.generator.api_reader')
@patch('regulations.views.preamble.ApiReader')
def test_get_top_level_redirect(self, ApiReader, api_reader,
CFRChangeBuilder):
ApiReader.return_value.preamble.return_value = self._mock_preamble
api_reader.ApiReader.return_value.layer.return_value = {
'1-c-x': ['something']
}
view = preamble.PreambleView.as_view()
path = '/preamble/1'
response = view(RequestFactory().get(path), paragraphs='1')
assert_equal(response.status_code, 302)
assert_equal(response.get('Location'), '/preamble/1/c')
@patch('regulations.views.preamble.ApiReader')
def test_get_404(self, ApiReader):
"""When a requested doc is not present, we should return a 404"""
ApiReader.return_value.preamble.return_value = None
view = preamble.PreambleView.as_view()
self.assertRaises(Http404, view,
RequestFactory().get('/preamble/1/c/x'),
paragraphs='1/c/x')
@patch('regulations.views.preamble.ApiReader')
def test_get_subtree_404(self, ApiReader):
"""When a requested _subtree_ is not present, we should 404"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
view = preamble.PreambleView.as_view()
self.assertRaises(Http404, view,
RequestFactory().get('/preamble/1/not/here'),
paragraphs='1/not/here')
@patch('regulations.views.preamble.ApiReader')
def test_notice_data(self, ApiReader):
"""We should try to fetch data corresponding to both the Preamble and
the Notice"""
ApiReader.return_value.preamble.return_value = self._mock_preamble
ApiReader.return_value.notice.return_value = {
'publication_date': '2002-02-02',
'comments_close': '2003-03-03',
'cfr_title': 21, 'cfr_parts': ['123']}
for doc_id in ('123_456', '123-456'):
preamble_, meta, notice = preamble.notice_data(doc_id)
self.assertEqual(preamble_, self._mock_preamble)
assert_equal(meta['comment_state'], CommentState.CLOSED)
self.assertEqual(meta['cfr_refs'],
[{'title': 21, 'parts': ['123']}])
self.assertEqual(ApiReader.return_value.preamble.call_args[0][0],
'123_456')
self.assertEqual(ApiReader.return_value.notice.call_args[0][0],
'123-456')
class CFRChangesViewTests(TestCase):
    """Tests for the CFR-changes portion of the preamble views."""
    @patch('regulations.views.preamble.ApiReader')
    @patch('regulations.views.preamble.get_appliers')
    def test_new_regtext_changes(self, get_appliers, ApiReader):
        """We can add a whole new section without explosions"""
        amendments = [{'instruction': '3. Add subpart M',
                       'changes': [
                           ['111-Subpart-M', [{'node': {
                               'label': ['111', 'Subpart', 'M'],
                               'title': 'A New Subpart',
                               'child_labels': ['111-42', '111-43',
                                                '111-44', '111-45']}}]],
                           ['111-42', [{'some': 'thing'}]],
                           ['111-43', [{'some': 'thing'}]],
                           ['111-44', [{'some': 'thing'}]],
                           ['111-45', [{'some': 'thing'}]]]},
                      {'instruction': '4. Unrelated'}]
        version_info = {'111': {'left': '234-567', 'right': '8675-309'}}
        # Section did not exist before
        ApiReader.return_value.regulation.return_value = None
        diff = {'111-44': {'op': 'added', 'node': {
            'text': 'New node text', 'node_type': 'regtext',
            'label': ['111', '44']}}}
        get_appliers.return_value = (
            layers_applier.InlineLayersApplier(),
            layers_applier.ParagraphLayersApplier(),
            layers_applier.SearchReplaceLayersApplier(),
            diff_applier.DiffApplier(diff, '111-44'))
        result = preamble.CFRChangesView.regtext_changes_context(
            amendments, version_info, '111-44', '8675-309', 0)
        # Only the amendment relevant to section 111-44 should be surfaced.
        self.assertEqual(result['instructions'], ['3. Add subpart M'])
        self.assertEqual(result['tree']['marked_up'],
                         '<ins>New node text</ins>')
        self.assertEqual(1, len(result['subparts']))
        subpart_info = result['subparts'][0]
        self.assertEqual('M', subpart_info.letter)
        self.assertEqual('A New Subpart', subpart_info.title)
        self.assertEqual(2, subpart_info.idx)
        self.assertEqual(4, len(subpart_info.urls))
        self.assertIn('111-42', subpart_info.urls[0])
        self.assertIn('111-43', subpart_info.urls[1])
        self.assertIn('111-44', subpart_info.urls[2])
        self.assertIn('111-45', subpart_info.urls[3])
| cc0-1.0 |
karllessard/tensorflow | tensorflow/python/autograph/pyct/testing/codegen.py | 47 | 6645 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Random code generation for testing/fuzzing."""
# pylint: disable=invalid-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import random
import string
import gast
import numpy as np
from tensorflow.python.autograph.pyct import templates
class NodeSampler(object):
  """Base class for weighted random draws.

  Subclasses define ``sample_map``, a mapping of candidate -> relative
  (unnormalized) weight.
  """

  sample_map = None

  def sample(self):
    """Draw one key of ``sample_map``, proportionally to its weight."""
    candidates, weights = zip(*self.sample_map.items())
    probabilities = np.array(weights, dtype='float32') / np.sum(weights)
    return np.random.choice(candidates, p=probabilities)
class StatementSampler(NodeSampler):
  """Relative weights for choosing which statement kind to emit."""

  sample_map = {
      gast.Assign: 10,
      gast.Print: 1,
      gast.If: 2,
      gast.While: 2,
      gast.For: 0,  # weight 0: never sampled
  }
class ExpressionSampler(NodeSampler):
  """Relative weights for choosing which expression kind to emit."""

  sample_map = {
      gast.UnaryOp: 1,
      gast.BinOp: 8,
      gast.Name: 1,
      gast.Call: 0,  # weight 0: generate_Call is unimplemented
  }
class CompareSampler(NodeSampler):
  """Uniform weights over the supported comparison operators."""

  sample_map = {
      gast.Eq: 1,
      gast.NotEq: 1,
      gast.Lt: 1,
      gast.LtE: 1,
      gast.Gt: 1,
      gast.GtE: 1,
      gast.Is: 1,
      gast.IsNot: 1,
  }
class BinaryOpSampler(NodeSampler):
  """Uniform weights over the supported arithmetic binary operators."""

  sample_map = {
      gast.Add: 1,
      gast.Sub: 1,
      gast.Mult: 1,
      gast.Div: 1,
      gast.FloorDiv: 1,
      gast.Mod: 1,
      gast.Pow: 1,
  }
class UnaryOpSampler(NodeSampler):
  """Unary operators; only ``-x`` is ever emitted (``+x`` has weight 0)."""

  sample_map = {gast.USub: 1, gast.UAdd: 0}
class NameSampler(NodeSampler):
  """Whether to mint a fresh variable name or reuse an existing one."""

  sample_map = {
      'new': 1,
      'existing': 1,
  }
# Upper bounds on the number of statements sampled for control-flow bodies
# and function bodies, respectively (see CodeGenerator.sample_node_list).
N_CONTROLFLOW_STATEMENTS = 10
N_FUNCTIONDEF_STATEMENTS = 10
class CodeGenerator(object):
  """Generate random syntactically-valid Python ASTs.

  `max_depth` bounds how deeply control-flow statements (If/While/For) may
  nest; `depth` tracks the current nesting level while sampling.
  """

  def __init__(self, max_depth=3, depth=0):
    self.max_depth = max_depth
    self.depth = depth

  def generate_statement(self):
    """Generate a statement node, dispatching to the correct class method."""
    desired_node = StatementSampler().sample()
    self.depth += 1

    # Enforce some constraints on generating statements.
    # E.g., if statements need at least 3 readable variables.
    # If we fail to satisfy our constraints, draw another sample.
    if desired_node in (gast.While, gast.For, gast.If):
      if self.depth > self.max_depth:
        # Bug fix: undo this frame's increment before retrying. Previously the
        # early return skipped the decrement below, so every rejected sample
        # permanently leaked +1 into self.depth, progressively flattening the
        # generated trees.
        self.depth -= 1
        return self.generate_statement()

    # Go get the generator method and run it
    method = 'generate_' + desired_node.__name__
    visitor = getattr(self, method)
    node = visitor()
    self.depth -= 1
    return node

  def sample_node_list(self, low, high, generator):
    """Generate a list of statements of random length.

    Args:
      low: Fewest number of statements to generate.
      high: Highest number of statements to generate.
      generator: Function to call to generate nodes.

    Returns:
      A list of statements.
    """
    statements = []
    for _ in range(np.random.randint(low, high)):
      statements.append(generator())
    return statements

  def generate_Name(self, ctx=gast.Load()):
    """Generate a Name node with a random "_xxxx" identifier."""
    variable_name = '_' + ''.join(
        random.choice(string.ascii_lowercase) for _ in range(4))
    return gast.Name(variable_name, ctx=ctx, annotation=None)

  def generate_BinOp(self):
    """Generate a binary arithmetic operation between two fresh names."""
    # TODO(alexbw): convert to generate_expression when we get to limit
    # expression depth.
    op = BinaryOpSampler().sample()()
    return gast.BinOp(self.generate_Name(), op, self.generate_Name())

  def generate_Compare(self):
    """Generate a single comparison between two fresh names."""
    op = CompareSampler().sample()()
    return gast.Compare(self.generate_Name(), [op], [self.generate_Name()])

  def generate_UnaryOp(self):
    """Generate a unary operation applied to a fresh name."""
    operand = self.generate_Name()
    op = UnaryOpSampler().sample()()
    return gast.UnaryOp(op, operand)

  def generate_expression(self):
    """Generate an expression node of a randomly sampled kind."""
    desired_node = ExpressionSampler().sample()
    # Go get the generator method and run it
    method = 'generate_' + desired_node.__name__
    generator = getattr(self, method)
    return generator()

  def generate_Assign(self):
    """Generate an Assign node."""
    # Generate left-hand side
    target_node = self.generate_Name(gast.Store())
    # Generate right-hand side
    value_node = self.generate_expression()
    # Put it all together
    node = gast.Assign(targets=[target_node], value=value_node)
    return node

  def generate_If(self):
    """Generate an If node."""
    test = self.generate_Compare()
    # Generate true branch statements
    body = self.sample_node_list(
        low=1,
        high=N_CONTROLFLOW_STATEMENTS // 2,
        generator=self.generate_statement)
    # Generate false branch statements
    orelse = self.sample_node_list(
        low=1,
        high=N_CONTROLFLOW_STATEMENTS // 2,
        generator=self.generate_statement)
    node = gast.If(test, body, orelse)
    return node

  def generate_While(self):
    """Generate a While node."""
    test = self.generate_Compare()
    body = self.sample_node_list(
        low=1, high=N_CONTROLFLOW_STATEMENTS, generator=self.generate_statement)
    orelse = []  # not generating else statements
    node = gast.While(test, body, orelse)
    return node

  def generate_Call(self):
    """Unsupported; Call has sampling weight 0 in ExpressionSampler."""
    raise NotImplementedError

  def generate_Return(self):
    """Generate a Return of a random expression."""
    return gast.Return(self.generate_expression())

  def generate_Print(self):
    """Generate a print statement of a random expression."""
    return templates.replace('print(x)', x=self.generate_expression())[0]

  def generate_FunctionDef(self):
    """Generate a FunctionDef node."""
    # Generate the arguments, register them as available
    arg_vars = self.sample_node_list(
        low=2, high=10, generator=lambda: self.generate_Name(gast.Param()))
    args = gast.arguments(arg_vars, None, [], [], None, [])
    # Generate the function body
    body = self.sample_node_list(
        low=1, high=N_FUNCTIONDEF_STATEMENTS, generator=self.generate_statement)
    body.append(self.generate_Return())
    fn_name = self.generate_Name().id
    node = gast.FunctionDef(fn_name, args, body, (), None)
    return node
def generate_random_functiondef():
  """Convenience wrapper: build one random FunctionDef AST node."""
  return CodeGenerator().generate_FunctionDef()
| apache-2.0 |
epickrram/perf-workshop | src/main/python/perf_sched_stat_runtime.py | 1 | 1031 | __author__ = 'pricem'
import re
import sys
# Matches a perf trace timestamp such as " 12345.678901:" (seconds.microseconds).
TIMESTAMP_REGEX=".* ([0-9]{1,12}\.[0-9]{6}):.*"
# Matches the "runtime=<value> " field (nanoseconds) of a sched_stat_runtime event.
RUNTIME_REGEX=".* runtime=([0-9]+) "
def parse_timestamp(raw):
    """Convert a perf timestamp string ("seconds.microseconds") to integer
    nanoseconds. (Parameter renamed: it previously shadowed builtin `input`.)"""
    return int(float(raw) * 1000000) * 1000
def parse_runtime(raw):
    """Convert the runtime field (already in nanoseconds) to an int.
    (Parameter renamed: it previously shadowed builtin `input`.)"""
    return int(raw)
if len(sys.argv) == 1:
print "usage: " + sys.argv[0] + " <pid> <input_file>"
last_timestamp = 0
for line in open(sys.argv[2]).readlines():
if line.find("sched_stat_runtime") > 0 and line.find("pid=" + sys.argv[1]) > 0:
timestamp_match = re.search(TIMESTAMP_REGEX, line)
runtime_match = re.search(RUNTIME_REGEX, line)
timestamp_nanos = parse_timestamp(timestamp_match.group(1))
runtime_nanos = parse_runtime(runtime_match.group(1))
if last_timestamp != 0:
runtime_reporting_delta_nanos = timestamp_nanos - last_timestamp
nanos_not_on_cpu = abs(runtime_reporting_delta_nanos - runtime_nanos)
print str(timestamp_nanos) + " " + str(nanos_not_on_cpu)
last_timestamp = timestamp_nanos | apache-2.0 |
veger/ansible | test/units/modules/network/vyos/test_vyos_command.py | 45 | 4175 | # (c) 2016 Red Hat Inc.
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import json
from units.compat.mock import patch
from ansible.modules.network.vyos import vyos_command
from units.modules.utils import set_module_args
from .vyos_module import TestVyosModule, load_fixture
class TestVyosCommandModule(TestVyosModule):
    """Unit tests for the vyos_command module; run_commands is mocked so no
    real device connection is ever made."""

    module = vyos_command

    def setUp(self):
        super(TestVyosCommandModule, self).setUp()
        # Patch run_commands so command output comes from fixtures instead of
        # a live VyOS device.
        self.mock_run_commands = patch('ansible.modules.network.vyos.vyos_command.run_commands')
        self.run_commands = self.mock_run_commands.start()

    def tearDown(self):
        super(TestVyosCommandModule, self).tearDown()
        self.mock_run_commands.stop()

    def load_fixtures(self, commands=None):
        """Make run_commands return canned output read from fixture files."""
        def load_from_file(*args, **kwargs):
            module, commands = args
            output = list()
            for item in commands:
                try:
                    command = item['command']
                except (ValueError, KeyError, TypeError):
                    # `item` may be a bare string rather than a dict; use it
                    # verbatim. (Bug fix: only ValueError was caught before,
                    # which neither a dict nor a string lookup raises, so this
                    # fallback was unreachable.)
                    command = item
                filename = str(command).replace(' ', '_')
                output.append(load_fixture(filename))
            return output
        self.run_commands.side_effect = load_from_file

    def test_vyos_command_simple(self):
        set_module_args(dict(commands=['show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 1)
        self.assertTrue(result['stdout'][0].startswith('Version: VyOS'))

    def test_vyos_command_multiple(self):
        set_module_args(dict(commands=['show version', 'show version']))
        result = self.execute_module()
        self.assertEqual(len(result['stdout']), 2)
        self.assertTrue(result['stdout'][0].startswith('Version: VyOS'))

    def test_vyos_command_wait_for(self):
        wait_for = 'result[0] contains "VyOS maintainers"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module()

    def test_vyos_command_wait_for_fails(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for))
        self.execute_module(failed=True)
        # The module's default retry count is 10.
        self.assertEqual(self.run_commands.call_count, 10)

    def test_vyos_command_retries(self):
        wait_for = 'result[0] contains "test string"'
        set_module_args(dict(commands=['show version'], wait_for=wait_for, retries=2))
        self.execute_module(failed=True)
        self.assertEqual(self.run_commands.call_count, 2)

    def test_vyos_command_match_any(self):
        wait_for = ['result[0] contains "VyOS maintainers"',
                    'result[0] contains "test string"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='any'))
        self.execute_module()

    def test_vyos_command_match_all(self):
        wait_for = ['result[0] contains "VyOS maintainers"',
                    'result[0] contains "maintainers@vyos.net"']
        set_module_args(dict(commands=['show version'], wait_for=wait_for, match='all'))
        self.execute_module()

    def test_vyos_command_match_all_failure(self):
        wait_for = ['result[0] contains "VyOS maintainers"',
                    'result[0] contains "test string"']
        commands = ['show version', 'show version']
        set_module_args(dict(commands=commands, wait_for=wait_for, match='all'))
        self.execute_module(failed=True)
| gpl-3.0 |
krzykwas/rhqagent | pyagent/event/CIMSubscriber.py | 1 | 4874 | # -*- coding: utf-8 -*-
#
# Krzysztof „krzykwas” Kwaśniewski
# Gdańsk, 05-11-2012
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import logging
import pywbem
class CIMSubscriber(object):
	"""
	Subscribes or unsubscribes a CIMListener to a CIM server to receive indications
	for a certain CIM class.
	"""
	def __init__(self, uri, username, password, namespace="root/cimv2"):
		"""
		@param uri: CIM server uri, for instance: https://localhost:5989
		@param username, password: CIM server credentials
		@param namespace: CIM namespace used for the WBEM connection
		"""
		# CIM class names of the filter/handler instances this class creates.
		self.__FILTER_CREATION_CLASS_NAME = "CIM_IndicationFilter"
		self.__HANDLER_CREATION_CLASS_NAME = "CIM_IndicationHandlerCIMXML"
		# Filters, handlers and subscriptions live in the interop namespace.
		self.__INTEROP_NAMESPACE = "root/PG_InterOp"
		self.__logger__ = logging.getLogger(__name__)
		self.__uri = uri
		self.__username = username
		self.__password = password
		self.__namespace = namespace
		self.__client = pywbem.WBEMConnection(self.__uri, (self.__username, self.__password), self.__namespace)
	def subscribe(self, cimClassName, listenerPort):
		"""
		(Re-)registers with a CIM server.
		NOTE(review): listenerPort is currently unused -- __createHandler sets
		the indication Destination to the CIM server uri rather than to an
		address built from this port; confirm the intended destination.
		"""
		self.unsubscribe(cimClassName)
		self.__logger__.debug("Registering a new subscription.")
		handlerName = self.__createHandler()
		filterName = self.__createFilter(cimClassName)
		self.__createSubscription(handlerName, filterName)
	def unsubscribe(self, cimClassName):
		"""
		Cleans up old registrations.
		"""
		self.__logger__.debug("Cleaning up old registrations.")
		subscriptionName = self.__getSubscriptionName()
		handlerName = self.__getCIMInstanceName(self.__HANDLER_CREATION_CLASS_NAME)
		filterName = self.__getCIMInstanceName(self.__FILTER_CREATION_CLASS_NAME)
		# The subscription is deleted first, before the filter/handler it links.
		for name in subscriptionName, filterName, handlerName:
			try:
				self.__client.DeleteInstance(name)
			except Exception as e:
				# Best effort: the instances may simply not exist yet.
				self.__logger__.error(e)
	def __getSystemName(self):
		# Return the SystemName recorded on a previously created handler, if any.
		for instance in self.__client.EnumerateInstances(self.__HANDLER_CREATION_CLASS_NAME, self.__INTEROP_NAMESPACE):
			if instance["name"] == "PyAgentIndication":
				return instance["systemName"]
		return None
	def __getKeybindings(self, creationClassName):
		# Common key properties identifying this agent's CIM instances.
		keybindings = {
			"SystemCreationClassName": "CIM_ComputerSystem",
			"Name": "PyAgentIndication",
			"CreationClassName": creationClassName
		}
		systemName = self.__getSystemName()
		if systemName is not None:
			keybindings["SystemName"] = systemName
		return keybindings
	def __getCIMInstanceName(self, creationClassName):
		# Build the CIMInstanceName (object path) for one of our instances.
		return pywbem.CIMInstanceName(
			creationClassName,
			keybindings=self.__getKeybindings(creationClassName),
			namespace=self.__INTEROP_NAMESPACE,
		)
	def __getSubscriptionName(self):
		# A subscription is keyed by references to its filter and handler.
		keybindings = {
			"Filter": self.__getCIMInstanceName(self.__FILTER_CREATION_CLASS_NAME),
			"Handler": self.__getCIMInstanceName(self.__HANDLER_CREATION_CLASS_NAME)
		}
		return pywbem.CIMInstanceName(
			"CIM_IndicationSubscription",
			keybindings=keybindings,
			namespace=self.__INTEROP_NAMESPACE
		)
	def __createHandler(self):
		"""
		@return: new handler instance's name
		"""
		handlerInstance = pywbem.CIMInstance(
			self.__HANDLER_CREATION_CLASS_NAME,
			properties=self.__getKeybindings(self.__HANDLER_CREATION_CLASS_NAME),
			path=self.__getCIMInstanceName(self.__HANDLER_CREATION_CLASS_NAME)
		)
		# NOTE(review): Destination is where indications get delivered; using
		# the server's own uri here looks suspicious (see subscribe()).
		handlerInstance["Destination"] = self.__uri
		return self.__client.CreateInstance(handlerInstance)
	def __createFilter(self, cimClassName):
		"""
		@return: new filter instance's name
		"""
		filterInstance = pywbem.CIMInstance(
			self.__FILTER_CREATION_CLASS_NAME,
			properties=self.__getKeybindings(self.__FILTER_CREATION_CLASS_NAME),
			path=self.__getCIMInstanceName(self.__FILTER_CREATION_CLASS_NAME)
		)
		# Subscribe to every indication of the requested class.
		filterInstance["SourceNamespace"] = "root/cimv2"
		filterInstance["Query"] = "SELECT * FROM {0}".format(cimClassName)
		filterInstance["QueryLanguage"] = "WQL"
		return self.__client.CreateInstance(filterInstance)
	def __createSubscription(self, handlerName, filterName):
		# Tie the filter and handler together into an active subscription.
		subscriptionInstance = pywbem.CIMInstance(
			"CIM_IndicationSubscription",
			path=self.__getSubscriptionName()
		)
		subscriptionInstance["Filter"] = filterName
		subscriptionInstance["Handler"] = handlerName
		try:
			self.__client.CreateInstance(subscriptionInstance)
		except Exception as e:
			self.__logger__.error(e)
| gpl-3.0 |
makacodewalker/etsgh | django/core/mail/backends/console.py | 308 | 1295 | """
Email backend that writes messages to console instead of sending them.
"""
import sys
import threading
from django.core.mail.backends.base import BaseEmailBackend
class EmailBackend(BaseEmailBackend):
    """Email backend that writes messages to a stream instead of sending
    them (stdout by default)."""

    def __init__(self, *args, **kwargs):
        # Capture the target stream and create the lock guarding it.
        self.stream = kwargs.pop('stream', sys.stdout)
        self._lock = threading.RLock()
        super(EmailBackend, self).__init__(*args, **kwargs)

    def _write_message(self, message):
        # One message followed by a 79-dash separator line.
        self.stream.write('%s\n' % message.message().as_string())
        self.stream.write('-' * 79)
        self.stream.write('\n')
        self.stream.flush()  # flush after each message

    def send_messages(self, email_messages):
        """Write all messages to the stream in a thread-safe way."""
        if not email_messages:
            return
        self._lock.acquire()
        try:
            # The try-except is nested to allow for
            # Python 2.4 support (Refs #12147)
            try:
                stream_created = self.open()
                for message in email_messages:
                    self._write_message(message)
                if stream_created:
                    self.close()
            except:
                if not self.fail_silently:
                    raise
        finally:
            self._lock.release()
        return len(email_messages)
| bsd-3-clause |
birdonwheels5/p2pool-apollo | nattraverso/utils.py | 288 | 1563 | """
Various utility functions used in the nattraverso package.
@author: Raphael Slinckx
@copyright: Copyright 2005
@license: LGPL
@contact: U{raphael@slinckx.net<mailto:raphael@slinckx.net>}
@version: 0.1.0
"""
__revision__ = "$id"
def is_rfc1918_ip(ip):
    """
    Checks if the given ip address is a rfc1918 one.

    @param ip: The ip address to test
    @type ip: a string "x.x.x.x" (or an already-converted int32 number)
    @return: True if it's a LAN address, False otherwise
    """
    # Accept dotted-quad strings by converting them to a number first.
    if isinstance(ip, basestring):
        ip = _ip_to_number(ip)
    # Match the masked address against each private network prefix.
    for net, mask in _nets:
        if ip&mask == net:
            return True
    return False
def is_bogus_ip(ip):
    """
    Checks if the given ip address is bogus, i.e. 0.0.0.0 or 127.0.0.1.

    @param ip: The ip address to test
    @type ip: a string "x.x.x.x"
    @return: True if it's bogus, False otherwise
    """
    # Unspecified (0.x.x.x) and loopback (127.x.x.x) addresses are unusable.
    for prefix in ('0.', '127.'):
        if ip.startswith(prefix):
            return True
    return False
def _ip_to_number(ipstr):
    """
    Translate a string ip address to a packed number.

    @param ipstr: the ip address to transform
    @type ipstr: a string "x.x.x.x"
    @return: an int32 number representing the ip address
    """
    # Pad with zeros so partial addresses like "10" still yield four octets.
    net = [ int(digit) for digit in ipstr.split('.') ] + [ 0, 0, 0 ]
    net = net[:4]
    # Fold the four octets big-endian into a single (long) integer.
    return ((((((0L+net[0])<<8) + net[1])<<8) + net[2])<<8) +net[3]
# List of rfc1918 net/mask
# (loopback 127/8 is also included here, although it is not strictly rfc1918)
_rfc1918_networks = [('127', 8), ('192.168', 16), ('10', 8), ('172.16', 12)]
# Machine readable form of the above:
# (network-number, 32-bit netmask) pairs; the mask keeps the top `mask` bits.
_nets = [(_ip_to_number(net), (2L**32 -1)^(2L**(32-mask)-1))
        for net, mask in _rfc1918_networks]
| gpl-3.0 |
ScholzVolkmer/django-cms | cms/admin/change_list.py | 12 | 9158 | # -*- coding: utf-8 -*-
import bisect
from cms.models import Title, Page, EmptyTitle
from cms.utils import get_language_list
from cms.utils.compat import DJANGO_1_5
from cms.utils.conf import get_cms_setting
from cms.utils.permissions import get_user_sites_queryset
from django.contrib.admin.views.main import ChangeList, ALL_VAR, IS_POPUP_VAR, \
ORDER_TYPE_VAR, ORDER_VAR, SEARCH_VAR
from django.contrib.sites.models import Site
import django
COPY_VAR = "copy"
def cache_tree_children(queryset):
    """
    For all items in the queryset, set the '_cached_children' attribute to a
    list. This attribute is in turn used by the 'get_children' method on the
    item, which would otherwise (if '_cached_children' is not set) cause a
    database query.

    The queryset must be ordered by 'lft', or the function will put the
    children in the wrong order.
    """
    by_pk = {}
    # First pass: index every node and give it an empty child list. Using a
    # separate pass keeps this working even when the mptt tree is broken;
    # django caches querysets internally, so the extra iteration is cheap.
    for node in queryset:
        by_pk[node.pk] = node
        node._cached_children = []
    # Second pass: attach each node to its parent, preserving 'lft' order.
    for node in queryset:
        parent = by_pk.get(node.parent_id)
        if parent:
            parent._cached_children.append(node)
class CMSChangeList(ChangeList):
"""
Renders a Changelist - In our case it looks like a tree - it's the list of
*instances* in the Admin.
It is usually responsible for pagination (not here though, we have a
treeview)
"""
real_queryset = False
def __init__(self, request, *args, **kwargs):
from cms.utils.plugins import current_site
self._current_site = current_site(request)
super(CMSChangeList, self).__init__(request, *args, **kwargs)
try:
self.queryset = self.get_query_set(request)
except:
raise
self.get_results(request)
if self._current_site:
request.session['cms_admin_site'] = self._current_site.pk
self.set_sites(request)
def get_query_set(self, request=None):
if COPY_VAR in self.params:
del self.params[COPY_VAR]
if 'language' in self.params:
del self.params['language']
if 'page_id' in self.params:
del self.params['page_id']
if django.VERSION[1] > 3:
qs = super(CMSChangeList, self).get_query_set(request).drafts()
else:
qs = super(CMSChangeList, self).get_query_set().drafts()
if request:
site = self.current_site()
permissions = Page.permissions.get_change_id_list(request.user, site)
if permissions != Page.permissions.GRANT_ALL:
qs = qs.filter(pk__in=permissions)
# root_query_set is a read-only property in Django 1.6
# and will be removed in Django 1.8.
queryset_attr = 'root_query_set' if DJANGO_1_5 else 'root_queryset'
setattr(self, queryset_attr, self.root_query_set.filter(pk__in=permissions))
self.real_queryset = True
qs = qs.filter(site=self._current_site)
return qs
def is_filtered(self):
from cms.utils.plugins import SITE_VAR
lookup_params = self.params.copy() # a dictionary of the query string
for i in (ALL_VAR, ORDER_VAR, ORDER_TYPE_VAR, SEARCH_VAR, IS_POPUP_VAR, SITE_VAR, 'language', 'page_id'):
if i in lookup_params:
del lookup_params[i]
if not lookup_params.items() and not self.query:
return False
return True
def get_results(self, request):
if self.real_queryset:
super(CMSChangeList, self).get_results(request)
if not self.is_filtered():
self.full_result_count = self.result_count = self.root_query_set.count()
else:
self.full_result_count = self.root_query_set.count()
def set_items(self, request):
site = self.current_site()
# Get all the pages, ordered by tree ID (it's convenient to build the
# tree using a stack now)
pages = self.get_query_set(request).drafts().order_by('tree_id', 'lft').select_related('publisher_public')
# Get lists of page IDs for which the current user has
# "permission to..." on the current site.
if get_cms_setting('PERMISSION'):
perm_edit_ids = Page.permissions.get_change_id_list(request.user, site)
perm_publish_ids = Page.permissions.get_publish_id_list(request.user, site)
perm_advanced_settings_ids = Page.permissions.get_advanced_settings_id_list(request.user, site)
restricted_ids = Page.permissions.get_restricted_id_list(site)
if perm_edit_ids and perm_edit_ids != Page.permissions.GRANT_ALL:
pages = pages.filter(pk__in=perm_edit_ids)
root_pages = []
pages = list(pages)
all_pages = pages[:] # That is, basically, a copy.
# Unfortunately we cannot use the MPTT builtin code for pre-caching
# the children here, because MPTT expects the tree to be 'complete'
# and otherwise complaints about 'invalid item order'
cache_tree_children(pages)
ids = dict((page.id, page) for page in pages)
for page in pages:
children = list(page.get_children())
# If the parent page is not among the nodes shown, this node should
# be a "root node". The filtering for this has already been made, so
# using the ids dictionary means this check is constant time
page.root_node = page.parent_id not in ids
if get_cms_setting('PERMISSION'):
# caching the permissions
page.permission_edit_cache = perm_edit_ids == Page.permissions.GRANT_ALL or page.pk in perm_edit_ids
page.permission_publish_cache = perm_publish_ids == Page.permissions.GRANT_ALL or page.pk in perm_publish_ids
page.permission_advanced_settings_cache = perm_advanced_settings_ids == Page.permissions.GRANT_ALL or page.pk in perm_advanced_settings_ids
page.permission_user_cache = request.user
page.permission_restricted = page.pk in restricted_ids
if page.root_node or self.is_filtered():
page.last = True
if len(children):
# TODO: WTF!?!
# The last one is not the last... wait, what?
# children should NOT be a queryset. If it is, check that
# your django-mptt version is 0.5.1
children[-1].last = False
page.menu_level = 0
root_pages.append(page)
if page.parent_id:
page.get_cached_ancestors(ascending=True)
else:
page.ancestors_ascending = []
# Because 'children' is the reverse-FK accessor for the 'parent'
# FK from Page->Page, we have to use wrong English here and set
# an attribute called 'childrens'. We are aware that this is WRONG
# but what should we do?
# If the queryset is filtered, do NOT set the 'childrens' attribute
# since *ALL* pages will be in the 'root_pages' list and therefore
# be displayed. (If the queryset is filtered, the result is not a
# tree but rather a flat list).
if self.is_filtered():
page.childrens = []
else:
page.childrens = children
for page in all_pages:
page.title_cache = {}
page.all_languages = []
if page.publisher_public_id:
page.publisher_public.title_cache = {}
page.publisher_public.all_languages = []
ids[page.publisher_public_id] = page.publisher_public
titles = Title.objects.filter(page__in=ids)
insort = bisect.insort # local copy to avoid globals lookup in the loop
for title in titles:
page = ids[title.page_id]
page.title_cache[title.language] = title
if not title.language in page.all_languages:
insort(page.all_languages, title.language)
site_id = self.current_site()
languages = get_language_list(site_id)
for page in all_pages:
for lang in languages:
if not lang in page.title_cache:
page.title_cache[lang] = EmptyTitle(lang)
self.root_pages = root_pages
    def get_items(self):
        """Return the list of root pages assembled by the tree-building code."""
        return self.root_pages
def set_sites(self, request):
"""Sets sites property to current instance - used in tree view for
sites combo.
"""
if get_cms_setting('PERMISSION'):
self.sites = get_user_sites_queryset(request.user)
else:
self.sites = Site.objects.all()
self.has_access_to_multiple_sites = len(self.sites) > 1
    def current_site(self):
        """Return the site stored on this instance by earlier setup code."""
        return self._current_site
| bsd-3-clause |
subramani95/neutron | setup.py | 608 | 1045 | #!/usr/bin/env python
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
import setuptools
# In python < 2.7.4, a lazy loading of package `pbr` will break
# setuptools if some other modules registered functions in `atexit`.
# solution from: http://bugs.python.org/issue15881#msg170215
# Work around http://bugs.python.org/issue15881#msg170215: importing
# multiprocessing up front keeps `atexit` handlers usable when pbr is
# lazily loaded on Python < 2.7.4.
try:
    import multiprocessing # noqa
except ImportError:
    pass
# All packaging metadata is delegated to pbr (read from setup.cfg).
setuptools.setup(
    setup_requires=['pbr'],
    pbr=True)
| apache-2.0 |
mgagne/nova | nova/virt/libvirt/utils.py | 2 | 19553 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright (c) 2010 Citrix Systems, Inc.
# Copyright (c) 2011 Piston Cloud Computing, Inc
# Copyright (c) 2011 OpenStack Foundation
# (c) Copyright 2013 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import errno
import os
import re
from lxml import etree
from oslo.config import cfg
from oslo_concurrency import processutils
from nova.compute import arch
from nova.i18n import _
from nova.i18n import _LI
from nova.i18n import _LW
from nova.openstack.common import log as logging
from nova import utils
from nova.virt import images
from nova.virt.libvirt import config as vconfig
from nova.virt import volumeutils
libvirt_opts = [
cfg.BoolOpt('snapshot_compression',
default=False,
help='Compress snapshot images when possible. This '
'currently applies exclusively to qcow2 images'),
]
CONF = cfg.CONF
CONF.register_opts(libvirt_opts, 'libvirt')
CONF.import_opt('instances_path', 'nova.compute.manager')
LOG = logging.getLogger(__name__)
def execute(*args, **kwargs):
    """Thin wrapper around nova.utils.execute (eases stubbing in tests)."""
    return utils.execute(*args, **kwargs)
def get_iscsi_initiator():
    """Delegate to volumeutils to read this host's iSCSI initiator name."""
    return volumeutils.get_iscsi_initiator()
def get_fc_hbas():
    """Get the Fibre Channel HBA information.

    Runs ``systool -c fc_host -v`` and parses its ``key = "value"`` output
    into one dict per HBA port. Returns an empty list when systool is not
    installed; raises RuntimeError when it produced no output at all.
    """
    out = None
    try:
        out, err = execute('systool', '-c', 'fc_host', '-v',
                           run_as_root=True)
    except processutils.ProcessExecutionError as exc:
        # rootwrap is in use but systool is missing
        # (96 == nova.cmd.rootwrap.RC_NOEXECFOUND)
        if exc.exit_code == 96:
            LOG.warn(_LW("systool is not installed"))
            return []
    except OSError as exc:
        # rootwrap is NOT in use and systool is missing
        if exc.errno == errno.ENOENT:
            LOG.warn(_LW("systool is not installed"))
            return []
    if out is None:
        raise RuntimeError(_("Cannot find any Fibre Channel HBAs"))
    hbas = []
    current = {}
    previous = None
    # skip the two header lines systool prints before the data
    for raw in out.split('\n')[2:]:
        stripped = raw.strip()
        if stripped == '' and previous == '':
            # a pair of blank lines separates HBA ports
            if current:
                hbas.append(current)
                current = {}
        else:
            parts = stripped.split('=')
            if len(parts) == 2:
                key = parts[0].strip().replace(" ", "")
                current[key] = parts[1].strip().replace('"', '')
        previous = stripped
    return hbas
def get_fc_hbas_info():
    """Get Fibre Channel WWNs and device paths from the system, if any.

    Modern Linux kernels expose FC HBAs under /sys; systool reads them.
    """
    return [{'port_name': hba['port_name'].replace('0x', ''),
             'node_name': hba['node_name'].replace('0x', ''),
             'host_device': hba['ClassDevice'],
             'device_path': hba['ClassDevicepath']}
            for hba in get_fc_hbas()]
def get_fc_wwpns():
    """Get Fibre Channel WWPNs from the system, if any.

    Only ports whose state is 'Online' are reported.
    """
    return [hba['port_name'].replace('0x', '')
            for hba in get_fc_hbas()
            if hba['port_state'] == 'Online']
def get_fc_wwnns():
    """Get Fibre Channel WWNNs from the system, if any.

    Only ports whose state is 'Online' are reported.
    """
    return [hba['node_name'].replace('0x', '')
            for hba in get_fc_hbas()
            if hba['port_state'] == 'Online']
def create_image(disk_format, path, size):
    """Create a disk image of the given format and size.

    :param disk_format: Disk image format (as known by qemu-img)
    :param path: Desired location of the disk image
    :param size: Desired size of disk image. May be given as an int or
                 a string. If given as an int, it will be interpreted
                 as bytes. If it's a string, it should consist of a number
                 with an optional suffix ('K' for Kibibytes,
                 'M' for Mebibytes, 'G' for Gibibytes, 'T' for Tebibytes).
                 If no suffix is given, it will be interpreted as bytes.
    """
    # qemu-img parses the size suffix itself; no validation is done here.
    execute('qemu-img', 'create', '-f', disk_format, path, size)
def create_cow_image(backing_file, path, size=None):
    """Create a COW (qcow2) image, optionally layered on a backing file.

    :param backing_file: Existing image on which to base the COW image
    :param path: Desired location of the COW image
    :param size: Optional virtual size (qemu-img size syntax)
    """
    base_cmd = ['qemu-img', 'create', '-f', 'qcow2']
    cow_opts = []
    if backing_file:
        cow_opts += ['backing_file=%s' % backing_file]
        base_details = images.qemu_img_info(backing_file)
    else:
        base_details = None
    # This doesn't seem to get inherited so force it to...
    # http://paste.ubuntu.com/1213295/
    # TODO(harlowja) probably file a bug against qemu-img/qemu
    if base_details and base_details.cluster_size is not None:
        cow_opts += ['cluster_size=%s' % base_details.cluster_size]
    # For now don't inherit this due the following discussion...
    # See: http://www.gossamer-threads.com/lists/openstack/dev/10592
    # if 'preallocation' in base_details:
    #     cow_opts += ['preallocation=%s' % base_details['preallocation']]
    if base_details and base_details.encrypted:
        cow_opts += ['encryption=%s' % base_details.encrypted]
    if size is not None:
        cow_opts += ['size=%s' % size]
    if cow_opts:
        # Format as a comma separated list passed via a single -o flag
        csv_opts = ",".join(cow_opts)
        cow_opts = ['-o', csv_opts]
    cmd = base_cmd + cow_opts + [path]
    execute(*cmd)
def pick_disk_driver_name(hypervisor_version, is_block_dev=False):
    """Pick the libvirt primary backend driver name.

    If the hypervisor supports multiple backend drivers we have to tell
    libvirt which one should be used.

    Xen supports the following drivers: "tap", "tap2", "phy", "file", or
    "qemu", being "qemu" the preferred one. Qemu only supports "qemu".

    :param hypervisor_version: hypervisor version as an integer
                               (e.g. 4002000 for Xen 4.2.0)
    :param is_block_dev: True when the backing storage is a block device
    :returns: driver_name or None
    """
    if CONF.libvirt.virt_type == "xen":
        if is_block_dev:
            return "phy"
        else:
            # 4002000 == 4.2.0
            if hypervisor_version >= 4002000:
                # Probe whether the legacy xend toolstack is running; if it
                # is absent libvirt falls back to libxl, which wants "qemu".
                try:
                    execute('xend', 'status',
                            run_as_root=True, check_exit_code=True)
                except OSError as exc:
                    if exc.errno == errno.ENOENT:
                        LOG.debug("xend is not found")
                        # libvirt will try to use libxl toolstack
                        return 'qemu'
                    else:
                        raise
                except processutils.ProcessExecutionError as exc:
                    LOG.debug("xend is not started")
                    # libvirt will try to use libxl toolstack
                    return 'qemu'
            # libvirt will use xend/xm toolstack
            try:
                out, err = execute('tap-ctl', 'check', check_exit_code=False)
                if out == 'ok\n':
                    # 4000000 == 4.0.0
                    if hypervisor_version > 4000000:
                        return "tap2"
                    else:
                        return "tap"
                else:
                    LOG.info(_LI("tap-ctl check: %s"), out)
            except OSError as exc:
                if exc.errno == errno.ENOENT:
                    LOG.debug("tap-ctl tool is not installed")
                else:
                    raise
            # blktap is unavailable: fall back to plain file-backed disks
            return "file"
    elif CONF.libvirt.virt_type in ('kvm', 'qemu'):
        return "qemu"
    else:
        # UML doesn't want a driver_name set
        return None
def get_disk_size(path):
    """Get the (virtual) size of a disk image.

    :param path: Path to the disk image
    :returns: Size (in bytes) of the given disk image as it would be seen
              by a virtual machine.
    """
    size = images.qemu_img_info(path).virtual_size
    return int(size)
def get_disk_backing_file(path, basename=True):
    """Get the backing file of a disk image.

    :param path: Path to the disk image
    :param basename: when True (the default) return only the file name of
                     the backing store, not its full path
    :returns: a path to the image's backing store
    """
    backing_file = images.qemu_img_info(path).backing_file
    if backing_file and basename:
        backing_file = os.path.basename(backing_file)
    return backing_file
def copy_image(src, dest, host=None):
    """Copy a disk image to an existing directory.

    :param src: Source image
    :param dest: Destination path
    :param host: Remote host; when given, the copy goes over rsync or scp
    """
    if not host:
        # We shell out to cp because that will intelligently copy
        # sparse files. I.E. holes will not be written to DEST,
        # rather recreated efficiently. In addition, since
        # coreutils 8.11, holes can be read efficiently too.
        execute('cp', src, dest)
    else:
        dest = "%s:%s" % (host, dest)
        # Try rsync first as that can compress and create sparse dest files.
        # Note however that rsync currently doesn't read sparse files
        # efficiently: https://bugzilla.samba.org/show_bug.cgi?id=8918
        # At least network traffic is mitigated with compression.
        try:
            # Do a relatively light weight test first, so that we
            # can fall back to scp, without having run out of space
            # on the destination for example.
            execute('rsync', '--sparse', '--compress', '--dry-run', src, dest)
        except processutils.ProcessExecutionError:
            execute('scp', src, dest)
        else:
            execute('rsync', '--sparse', '--compress', src, dest)
def write_to_file(path, contents, umask=None):
    """Write the given contents to a file.

    :param path: Destination file
    :param contents: Desired contents of the file
    :param umask: Umask to set while creating this file (reset afterwards);
                  note that 0 is a valid umask value
    """
    # Compare against None explicitly: the old truthiness test silently
    # ignored a requested umask of 0, which is a legitimate value.
    if umask is not None:
        saved_umask = os.umask(umask)
    try:
        with open(path, 'w') as f:
            f.write(contents)
    finally:
        if umask is not None:
            os.umask(saved_umask)
def chown(path, owner):
    """Change ownership of file or directory.

    :param path: File or directory whose ownership to change
    :param owner: Desired new owner (given as uid or username)
    """
    # giving a file away to another user requires root
    execute('chown', owner, path, run_as_root=True)
def _id_map_to_config(id_map):
return "%s:%s:%s" % (id_map.start, id_map.target, id_map.count)
def chown_for_id_maps(path, id_maps):
    """Change ownership of file or directory for an id mapped
    environment.

    :param path: File or directory whose ownership to change
    :param id_maps: List of type LibvirtConfigGuestIDMap
    """
    # Split the combined list into uid and gid mappings, each rendered as
    # comma separated start:target:count triples for nova-idmapshift.
    uid_maps_str = ','.join([_id_map_to_config(id_map) for id_map in id_maps if
                             isinstance(id_map,
                                        vconfig.LibvirtConfigGuestUIDMap)])
    gid_maps_str = ','.join([_id_map_to_config(id_map) for id_map in id_maps if
                             isinstance(id_map,
                                        vconfig.LibvirtConfigGuestGIDMap)])
    execute('nova-idmapshift', '-i', '-u', uid_maps_str,
            '-g', gid_maps_str, path, run_as_root=True)
def extract_snapshot(disk_path, source_fmt, out_path, dest_fmt):
    """Extract a snapshot from a disk image.

    Note that nobody should write to the disk image during this operation.

    :param disk_path: Path to disk image
    :param source_fmt: qemu-img format of the source image
    :param out_path: Desired path of extracted snapshot
    :param dest_fmt: desired qemu-img format of the extracted snapshot
    """
    # NOTE(markmc): ISO is just raw to qemu-img
    if dest_fmt == 'iso':
        dest_fmt = 'raw'
    qemu_img_cmd = ('qemu-img', 'convert', '-f', source_fmt, '-O', dest_fmt)
    # Conditionally enable compression of snapshots.
    if CONF.libvirt.snapshot_compression and dest_fmt == "qcow2":
        qemu_img_cmd += ('-c',)
    qemu_img_cmd += (disk_path, out_path)
    execute(*qemu_img_cmd)
def load_file(path):
    """Return the entire contents of the file at *path*.

    :param path: File to read
    """
    with open(path, 'r') as handle:
        return handle.read()
def file_open(*args, **kwargs):
    """Open file.

    See the built-in open() documentation for more details.

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    # open() is an alias of file() on Python 2 and the only spelling that
    # exists on Python 3, so this is a behavior-preserving modernization.
    return open(*args, **kwargs)
def file_delete(path):
    """Delete (unlink) file.

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    return os.unlink(path)
def path_exists(path):
    """Returns if path exists.

    Note: The reason this is kept in a separate module is to easily
    be able to provide a stub module that doesn't alter system
    state at all (for unit tests)
    """
    return os.path.exists(path)
def find_disk(virt_dom):
    """Find root device path for instance.

    May be file or device.

    :param virt_dom: libvirt domain object to inspect
    :returns: path of the root disk (file path, device node, or rbd: URI)
    :raises RuntimeError: when no root device is present in the domain XML
    """
    xml_desc = virt_dom.XMLDesc(0)
    domain = etree.fromstring(xml_desc)
    if CONF.libvirt.virt_type == 'lxc':
        # LXC exposes its rootfs as a filesystem mount; the disk image
        # lives beside the 'rootfs' directory in the source path.
        source = domain.find('devices/filesystem/source')
        disk_path = source.get('dir')
        disk_path = disk_path[0:disk_path.rfind('rootfs')]
        disk_path = os.path.join(disk_path, 'disk')
    else:
        source = domain.find('devices/disk/source')
        disk_path = source.get('file') or source.get('dev')
        if not disk_path and CONF.libvirt.images_type == 'rbd':
            # rbd-backed disks carry the volume name instead of a path
            disk_path = source.get('name')
            if disk_path:
                disk_path = 'rbd:' + disk_path
    if not disk_path:
        raise RuntimeError(_("Can't retrieve root device path "
                             "from instance libvirt configuration"))
    return disk_path
def get_disk_type(path):
    """Retrieve disk type (raw, qcow2, lvm, rbd) for given file.

    Device and rbd paths are recognised by prefix; anything else is
    probed with qemu-img.
    """
    for prefix, disk_type in (('/dev', 'lvm'), ('rbd:', 'rbd')):
        if path.startswith(prefix):
            return disk_type
    return images.qemu_img_info(path).file_format
def get_fs_info(path):
    """Get free/used/total space info for a filesystem.

    :param path: Any dirent on the filesystem
    :returns: A dict containing:
              :free: How much space is free (in bytes)
              :used: How much space is used (in bytes)
              :total: How big the filesystem is (in bytes)
    """
    stats = os.statvfs(path)
    block = stats.f_frsize
    # 'free' counts only blocks available to unprivileged users (f_bavail),
    # while 'used' is derived from the full free-block count (f_bfree).
    return {'total': block * stats.f_blocks,
            'free': block * stats.f_bavail,
            'used': block * (stats.f_blocks - stats.f_bfree)}
def fetch_image(context, target, image_id, user_id, project_id, max_size=0):
    """Grab image.

    Delegates to images.fetch_to_raw, which downloads the image identified
    by *image_id* to *target*.
    """
    images.fetch_to_raw(context, image_id, target, user_id, project_id,
                        max_size=max_size)
def get_instance_path(instance, forceold=False, relative=False):
    """Determine the correct path for instance storage.

    This method determines the directory name for instance storage, while
    handling the fact that we changed the naming style to something more
    unique in the grizzly release.

    :param instance: the instance we want a path for
    :param forceold: force the use of the pre-grizzly format
    :param relative: if True, just the relative path is returned
    :returns: a path to store information about that instance
    """
    pre_grizzly_name = os.path.join(CONF.instances_path, instance['name'])
    # Existing pre-grizzly directories keep their (name-based) layout so
    # instances created before the upgrade are still found.
    if forceold or os.path.exists(pre_grizzly_name):
        if relative:
            return instance['name']
        return pre_grizzly_name
    if relative:
        return instance['uuid']
    return os.path.join(CONF.instances_path, instance['uuid'])
def get_instance_path_at_destination(instance, migrate_data=None):
    """Get the instance path on the destination node of a live migration.

    This method determines the directory name for instance storage on
    the destination node of a live migration.

    :param instance: the instance we want a path for
    :param migrate_data: if not None, it is a dict which holds data
                         required for live migration without shared
                         storage.
    :returns: a path to store information about that instance
    """
    relative_path = None
    if migrate_data:
        relative_path = migrate_data.get('instance_relative_path')
    if relative_path:
        # NOTE(mikal): this doesn't use libvirt_utils.get_instance_path
        # because we are ensuring that the same instance directory name
        # is used as was at the source
        return os.path.join(CONF.instances_path, relative_path)
    return get_instance_path(instance)
def get_arch(image_meta):
    """Determine the architecture of the guest (or host).

    The CPU architecture that must be supported by the hypervisor is taken
    from the image metadata properties when present; otherwise the
    nova-compute (host) architecture is used as a fallback.

    :param image_meta: the metadata associated with the instance image
    :returns: guest (or host) architecture
    """
    properties = (image_meta or {}).get('properties', {})
    image_arch = properties.get('architecture')
    if image_arch is not None:
        return image_arch
    return arch.from_host()
def is_mounted(mount_path, source=None):
    """Check if the given source is mounted at given destination point.

    :param mount_path: mount point to test
    :param source: optional device/source the mount must come from
    :returns: True when findmnt reports a matching mount, False otherwise
              (including when findmnt itself is not installed)
    """
    # Build the command outside the try block: only execution can raise.
    check_cmd = ['findmnt', '--target', mount_path]
    if source:
        check_cmd.extend(['--source', source])
    try:
        utils.execute(*check_cmd)
        return True
    except processutils.ProcessExecutionError:
        # findmnt exits non-zero when nothing matching is mounted there.
        return False
    except OSError as exc:
        # info since it's not required to have this tool.
        if exc.errno == errno.ENOENT:
            LOG.info(_LI("findmnt tool is not installed"))
        return False
# Compiled once at import time; hostnames may contain word characters,
# hyphens, dots and colons (the colon admits IPv6 literals).
_HOSTNAME_RE = re.compile(r"^[\w\-\.:]+$")
def is_valid_hostname(hostname):
    """Return True when *hostname* matches the allowed character set.

    Returning a real bool (rather than the raw match object) keeps every
    truthiness-based caller working while giving a cleaner contract.
    """
    return _HOSTNAME_RE.match(hostname) is not None
| apache-2.0 |
Menooker/gem5_pcm | src/python/m5/util/dot_writer.py | 4 | 11370 | # Copyright (c) 2012-2013 ARM Limited
# All rights reserved.
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andreas Hansson
# Uri Wiener
#####################################################################
#
# System visualization using DOT
#
# While config.ini and config.json provide an almost complete listing
# of a system's components and connectivity, they lack a birds-eye
# view. The output generated by do_dot() is a DOT-based figure (as a
# pdf and an editable svg file) and its source dot code. Nodes are
# components, and edges represent the memory hierarchy: the edges are
# directed, from a master to slave. Initially all nodes are
# generated, and then all edges are added. do_dot should be called
# with the top-most SimObject (namely root but not necessarily), the
# output folder and the output dot source filename. From the given
# node, both processes (node and edge creation) is performed
# recursivly, traversing all children of the given root.
#
# pydot is required. When missing, no output will be generated.
#
#####################################################################
import m5, os, re
from m5.SimObject import isRoot, isSimObjectVector
from m5.params import PortRef
from m5.util import warn
# pydot is an optional dependency: when it is missing, do_dot() becomes a
# no-op instead of failing. Catch only ImportError -- the original bare
# except would also have swallowed KeyboardInterrupt/SystemExit.
try:
    import pydot
except ImportError:
    pydot = False
# need to create all nodes (components) before creating edges (memory channels)
def dot_create_nodes(simNode, callgraph):
    """Recursively add a cluster per SimObject and a node per port.

    Each component becomes a DOT cluster (so children nest visually) and
    each of its ports becomes a node inside that cluster.
    """
    if isRoot(simNode):
        label = "root"
    else:
        label = simNode._name
    full_path = re.sub('\.', '_', simNode.path())
    # add class name under the label
    label = "\"" + label + " \\n: " + simNode.__class__.__name__ + "\""
    # each component is a sub-graph (cluster)
    cluster = dot_create_cluster(simNode, full_path, label)
    # create nodes per port
    for port_name in simNode._ports.keys():
        port = simNode._port_refs.get(port_name, None)
        if port != None:
            full_port_name = full_path + "_" + port_name
            port_node = dot_create_node(simNode, full_port_name, port_name)
            cluster.add_node(port_node)
    # recurse to children
    if simNode._children:
        for c in simNode._children:
            child = simNode._children[c]
            if isSimObjectVector(child):
                for obj in child:
                    dot_create_nodes(obj, cluster)
            else:
                dot_create_nodes(child, cluster)
    callgraph.add_subgraph(cluster)
# create all edges according to memory hierarchy
def dot_create_edges(simNode, callgraph):
    """Recursively add the master->slave edges for every connected port."""
    for port_name in simNode._ports.keys():
        port = simNode._port_refs.get(port_name, None)
        if port != None:
            full_path = re.sub('\.', '_', simNode.path())
            full_port_name = full_path + "_" + port_name
            port_node = dot_create_node(simNode, full_port_name, port_name)
            # create edges (vector ports contribute one edge per element)
            if isinstance(port, PortRef):
                dot_add_edge(simNode, callgraph, full_port_name, port)
            else:
                for p in port.elements:
                    dot_add_edge(simNode, callgraph, full_port_name, p)
    # recurse to children
    if simNode._children:
        for c in simNode._children:
            child = simNode._children[c]
            if isSimObjectVector(child):
                for obj in child:
                    dot_create_edges(obj, callgraph)
            else:
                dot_create_edges(child, callgraph)
def dot_add_edge(simNode, callgraph, full_port_name, peerPort):
    """Add a directed edge from a port to its peer.

    Only MASTER-role ports emit an edge, so each connection is drawn once.
    """
    if peerPort.role == "MASTER":
        peer_port_name = re.sub('\.', '_', peerPort.peer.simobj.path() \
                + "." + peerPort.peer.name)
        callgraph.add_edge(pydot.Edge(full_port_name, peer_port_name))
def dot_create_cluster(simNode, full_path, label):
    """Build the pydot Cluster representing one SimObject."""
    # get the parameter values of the node and use them as a tooltip
    ini_strings = []
    for param in sorted(simNode._params.keys()):
        value = simNode._values.get(param)
        if value != None:
            # parameter name = value in HTML friendly format
            ini_strings.append(str(param) + "=" +
                               simNode._values[param].ini_str())
    # join all the parameters, space separated, into one tooltip string
    tooltip = " ".join(ini_strings)
    return pydot.Cluster( \
        full_path, \
        shape = "Mrecord", \
        label = label, \
        tooltip = "\"" + tooltip + "\"", \
        style = "\"rounded, filled\"", \
        color = "#000000", \
        fillcolor = dot_gen_colour(simNode), \
        fontname = "Arial", \
        fontsize = "14", \
        fontcolor = "#000000" \
        )
def dot_create_node(simNode, full_path, label):
    """Build the pydot Node representing one port of *simNode*."""
    return pydot.Node( \
        full_path, \
        shape = "Mrecord", \
        label = label, \
        style = "\"rounded, filled\"", \
        color = "#000000", \
        fillcolor = dot_gen_colour(simNode, True), \
        fontname = "Arial", \
        fontsize = "14", \
        fontcolor = "#000000" \
        )
# an enumerator for different kinds of node types, at the moment we
# discern the majority of node types, with the caches being the
# notable exception
class NodeType:
    SYS = 0
    CPU = 1
    BUS = 2
    MEM = 3
    DEV = 4
    OTHER = 5
# based on the sim object, determine the node type
def get_node_type(simNode):
    """Classify a SimObject into one of the NodeType constants."""
    if isinstance(simNode, m5.objects.System):
        return NodeType.SYS
    # NULL ISA has no BaseCPU or PioDevice, so check if these names
    # exists before using them
    elif 'BaseCPU' in dir(m5.objects) and \
            isinstance(simNode, m5.objects.BaseCPU):
        return NodeType.CPU
    elif 'PioDevice' in dir(m5.objects) and \
            isinstance(simNode, m5.objects.PioDevice):
        return NodeType.DEV
    elif isinstance(simNode, m5.objects.BaseBus):
        return NodeType.BUS
    elif isinstance(simNode, m5.objects.AbstractMemory):
        return NodeType.MEM
    else:
        return NodeType.OTHER
# based on the node type, determine the colour as an RGB tuple, the
# palette is rather arbitrary at this point (some coherent natural
# tones), and someone that feels artistic should probably have a look
def get_type_colour(nodeType):
    """Map a NodeType constant to an RGB tuple; always returns a tuple."""
    if nodeType == NodeType.SYS:
        return (228, 231, 235)
    elif nodeType == NodeType.CPU:
        return (187, 198, 217)
    elif nodeType == NodeType.BUS:
        return (111, 121, 140)
    elif nodeType == NodeType.MEM:
        return (94, 89, 88)
    elif nodeType == NodeType.DEV:
        return (199, 167, 147)
    else:
        # use a relatively gray shade for OTHER and any unknown value;
        # the previous elif chain implicitly returned None for the latter
        return (186, 182, 174)
# generate colour for a node, either corresponding to a sim object or a
# port
def dot_gen_colour(simNode, isPort = False):
    """Return the HTML fill colour for a SimObject or one of its ports.

    Nested nodes of the same type as their parent get progressively darker
    shades; port nodes are drawn slightly darker than their component.
    """
    # determine the type of the current node, and also its parent, if
    # the node is not the same type as the parent then we use the base
    # colour for its type
    node_type = get_node_type(simNode)
    if simNode._parent:
        parent_type = get_node_type(simNode._parent)
    else:
        parent_type = NodeType.OTHER
    # if this node is the same type as the parent, then scale the
    # colour based on the depth such that the deeper levels in the
    # hierarchy get darker colours
    if node_type == parent_type:
        # start out with a depth of zero
        depth = 0
        parent = simNode._parent
        # find the closest parent that is not the same type
        while parent and get_node_type(parent) == parent_type:
            depth = depth + 1
            parent = parent._parent
        node_colour = get_type_colour(parent_type)
        # slightly arbitrary, but assume that the depth is less than
        # five levels
        r, g, b = map(lambda x: x * max(1 - depth / 7.0, 0.3), node_colour)
    else:
        node_colour = get_type_colour(node_type)
        r, g, b = node_colour
    # if we are colouring a port, then make it a slightly darker shade
    # than the node that encapsulates it, once again use a magic constant
    if isPort:
        r, g, b = map(lambda x: 0.8 * x, (r, g, b))
    return dot_rgb_to_html(r, g, b)
def dot_rgb_to_html(r, g, b):
    """Format an RGB triple as an HTML colour string, e.g. '#1a2b3c'.

    The components may arrive as floats (dot_gen_colour scales them with
    float factors), so truncate to int first: the '%x' conversion rejects
    floats on Python 3.
    """
    return "#%.2x%.2x%.2x" % (int(r), int(g), int(b))
def do_dot(root, outdir, dotFilename):
    """Write the system's DOT source plus rendered svg/pdf next to it.

    Silently does nothing when the optional pydot dependency is missing.
    """
    if not pydot:
        return
    # * use ranksep > 1.0 for for vertical separation between nodes
    # especially useful if you need to annotate edges using e.g. visio
    # which accepts svg format
    # * no need for hoizontal separation as nothing moves horizonally
    callgraph = pydot.Dot(graph_type='digraph', ranksep='1.3')
    dot_create_nodes(root, callgraph)
    dot_create_edges(root, callgraph)
    dot_filename = os.path.join(outdir, dotFilename)
    callgraph.write(dot_filename)
    try:
        # dot crashes if the figure is extremely wide.
        # So avoid terminating simulation unnecessarily
        callgraph.write_svg(dot_filename + ".svg")
        callgraph.write_pdf(dot_filename + ".pdf")
    except Exception:
        # was a bare except, which would also have swallowed
        # KeyboardInterrupt and SystemExit
        warn("failed to generate dot output from %s", dot_filename)
| bsd-3-clause |
phani00/tovp | tovp/contacts/management/commands/import_north_american_contacts_csv.py | 2 | 3294 | import os
import csv
from optparse import make_option
from django.core.management.base import BaseCommand, CommandError
from django.contrib.auth import get_user_model
from django.core.exceptions import ObjectDoesNotExist
from ...models import Person
class Command(BaseCommand):
    """Import Person contacts from a North American CSV export file."""
    option_list = BaseCommand.option_list + (
        make_option(
            "-f",
            "--file",
            dest="filename",
            help="Specify import file",
            metavar="FILE"),
        make_option(
            "-c",
            "--country",
            dest="country",
            help="Specify country"),
        make_option(
            "-l",
            "--location",
            dest="location",
            help="Specify location of collection"),
    )
    help = 'Imports contacts from North American csv files.'
    def handle(self, *args, **options):
        """Read the CSV named by --file and create/update Person records.

        :raises CommandError: when --file is missing or does not resolve
        """
        # make sure file option is present
        if options['filename'] is None:
            raise CommandError("Option `--file=...` must be specified.")
        # default the country to US when not supplied
        if options['country'] is None:
            country = 'US'
        else:
            country = options['country']
        # make sure file path resolves
        if not os.path.isfile(options['filename']):
            raise CommandError("File does not exist at the specified path.")
        self.stdout.write("Opening input file...")
        # imported records are attributed to the primary (pk=1) user
        user = get_user_model().objects.get(pk=1)
        count = 0
        with open(options['filename']) as csvfile:
            csv_reader = csv.DictReader(csvfile, delimiter=',', quotechar='"')
            for row in csv_reader:
                # CSV column header -> Person model field mapping
                field_names = {
                    'Temple': 'temple',
                    'Spiritual Name': 'initiated_name',
                    'First Name': 'first_name',
                    'Middle Name': 'middle_name',
                    'Last Name': 'last_name',
                    'Phone': 'phone_number',
                    'Email': 'email',
                    'Street Address': 'address',
                    'City': 'city',
                    'State': 'state',
                    'Zip Code': 'postcode',
                }
                kwargs = {}
                # only copy columns that actually hold a value
                for field in field_names:
                    if row[field]:
                        kwargs[field_names[field]] = row[field]
                        # setattr(person, field_names[field], row[field].strip())
                try:
                    person = Person.objects.get(country=country, pan_card_number='', **kwargs)
                except ObjectDoesNotExist:
                    person = Person(country=country, yatra='north-america',
                                    pan_card_number='', **kwargs)
                if options['location']:
                    person.location = options['location']
                person.created_by = user
                # a record needs either a civil name or an initiated name
                if (person.first_name and person.last_name) or person.initiated_name:
                    person.save()
                else:
                    print('ERROR - skipping, record missing name')
                    print(row)
                count += 1
                print(person.pk)
            # NOTE(review): this `except` has no matching `try` at this
            # indentation level, and a bare except hides all errors --
            # confirm the intended structure against upstream history.
            except:
                print('not-imported')
        print('Imported %d new contacts.' % count)
| mit |
rubikloud/scikit-learn | examples/ensemble/plot_gradient_boosting_regularization.py | 355 | 2843 | """
================================
Gradient Boosting regularization
================================
Illustration of the effect of different regularization strategies
for Gradient Boosting. The example is taken from Hastie et al 2009.
The loss function used is binomial deviance. Regularization via
shrinkage (``learning_rate < 1.0``) improves performance considerably.
In combination with shrinkage, stochastic gradient boosting
(``subsample < 1.0``) can produce more accurate models by reducing the
variance via bagging.
Subsampling without shrinkage usually does poorly.
Another strategy to reduce the variance is by subsampling the features
analogous to the random splits in Random Forests
(via the ``max_features`` parameter).
.. [1] T. Hastie, R. Tibshirani and J. Friedman, "Elements of Statistical
Learning Ed. 2", Springer, 2009.
"""
print(__doc__)
# Author: Peter Prettenhofer <peter.prettenhofer@gmail.com>
#
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn import ensemble
from sklearn import datasets
# Synthetic binary classification problem from Hastie et al. 2009.
X, y = datasets.make_hastie_10_2(n_samples=12000, random_state=1)
X = X.astype(np.float32)
# map labels from {-1, 1} to {0, 1}
labels, y = np.unique(y, return_inverse=True)
# 2000 training samples, 10000 held out for the deviance curves
X_train, X_test = X[:2000], X[2000:]
y_train, y_test = y[:2000], y[2000:]
original_params = {'n_estimators': 1000, 'max_leaf_nodes': 4, 'max_depth': None, 'random_state': 2,
                   'min_samples_split': 5}
plt.figure()
# each entry: (legend label, line colour, parameter overrides)
for label, color, setting in [('No shrinkage', 'orange',
                               {'learning_rate': 1.0, 'subsample': 1.0}),
                              ('learning_rate=0.1', 'turquoise',
                               {'learning_rate': 0.1, 'subsample': 1.0}),
                              ('subsample=0.5', 'blue',
                               {'learning_rate': 1.0, 'subsample': 0.5}),
                              ('learning_rate=0.1, subsample=0.5', 'gray',
                               {'learning_rate': 0.1, 'subsample': 0.5}),
                              ('learning_rate=0.1, max_features=2', 'magenta',
                               {'learning_rate': 0.1, 'max_features': 2})]:
    params = dict(original_params)
    params.update(setting)
    clf = ensemble.GradientBoostingClassifier(**params)
    clf.fit(X_train, y_train)
    # compute test set deviance at every boosting stage
    test_deviance = np.zeros((params['n_estimators'],), dtype=np.float64)
    for i, y_pred in enumerate(clf.staged_decision_function(X_test)):
        # clf.loss_ assumes that y_test[i] in {0, 1}
        test_deviance[i] = clf.loss_(y_test, y_pred)
    # plot every 5th stage to keep the figure light
    plt.plot((np.arange(test_deviance.shape[0]) + 1)[::5], test_deviance[::5],
             '-', color=color, label=label)
plt.legend(loc='upper left')
plt.xlabel('Boosting Iterations')
plt.ylabel('Test Set Deviance')
plt.show()
| bsd-3-clause |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.